| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 3 .. 248 |
| max_stars_repo_name | string | length 5 .. 125 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 .. 248 |
| max_issues_repo_name | string | length 5 .. 125 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 .. 248 |
| max_forks_repo_name | string | length 5 .. 125 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 .. 2.06M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.03M |
| alphanum_fraction | float64 | 0 .. 1 |
| count_classes | int64 | 0 .. 1.6M |
| score_classes | float64 | 0 .. 1 |
| count_generators | int64 | 0 .. 651k |
| score_generators | float64 | 0 .. 1 |
| count_decorators | int64 | 0 .. 990k |
| score_decorators | float64 | 0 .. 1 |
| count_async_functions | int64 | 0 .. 235k |
| score_async_functions | float64 | 0 .. 1 |
| count_documentation | int64 | 0 .. 1.04M |
| score_documentation | float64 | 0 .. 1 |

hexsha: 837126c7ed58a646eeb7ff8f2ca3a90bb536b289 | size: 3,486 | ext: py | lang: Python
repo: ucsdsysnet/faasnap @ 6d47f5a808d34d37213c57e42a302b351e904614 | path: rootfs/guest/daemon.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

import time, sys, mmap
import subprocess
from flask import Flask, request
app = Flask(__name__)
import fcntl, time, struct
import redis
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
# executor = ProcessPoolExecutor(max_workers=2)
executor = ThreadPoolExecutor(max_workers=2)
MEMINFO = False
ENABLE_TCPDUMP = False
# DUMPPATH = '/dev/shm/dump'
if ENABLE_TCPDUMP:
dumpfile = open('/dev/shm/dump', 'w+')
    tcpdump_proc = subprocess.Popen(['tcpdump', '--immediate-mode', '-l', '-i', 'any'], bufsize=0, stdout=dumpfile, stderr=dumpfile, text=True)
def function(*args):
funcname, hostname, password, request_args = args
r = redis.Redis(host=hostname, port=6379, db=0, password=password)
if funcname == 'hello':
ts = time.time()
return [ts, ts]
if funcname == 'allocate':
ts1 = time.time()
l = [1] * int(request_args['size'])
ts2 = time.time()
return [ts1, ts2]
if funcname == 'image':
import image_processing
return image_processing.lambda_handler(request_args, {'r':r})
if funcname == 'json':
import json_dumps_loads
return json_dumps_loads.lambda_handler(request_args, {'r':r})
if funcname == 'ffmpeg':
import ffmpeg_lambda_handler
return ffmpeg_lambda_handler.lambda_handler(request_args, {'r':r})
if funcname == 'chameleon':
import chameleon_handler
return chameleon_handler.lambda_handler(request_args, {'r':r})
if funcname == 'matmul':
import matmul_lambda_handler
return matmul_lambda_handler.lambda_handler(request_args, {'r':r})
if funcname == 'pyaes':
import pyaes_lambda_handler
return pyaes_lambda_handler.lambda_handler(request_args, {'r':r})
if funcname == 'compression':
import compression_handler
return compression_handler.lambda_handler(request_args, {'r':r})
if funcname == 'recognition':
import recognition_handler
return recognition_handler.lambda_handler(request_args, {'r':r})
if funcname == 'pagerank':
import pagerank_handler
return pagerank_handler.lambda_handler(request_args, {'r':r})
if funcname == 'exec':
ts1 = time.time()
exec(request_args['script'], globals())
ts2 = time.time()
return [ts1, ts2]
if funcname == 'run':
ts1 = time.time()
subprocess.run(request_args['args'], shell=True, check=True)
ts2 = time.time()
return [ts1, ts2]
raise RuntimeError('unknown function')
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/invoke', methods=['POST'])
def invoke():
funcname = request.args['function']
redishost = request.args['redishost']
redispasswd = request.args['redispasswd']
starttime = time.time()
result = function(funcname, redishost, redispasswd, request.json)
finishtime = time.time()
return 'read %f\nprocess %f\nwrite %f' % (result[0]-starttime, result[1]-result[0], finishtime-result[1])
@app.route('/logs')
def logs():
ret, output = subprocess.getstatusoutput('journalctl')
return output
@app.route('/tcpdump')
def tcpdump():
    with open('/dev/shm/dump', 'r') as dumpfile:
        contents = dumpfile.read()
    return contents
@app.route('/dmesg')
def dmesg():
ret, output = subprocess.getstatusoutput('dmesg')
return output
@app.route('/makenoise')
def makenoise():
    size = 1024 * 1024 * 500
    l = [1] * size  # allocate a ~500M-element list to create memory pressure
    return 'ok'  # a Flask view must return a response
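A minimal client sketch for the `/invoke` endpoint above, which multiplexes every benchmark function behind one POST route. The host and port are assumptions (this file never calls `app.run()` itself), and the Redis credentials are placeholders:

```python
import requests

# Placeholder host/port and Redis credentials; the daemon itself never
# binds a socket in this file, so these are assumptions.
resp = requests.post(
    "http://localhost:5000/invoke",
    params={"function": "hello", "redishost": "localhost", "redispasswd": ""},
    json={},  # request_args passed through to the selected function
)
print(resp.text)  # "read ...\nprocess ...\nwrite ..." timing breakdown
```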
stats: avg_line_length=33.2 | max_line_length=155 | alphanum_fraction=0.660356 | count/score: classes=0/0, generators=0/0, decorators=907/0.260184, async_functions=0/0, documentation=482/0.138267

hexsha: 837207e8e61e09370cb7047d5c02c7ae05cae9d2 | size: 2,729 | ext: py | lang: Python
repo: AntonValk/BagGraph-Graph-MIL @ 1447b52b32995cf6c71e731dd1261104cd66ced0 | path: mil_text/rank_plot_all.py | licenses: ["MIT"]
stars: 8 (2021-12-10T19:21:03.000Z .. 2022-03-24T18:53:02.000Z) | issues: null | forks: null

import csv
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
datasets = ['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x',
'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']
algorithms = ['MI-Kernel', 'mi-Graph', 'miFV', 'mi-Net', 'MI-Net', 'MI-Net \nwith DS', 'MI-Net \nwith RC',
'Res+pool', 'Res+pool\n-GCN', 'B-Res+pool\n-GCN (ours)']
my_pal = {'MI-Kernel': 'k', 'mi-Graph': 'gray', 'miFV': 'c', 'mi-Net': 'b', 'MI-Net': 'gold', 'MI-Net \nwith DS': 'teal', 'MI-Net \nwith RC': 'brown',
'Res+pool': 'darkgreen', 'Res+pool\n-GCN': 'm', 'B-Res+pool\n-GCN (ours)': 'r'}
num_data_set = len(datasets)
num_alg = len(algorithms)
acc_matrix = np.loadtxt('rank_box_results.txt', delimiter=' ', usecols=range(num_alg))
print(acc_matrix)
rank = num_alg - np.argsort(np.argsort(acc_matrix, axis=1), axis=1)
print(rank)
for data_id_, data in enumerate(datasets):
print('----------------------------------------------------------------')
    print(data + ', first: ' + algorithms[int(np.where(rank[data_id_] == 1)[0][0])].strip() + ', second: ' + algorithms[int(np.where(rank[data_id_] == 2)[0][0])].strip())
rank = rank.transpose()
# print(rank.shape)
rank_mean = np.mean(rank, axis=1)
print('Average rank')
print(rank_mean)
# rank_std = np.std(rank, axis=1)
rank_median = np.median(rank, axis=1)
print('Median rank')
print(rank_median)
order = np.argsort(rank_mean)
rank = rank[order][0: num_alg]
algorithms = [algorithms[idx] for idx in order]
print(algorithms)
rank_df = pd.concat([pd.DataFrame({algorithms[i]: rank[i, :]}) for i in range(num_alg)], axis=1)
# print(rank_df.head)
data_df = rank_df.melt(var_name='algorithm', value_name='Rank')
fig, ax = plt.subplots(1, 1, figsize=(12, 9), dpi=75)
# plt.figure(figsize=(6, 9))
b = sns.boxplot(y="algorithm", x="Rank", data=data_df, showmeans=True, order=algorithms, whis=[0, 100],
meanprops={"markerfacecolor":"black", "markeredgecolor":"black", "markersize":"50"}, palette=my_pal, linewidth=6)
# plt.ylabel("algorithm", size=18)
plt.xticks(ticks=np.arange(1, num_alg + 1, 1))
plt.xlabel("Rank", size=40)
# plt.plot(rank.mean(axis=1), np.arange(num_alg), '--r*', lw=2)
b.tick_params(labelsize=30)
ax.set_ylabel('')
plt.tight_layout()
plt.show()
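The ranking line above (`num_alg - np.argsort(np.argsort(acc_matrix, axis=1), axis=1)`) is the standard double-argsort trick: the inner argsort orders the scores, the outer one recovers each score's position in that order. A tiny sanity check with made-up accuracies:

```python
import numpy as np

acc = np.array([[0.90, 0.75, 0.85]])  # one dataset, three algorithms
rank = acc.shape[1] - np.argsort(np.argsort(acc, axis=1), axis=1)
print(rank)  # [[1 3 2]] -- the highest accuracy gets rank 1
```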
stats: avg_line_length=43.31746 | max_line_length=161 | alphanum_fraction=0.635398 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=1,120/0.410407

hexsha: 8372488d6e57ae388189d3f6803e33eed08b9007 | size: 6,434 | ext: py | lang: Python
path: rh_logger/backends/backend_datadog_logging.py | licenses: ["MIT"]
stars: tomuram/rh_logger @ dbd1d918ac163994694da82c7e90758cc29bf0e5 | 1 (2020-05-08T15:22:46.000Z .. 2020-05-08T15:22:46.000Z)
issues: HoraceKem/rh_logger @ 7217ce54f1578e7324947ad33381f3c2d1f07e6b | 1 (2016-05-13T17:35:02.000Z .. 2016-05-13T17:35:02.000Z)
forks: HoraceKem/rh_logger @ 7217ce54f1578e7324947ad33381f3c2d1f07e6b | 3 (2016-11-28T05:44:42.000Z .. 2021-08-10T18:28:56.000Z)

'''logger.py - the Datadog logger'''
import collections.abc
import datadog
import datetime
import os
import logging
import rh_logger
import rh_logger.api
import sys
import traceback
class DatadogLogger(rh_logger.api.Logger):
'''Logger for datadog'''
def __init__(self, name, config):
self.name = name
if "api-key" not in config:
raise IndexError(
"The api-key is missing from the datadog configuration "
"subsection in the rh-logger section. See README.md for "
"a configuration example."
)
api_key = config["api-key"]
if name not in config:
raise IndexError(
("The %s section is missing from the datadog configuration "
"subsection of the rh-logger section. See README.md for "
"a configuration example.") % name)
if "app-key" not in config[name]:
raise IndexError(
"There is no app-key in your application's logger "
"configuration section. See README.md for a configuration "
"example.")
app_key = config[name]['app-key']
datadog.initialize(api_key=api_key, app_key=app_key)
def start_process(self, name, msg, args=None):
'''Report the start of a process
:param msg: an introductory message for the process
'''
        if args is None:
            context = []
        elif isinstance(args, str):
            context = [args]
        elif isinstance(args, collections.abc.Sequence):
            context = args
        else:
            context = [str(args)]
datadog.api.Event.create(title="%s starting" % self.name,
text=msg,
alert_type="info",
tags=[self.name, "startup"] + context)
def end_process(self, msg, exit_code):
'''Report the end of a process
:param msg: an informative message about why the process ended
:param exit_code: one of the :py:class: `ExitCode` enumerations
'''
if exit_code == rh_logger.ExitCode.success:
datadog.api.Event.create(title="%s exiting" % self.name,
text=msg,
alert_type="success",
tags=[self.name, "success"])
else:
datadog.api.Event.create(title="%s exiting with error" % self.name,
text=msg,
alert_type="error",
tags=[self.name, "error", exit_code.name])
def report_metric(self, name, metric, subcontext=None):
'''Report a metric such as accuracy or execution time
:param name: name of the metric, e.g. "Rand score"
:param metric: the value
:param subcontext: an optional sequence of objects identifying a
subcontext for the metric such as a tile of the MFOV being processed.
'''
        if isinstance(subcontext, collections.abc.Sequence)\
                and not isinstance(subcontext, str):
tags = [self.name] + subcontext
elif subcontext is not None:
tags = [self.name, subcontext]
else:
tags = [self.name]
datadog.api.Metric.send(metric=name,
points=[metric],
host=self.name,
tags=tags)
def report_metrics(self, name, time_series, context=None):
        if isinstance(context, collections.abc.Sequence)\
                and not isinstance(context, str):
tags = [self.name] + context
elif context is not None:
tags = [self.name, context]
else:
tags = [self.name]
datadog.api.Metric.send(metric=name,
points=time_series.timestamps_and_metrics,
host=self.name,
tags=tags)
def report_event(self, event, context=None, log_level=None):
'''Report an event
:param event: the name of the event, for instance, "Frobbing complete"
:param context: a subcontext such as "MFOV: 5, Tile: 3"
'''
        if isinstance(context, collections.abc.Sequence)\
                and not isinstance(context, str):
tags = [self.name] + context
else:
tags = [self.name, context]
if log_level is None or log_level in (logging.DEBUG, logging.INFO):
alert_type="info"
elif log_level == logging.WARNING:
alert_type="warning"
elif log_level in (logging.ERROR, logging.CRITICAL):
alert_type="error"
else:
alert_type="info"
datadog.api.Event.create(title=event,
text=event,
alert_type=alert_type,
tags=tags)
def report_exception(self, exception=None, msg=None):
'''Report an exception
:param exception: the :py:class: `Exception` that was thrown. Default
is the one reported by sys.exc_info()
:param msg: an informative message
'''
if exception is None:
exc_type, exception, tb = sys.exc_info()
else:
exc_type = type(exception)
tb = None
if msg is None:
msg = str(exception)
tags = [self.name, "exception", exc_type.__name__]
if tb is not None:
# TODO: add stack breadcrumbs to the tags
# Consider using Sentry for logging exceptions
msg += "\n" + "".join(traceback.format_exception(
exc_type, exception, tb))
datadog.api.Event.create(title="Exception report",
text=msg,
alert_type="error",
tags=tags)
datadog.api.Metric.send(metric="exception",
points=[(datetime.datetime.now(),
1)],
type="counter",
host=self.name,
tags=tags)
def get_logger(name, config):
return DatadogLogger(name, config)
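A hedged usage sketch of the factory above. The config layout is inferred from the key checks in `__init__` (a top-level `api-key` plus a per-application `app-key` section); the keys themselves are placeholders, and `rh_logger.ExitCode.success` is the enumeration already referenced in `end_process`:

```python
import rh_logger

# Placeholder credentials; the layout mirrors the checks in __init__.
config = {
    "api-key": "<datadog-api-key>",
    "alignment": {"app-key": "<datadog-app-key>"},
}
logger = get_logger("alignment", config)
logger.start_process("alignment", "Starting stitching run", args=["mfov-5"])
logger.report_metric("rand_score", 0.87, subcontext=["tile-3"])
logger.end_process("Finished cleanly", rh_logger.ExitCode.success)
```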
stats: avg_line_length=38.993939 | max_line_length=79 | alphanum_fraction=0.524712 | count/score: classes=6,182/0.960833, generators=0/0, decorators=0/0, async_functions=0/0, documentation=1,751/0.272148

hexsha: 8372ad2c895756d8ba6acd08356e8ae7366b2454 | size: 11,715 | ext: py | lang: Python
repo: FrappucinoGithub/school_meal_forecast_regressions @ 23db636e7592b39cf100d7e7c707a411779b79bc | path: script_preprocess/building_aggregated_data.py | licenses: ["MIT"]
stars: 2 (2021-05-06T19:02:44.000Z .. 2021-05-10T09:04:36.000Z) | issues: 1 (2021-03-15T11:16:54.000Z .. 2021-03-15T11:16:54.000Z) | forks: 1 (2021-02-24T13:49:46.000Z .. 2021-02-24T13:49:46.000Z)

import os
import pandas as pd
import spacy
from sklearn.feature_extraction.text import CountVectorizer
import datetime
import numpy as np
from processing import get_annee_scolaire
if __name__ == "__main__":
    # print("files", os.listdir("data_processed"))
    ##########################
    # Load the data
    ##########################
path_g = os.path.join("data_processed", "greves.pk")
g = pd.read_pickle(path_g)
g["ind"] = g.ind.map(lambda x: 1 if x == "GREVE" else 0)
g = g[["taux_grevistes", "nos", "ind", "greves_manquantes"]]
path_m = os.path.join("data_processed", "menus.pk")
m = pd.read_pickle(path_m)
path_fe = os.path.join("data_processed", "frequentation_effectif.pk")
fe = pd.read_pickle(path_fe)
path_ferie = os.path.join("data_processed", "feries.pk")
feries = pd.read_pickle(path_ferie)
path_vacs = os.path.join("data_processed", "vacances.pk")
vacances = pd.read_pickle(path_vacs)
path_epidemies = os.path.join("data_processed", "epidemies.pk")
epidemies = pd.read_pickle(path_epidemies)
path_religions = os.path.join("data_processed", "religions.pk")
religions = pd.read_pickle(path_religions)
##########################
    # Join the different tables on their dates
##########################
df = fe.groupby("date")[["prevision", "reel", "effectif"]].sum().join(g).join(m).join(feries).join(vacances).join(epidemies).join(religions)
##########################
    # Fill in missing values
##########################
for col in df.isnull().sum()[df.isnull().sum()>0].index.drop("menu"):
df[col] = df[col].fillna(0)
df["menu"] = df["menu"].map(lambda x: x if type(x) == list else [])
####################################
    # Add day, month, week, school year and Christmas-meal columns
####################################
dic_jour = {0: "Lundi", 1: "Mardi", 2: "Mercredi", 3: "Jeudi", 4: "Vendredi", 5: "Samedi", 6: "Dimanche"}
dic_mois = {1: "Janvier", 2: "Fevrier", 3: "Mars", 4: "Avril", 5: "Mai", 6: "Juin", 7: "Juillet", 8: "Aout",
9: "Septembre", 10: "Octobre", 11: "Novembre", 12: "Decembre"}
df["jour"] = df.index.weekday
df["jour"] = df["jour"].apply(lambda x: dic_jour[x])
df["semaine"] = df.index.week
df["mois"] = df.index.month
df["mois"] = df["mois"].apply(lambda x: dic_mois[x])
df["annee_scolaire"] = df.index.to_series().map(get_annee_scolaire)
date_repas_noel = ["2012-12-20", "2013-12-19", "2014-12-18", "2015-12-17", "2016-12-15",
"2017-12-21", "2018-12-20"]
l_noel = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in date_repas_noel]
df_noel = pd.DataFrame(l_noel, columns=["date"])
df_noel["repas_noel"] = 1
df = df.join(df_noel.set_index("date"))
df["repas_noel"] = df["repas_noel"].fillna(0)
####################################
    # Add food-waste columns
####################################
assert df.isnull().sum().sum() == 0
df["gaspillage_volume"] = df["prevision"] - df["reel"]
df["gaspillage_pourcentage"] = 100 * (df["prevision"] - df["reel"]) / df["prevision"]
####################################
    # Add menu-derived columns
####################################
nlp = spacy.load("fr_core_news_sm")
corpus = df['menu'].apply(lambda x: "".join([i + " " for i in x]))
corpus = corpus.dropna()
# stop_word
    liste = ['04', '10', '17', '18225', '2015', '2016', '220gr', '268', '29', '500', '500g', '5kg', '850', '500', '500g',
             '5kg', '850', 'ab', 'an', 'au', 'aux', 'avec', 'baut', 'bbc', 'de', 'des', 'du', 'en', 'et', 'gr', 'kg',
             'la', 'le', 'les', 'ou', 'par', 's17', 'sa', 'sans', 'ses', 'son']
# Create CountVectorizer object
vectorizer = CountVectorizer(strip_accents='ascii', stop_words=liste, lowercase=True, ngram_range=(1, 1))
# Generate matrix of word vectors
bow_matrix = vectorizer.fit_transform(corpus)
# Convert bow_matrix into a DataFrame
bow_df = pd.DataFrame(bow_matrix.toarray())
# Map the column names to vocabulary
bow_df.columns = vectorizer.get_feature_names()
bow_df.index = df.index
# feature porc
l_porc = ["carbonara", "carbonata", "cassoulet", "chipo", "chipolatas", "choucroute",
"cordon", "croziflette", "francfort", "jambon", "knacks", "lardons", "porc", "rosette",
"saucisse", "saucisses", "tartiflette"]
df["porc"] = sum([bow_df[alim] for alim in l_porc])
df['porc'] = df['porc'] > 0
df['porc'] = df['porc'].astype('int')
# feature viande
l_viande = ["roti", "agneau", "blanquette", "boeuf", "boudin", "boulettes",
"bourguignon", "bourguignonne", "canard", "carne", "chapon", "colombo",
"couscous", "dinde", "escalope", "farci", "foie", "kebab", "lapin", "merguez",
"mouton", "napolitaines", "nuggets", "paupiette", "pintade",
"poulet", "steak", "stogonoff", "strogonoff", "tagine", "tajine",
"veau", "viande", "volaile", "volaille", "carbonara", "carbonata", "cassoulet", "chipo", "chipolatas",
"choucroute", "cordon", "croziflette", "francfort", "jambon", "knacks", "lardons", "porc", "rosette",
"saucisse", "saucisses", "tartiflette", "parmentier"]
df["viande"] = sum([bow_df[alim] for alim in l_viande])
df['viande'] = df['viande'] > 0
df['viande'] = df['viande'].astype('int')
df = df.reset_index().rename(columns = {"index":"date"})
l_index = ["2018-01-22", "2017-10-09", "2017-05-09", "2016-10-18", "2016-04-25", "2015-05-26", "2014-11-24",
"2014-05-26", "2014-03-31", "2014-01-20", "2012-01-16", "2012-01-30", "2012-07-02", "2012-10-01",
"2011-01-17", "2011-01-31", "2011-09-13", "2015-06-22", "2015-01-19", "2014-06-30", "2012-06-18",
"2011-06-20"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
    # special handling of Neapolitan lasagna to avoid confusion with fish lasagna
l_index = ["2016-02-22", "2016-02-04", "2015-11-23", "2015-11-17", "2015-10-05",
"2015-05-04", "2015-01-26", "2014-12-15", "2013-09-23", "2012-10-09", "2012-05-21", "2012-02-27",
"2011-11-03", "2011-09-05", "2011-05-09", "2012-12-10", "2013-12-02", "2014-05-12", "2016-05-09"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
    # special handling of terms used for either sauteed fish or meat: chili, pot-au-feu, bolognese, stuffed courgette, ravioli
l_index = ["2016-01-28", "2016-03-17", "2016-03-07", "2015-09-15", "2012-12-06", "2012-05-03", "2012-02-09",
"2011-11-03",
"2011-09-13", "2011-06-07", "2011-04-04", "2014-06-12", "2012-11-12", "2015-06-22"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 1
    # special handling of vegetable parmentier and soy steak
l_index = ["2019-11-25", "2014-06-20"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 0
# feature poisson
l_poisson = ["poissons", "sardines", "perray", "thon", "calamar", "lieu", "colin", "crabe", "crevette", "crustace",
"dorade", "maquereau", "poisson", "rillette", "sardine", "saumon"]
df["poisson"] = sum([bow_df[alim] for alim in l_poisson])
df['poisson'] = df['poisson'] > 0
df['poisson'] = df['poisson'].astype('int')
    df.loc[(df['viande'] == 1) & (df['poisson'] == 1), 'poisson'] = 0
    # special handling of fish parmentier, fish nuggets, soy steak with tuna bake, salmon carbonara
l_index = ["2019-05-17", "2019-05-17", "2019-02-01", "2018-11-23", "2018-10-19", "2018-09-14", "2018-06-05",
"2018-03-27", "2018-01-16", "2017-12-01", "2017-09-22", "2017-05-05", "2016-05-03", "2016-02-26",
"2016-01-15", "2015-11-20", "2015-09-22", "2015-09-08", "2015-06-05", "2014-09-08", "2014-03-25",
"2014-02-18", "2014-01-24", "2013-12-10", "2013-11-29", "2013-10-01", "2012-12-14", "2012-10-19",
"2012-09-21", "2012-03-16", "2012-01-20", "2011-09-09", "2011-03-18", "2019-03-08"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "viande"] = 0
df.loc[df[df["date"] == i].index, "poisson"] = 1
    # special handling of seafood paella and fillet
l_index = ['2011-01-10', '2012-01-09', '2011-01-07', "2012-01-06"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 1
    # two menus (veggie and meat): treated as a veggie menu
l_index = ["2015-11-13", "2015-09-11"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 0
df.loc[df[df["date"] == i].index, "viande"] = 0
    # two menus (fish and meat): treated as a fish menu
l_index = ["2015-11-20", "2015-10-16", "2015-10-02", "2015-09-25", "2015-09-18", "2015-09-04", "2015-06-25",
"2015-06-11"]
index = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in l_index]
for i in index:
df.loc[df[df["date"] == i].index, "poisson"] = 1
df.loc[df[df["date"] == i].index, "viande"] = 0
    # unknown menu, but probably with meat according to the model
df.loc[df[df["date"] == datetime.datetime.strptime("2015-10-15", "%Y-%m-%d")].index, "viande"] = 1
# feature bio
df['bio'] = bow_df["bio"]
# set date as index
df = df.set_index("date")
    ###############################################################
    # Add the first 4 and last 4 days of each school year (high uncertainty)
    ###############################################################
ind = []
temp = []
subset = df.copy()
#print("subset", subset["annee_scolaire"].unique()[1:])
for i in range(1, 5):
for annee in subset["annee_scolaire"].unique()[1:]:
temp.append(min(subset[(subset.index.year == min(subset[subset["annee_scolaire"] == annee].index.year)) & (
subset["annee_scolaire"] == annee)].index))
df.loc[temp, "4_premiers_jours"] = 1
ind.append(temp)
subset.drop(temp, inplace=True)
temp = []
for i in range(1, 5):
for annee in subset["annee_scolaire"].unique()[:-1]:
temp.append(max(subset[(subset.index.year == max(subset[subset["annee_scolaire"] == annee].index.year)) & (
subset["annee_scolaire"] == annee)].index))
df.loc[temp, "4_derniers_jours"] = 1
ind.append(temp)
subset.drop(temp, inplace=True)
temp = []
df["4_derniers_jours"].fillna(0, inplace=True)
df["4_premiers_jours"].fillna(0, inplace=True)
####################################
    # Checks (length and missing values)
####################################
assert len(df) == 1188
df.to_pickle("data_processed/global.pk")
df.to_excel("data_processed/global.xlsx")
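The porc/viande/poisson flags above all follow one pattern: sum the CountVectorizer columns for a keyword list, then threshold to a 0/1 indicator. A toy illustration with an invented corpus and keyword list (recent scikit-learn renames `get_feature_names` to `get_feature_names_out`):

```python
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

corpus = ["jambon puree", "saumon riz", "salade verte"]  # invented menus
vec = CountVectorizer()
bow = pd.DataFrame(vec.fit_transform(corpus).toarray(),
                   columns=vec.get_feature_names_out())
keywords = ["jambon", "saucisse"]  # invented keyword list
flag = sum(bow[w] for w in keywords if w in bow.columns) > 0
print(flag.astype(int).tolist())  # [1, 0, 0]
```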
stats: avg_line_length=41.39576 | max_line_length=169 | alphanum_fraction=0.553564 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=5,725/0.487898

hexsha: 83731763e6cbd3e1546d2f2ccc7d203e0567127e | size: 1,411 | ext: py | lang: Python
repo: jsmnbom/htxaarhuslan @ 5244c4e65f4912c5d2e193f1ac355b3206d1c1b8 | path: main/migrations/0028_auto_20170103_1634.py | licenses: ["MIT"]
stars: 1 (2019-09-06T10:28:40.000Z .. 2019-09-06T10:28:40.000Z) | issues: 2 (2018-10-22T10:33:04.000Z .. 2019-01-31T19:36:04.000Z) | forks: 1 (2019-09-06T10:28:41.000Z .. 2019-09-06T10:28:41.000Z)

# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-03 15:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0027_auto_20170103_1130'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='navn')),
('url', models.URLField(blank=True, help_text='Valgfri. Link som kan klikkes på kalenderen.', max_length=255, null=True, verbose_name='link')),
('start', models.DateTimeField(null=True, verbose_name='Start')),
('end', models.DateTimeField(blank=True, null=True, verbose_name='Slut')),
('lan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Lan', verbose_name='lan')),
],
options={
'verbose_name_plural': 'begivenheder',
'verbose_name': 'begivenhed',
},
),
migrations.AlterField(
model_name='tournament',
name='end',
field=models.DateTimeField(blank=True, null=True, verbose_name='Slut'),
),
]
stats: avg_line_length=38.135135 | max_line_length=159 | alphanum_fraction=0.59674 | count/score: classes=1,221/0.864731, generators=0/0, decorators=0/0, async_functions=0/0, documentation=316/0.223796

hexsha: 8373339dd9a549bf7c7f2156eb693bffea51c85a | size: 6,207 | ext: py | lang: Python
repo: KamshatTazhenova/hh0 @ c71845056c5a108cb95654b1a1012d63034541c2 | path: gw_grb_h_0_ppd_summaries.py | licenses: ["MIT"]
stars: 11 (2017-07-04T06:56:17.000Z .. 2022-01-04T08:35:48.000Z) | issues: null | forks: 5 (2017-07-07T10:00:19.000Z .. 2021-06-08T09:38:25.000Z)

import numpy as np
import matplotlib.pyplot as mp
import matplotlib.cm as mpcm
import matplotlib.colors as mpc
import scipy.stats as ss
# plotting settings
lw = 1.5
mp.rc('font', family = 'serif')
mp.rcParams['text.latex.preamble'] = r'\boldmath'
mp.rcParams['axes.linewidth'] = lw
mp.rcParams['lines.linewidth'] = lw
cm = mpcm.get_cmap('plasma')
# datafiles
ppds = ['cmb', 'loc']
sums = ['ptes', 'prs']
# posterior summaries
post_means = np.genfromtxt('gw_grb_h_0_posterior_means.csv', \
delimiter=',')
post_vars = np.genfromtxt('gw_grb_h_0_posterior_vars.csv', \
delimiter=',')
n_h_0_true = post_means.shape[0]
n_bs = post_means.shape[1]
print(n_bs)
h_0_true_col = [cm(col) for col in np.linspace(0.2, 0.8, n_h_0_true)]
fig, axes = mp.subplots(1, 2, figsize=(12, 5))
for i in range(n_h_0_true):
    print('* H_0 = {:5.2f}'.format(post_means[i, 0]))
    to_print = 'posterior mean = {:5.2f} +/- {:4.2f}'
    print(to_print.format(np.mean(post_means[i, 1:]),
                          np.std(post_means[i, 1:])))
    to_print = 'posterior sigma = {:5.2f} +/- {:4.2f}'
    print(to_print.format(np.mean(np.sqrt(post_vars[i, 1:])),
                          np.std(np.sqrt(post_vars[i, 1:]))))
kde = ss.gaussian_kde(post_means[i, 1:])
grid = np.linspace(np.min(post_means[i, 1:]), \
np.max(post_means[i, 1:]), \
1000)
axes[0].plot(grid, kde.evaluate(grid), color=h_0_true_col[i])
axes[0].axvline(post_means[i, 0], color=h_0_true_col[i], ls='--')
kde = ss.gaussian_kde(np.sqrt(post_vars[i, 1:]))
grid = np.linspace(np.min(np.sqrt(post_vars[i, 1:])), \
np.max(np.sqrt(post_vars[i, 1:])), \
1000)
axes[1].plot(grid, kde.evaluate(grid), color=h_0_true_col[i], \
label=r'$H_0 = {:5.2f}$'.format(post_vars[i, 0]))
axes[0].set_xlabel(r'$\bar{H}_0$', fontsize=18)
axes[0].set_ylabel(r'${\rm Pr}(\bar{H}_0)$', fontsize=18)
axes[0].tick_params(axis='both', which='major', labelsize=12)
axes[1].set_xlabel(r'$\sigma_{H_0}$', fontsize=18)
axes[1].set_ylabel(r'${\rm Pr}(\sigma_{H_0})$', fontsize=18)
axes[1].tick_params(axis='both', which='major', labelsize=12)
axes[1].legend(loc='upper right', fontsize=14)
fig.suptitle('Bootstrap-Averaged Posterior Means / Sigmas', \
fontsize=18)
fig.savefig('gw_grd_h_0_bs_avg_posterior_moments.pdf', \
bbox_inches = 'tight')
mp.close(fig)
# PPD summaries
for i in range(len(ppds)):
for j in range(len(sums)):
# read data
fname = 'gw_grb_h_0_' + ppds[i] + '_ppd_' + sums[j]
data = np.genfromtxt(fname + '.csv', delimiter=',')
n_bs = data.shape[1]
        print(n_bs)
# plot
n_h_0_true = data.shape[0]
fig, axes = mp.subplots(1, n_h_0_true, \
figsize=(6 * n_h_0_true, 5))
if ppds[i] == 'cmb':
fig.suptitle(r'$\hat{H}_0^{\rm CMB}\, {\rm Prediction}$', \
fontsize=18)
else:
fig.suptitle(r'$\hat{H}_0^{\rm CDL}\, {\rm Prediction}$', \
fontsize=18)
if sums[j] == 'ptes':
x_label = r'$p$'
y_label = r'${\rm Pr}(p)$'
else:
x_label = r'$\rho$'
y_label = r'${\rm Pr}(\rho)$'
for k in range(n_h_0_true):
kde = ss.gaussian_kde(data[k, 1:])
grid = np.linspace(np.min(data[k, 1:]), \
np.max(data[k, 1:]), \
1000)
axes[k].plot(grid, kde.evaluate(grid), color=cm(0.5))
axes[k].set_xlabel(x_label, fontsize=18)
axes[k].set_ylabel(y_label, fontsize=18)
axes[k].tick_params(axis='both', which='major', labelsize=12)
axes[k].set_title(r'$H_0 = {:5.2f}$'.format(data[k, 0]), \
fontsize=18)
# finish plot
fig.savefig(fname + '.pdf', bbox_inches = 'tight')
mp.close(fig)
# quick check of required numbers of samples
def rho(d, n, var_ratio, n_event_ref, n_event):
d_n_event = n_event_ref / n_event
return np.exp(-0.5 * rho_num(d, n, d_n_event) / \
rho_den(var_ratio, d_n_event))
def rho_num(d, n, d_n_event):
if d > 0.0:
return (d - n * np.sqrt(d_n_event)) ** 2
else:
return (d + n * np.sqrt(d_n_event)) ** 2
def rho_den(var_ratio, d_n_event):
return var_ratio + d_n_event
def num_ratio(d, n, m, var_ratio):
term = (m ** 2 * var_ratio - d ** 2)
    print(term)
return [((-n * d - \
np.sqrt((n * d) ** 2 - term * (m ** 2 - n ** 2))) / \
term) ** 2, \
((-n * d + \
np.sqrt((n * d) ** 2 - term * (m ** 2 - n ** 2))) / \
term) ** 2]
n_ref = 51.0
mu_obs = np.array([67.81, 73.24])
sig_obs = np.array([0.92, 1.74])
n_sigma_sv = 1.0
n_sigma_thresh = 3.0
n_sigma_diff = [(mu_obs[1] - mu_obs[0]) / np.sqrt(post_vars[i, 1]), \
(mu_obs[0] - mu_obs[1]) / np.sqrt(post_vars[i, 1])]
var_ratio = [sig_obs[1] ** 2 / post_vars[i, 1], \
sig_obs[0] ** 2 / post_vars[i, 1]]
print(n_sigma_diff)
print(var_ratio)
n_req = np.zeros(2)
n_req[0] = n_ref * num_ratio(n_sigma_diff[0], n_sigma_sv, \
n_sigma_thresh, var_ratio[0])[0]
ln_rho = -2.0 * np.log(rho(n_sigma_diff[0], n_sigma_sv, \
var_ratio[0], n_ref, n_req[0]))
print(n_req[0], ln_rho, n_sigma_thresh ** 2)
n_req[1] = n_ref * num_ratio(n_sigma_diff[1], n_sigma_sv, \
n_sigma_thresh, var_ratio[1])[1]
ln_rho = -2.0 * np.log(rho(n_sigma_diff[1], n_sigma_sv, \
var_ratio[1], n_ref, n_req[1]))
print(n_req[1], ln_rho, n_sigma_thresh ** 2)
n_grid = np.arange(n_ref, 5000.0)
mp.loglog(n_grid, rho_num(n_sigma_diff[0], n_sigma_sv, n_ref / n_grid), 'r', lw=1.0)
mp.plot(n_grid, 1.0 / rho_den(var_ratio[0], n_ref / n_grid), 'g', lw=1.0)
mp.plot(n_grid, 1.0 / rho_den(var_ratio[1], n_ref / n_grid), 'b', lw=1.0)
mp.plot(n_grid, -2.0 * np.log(rho(n_sigma_diff[0], n_sigma_sv, var_ratio[0], \
n_ref, n_grid)), 'g')
mp.plot(n_grid, -2.0 * np.log(rho(n_sigma_diff[1], n_sigma_sv, var_ratio[1], \
n_ref, n_grid)), 'b')
mp.axhline(n_sigma_thresh ** 2, color='k', linestyle='-.')
mp.axvline(n_req[0], color='g', linestyle='-.')
mp.axvline(n_req[1], color='b', linestyle='-.')
mp.xlabel(r'$N$')
mp.ylabel(r'$f(N)$')
mp.xlim(n_ref, 5000)
mp.ylim(0.3, 40.0)
mp.savefig('gw_grb_h_0_ppd_samp_var_limits.pdf', bbox_inches='tight')
mp.show()
exit()
print(num_ratio(4.53, n_sigma_sv, n_sigma_thresh, 2.1))
print(5.43, mu_obs[1] - mu_obs[0])
print(1.2, np.sqrt(post_vars[i, 1]))
print(5.43 / 1.2, n_sigma_diff[0])
m = 3.0
n = 1.0
d = 3.77 # 4.53
vrat = 1.46 # 2.1
print(((d*n+np.sqrt((d*n)**2-(vrat*m**2-d**2)*(m**2-n**2)))/(vrat*m**2-d**2))**2)
stats: avg_line_length=33.733696 | max_line_length=84 | alphanum_fraction=0.624617 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=972/0.156597

hexsha: 837393fa0b035c535d77757051efaef98b194a88 | size: 1,724 | ext: py | lang: Python
repo: ShevchenyaIlya/Chess_knight_move @ b7339edbf9423d028f6eb852e0c1c46869a6c0ff | path: first_lab.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

import pygame
from laboratory.base import ChessBoard, ChessHorse, Grid
import os
os.environ["SDL_VIDEO_WINDOW_POS"] = "400, 100"
surface = pygame.display.set_mode((600, 600))
pygame.display.set_caption("Chess knight move")
pygame.init()
grid = ChessBoard()
horse = ChessHorse()
cells = Grid()
def stand_color():
surface.fill((255, 255, 255))
grid.draw(surface)
horse.draw(surface)
if cells.count_of_steps:
cells.show_forbidden_cell(surface, (0, 0, 0))
if horse.is_active:
horse.draw_frame(surface, cells)
pygame.display.flip()
def restart_game():
global grid, horse, cells
del grid, horse, cells
grid = ChessBoard()
horse = ChessHorse()
cells = Grid()
def main():
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
if pygame.mouse.get_pressed()[0]:
pos = pygame.mouse.get_pos()
if (pos[0] // 75, pos[1] // 75) == horse.current_pos:
horse.click_horse_handler(surface)
horse.get_possible_steps()
else:
if horse.is_active:
horse.change_pos(pos[0] // 75, pos[1] // 75, cells)
horse.draw(surface)
horse.auto_step(cells, surface)
case = horse.check_end(cells)
if case == "restart":
restart_game()
elif case == "quit":
running = False
stand_color()
pygame.quit()
if __name__ == "__main__":
    main()
stats: avg_line_length=26.523077 | max_line_length=79 | alphanum_fraction=0.541763 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=66/0.038283

hexsha: 8375a587533b43ad16c1fde976759a83788c8ad5 | size: 1,925 | ext: py | lang: Python
repo: martin-marek/parking-space-occupancy @ 5514adcb435e239f36b19f4e868678ae0a65f5b8 | path: models/faster_rcnn_fpn.py | licenses: ["MIT"]
stars: 6 (2021-07-29T04:15:15.000Z .. 2022-01-12T07:18:14.000Z) | issues: null | forks: 4 (2021-07-27T10:04:33.000Z .. 2021-11-27T20:28:35.000Z)

from torch import nn
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.utils import load_state_dict_from_url
from .utils import pooling
from .utils.class_head import ClassificationHead
class FasterRCNN_FPN(nn.Module):
"""
A Faster R-CNN FPN inspired parking lot classifier.
Passes the whole image through a CNN -->
pools ROIs from the feature pyramid --> passes
each ROI separately through a classification head.
"""
def __init__(self, roi_res=7, pooling_type='square'):
super().__init__()
# backbone
# by default, uses frozen batchnorm and 3 trainable layers
self.backbone = resnet_fpn_backbone('resnet50', pretrained=True)
hidden_dim = 256
# pooling
self.roi_res = roi_res
self.pooling_type = pooling_type
# classification head
in_channels = hidden_dim * self.roi_res**2
self.class_head = ClassificationHead(in_channels)
# load coco weights
# url taken from: https://github.com/pytorch/vision/blob/master/torchvision/models/detection/faster_rcnn.py
weights_url = 'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth'
state_dict = load_state_dict_from_url(weights_url, progress=False)
self.load_state_dict(state_dict, strict=False)
def forward(self, image, rois):
# get backbone features
features = self.backbone(image[None])
# pool ROIs from features pyramid
features = list(features.values())
features = pooling.pool_FPN_features(features, rois, self.roi_res, self.pooling_type)
# pass pooled ROIs through classification head to get class logits
features = features.flatten(1)
class_logits = self.class_head(features)
return class_logits
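A hedged smoke-test sketch of the class above. The ROI tensor shape `[N, 4, 2]` (normalized quadrilateral corners per parking space) is an assumption about the repo's pooling convention, the random inputs are placeholders, and loading the COCO weights requires network access:

```python
import torch

model = FasterRCNN_FPN(roi_res=7, pooling_type='square')
model.eval()

image = torch.rand(3, 800, 800)  # forward() adds the batch dimension itself
rois = torch.rand(10, 4, 2)      # assumed: 10 quadrilateral parking spaces
with torch.no_grad():
    class_logits = model(image, rois)
print(class_logits.shape)        # one logit vector per ROI
```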
stats: avg_line_length=37.019231 | max_line_length=115 | alphanum_fraction=0.682078 | count/score: classes=1,686/0.875844, generators=0/0, decorators=0/0, async_functions=0/0, documentation=661/0.343377

hexsha: 837739a005a237684a780c9335e0ae3dc01c7873 | size: 730 | ext: py | lang: Python
repo: misc77/dsegenerator @ 3fbaed79ff2809de5b7efb3ac86acf8ffb45afe4 | path: configProvider.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

from resources import Resources
import configparser
def getConfigEntry(group, item):
    entry = None
    if group is not None and item is not None:
        config = configparser.ConfigParser()
        try:
            config.read(Resources.getConfigFile())
        except FileNotFoundError as err:
            print("ERROR: File '" + Resources.getConfigFile() + "' NOT found! " + str(err.strerror))
            config = None
        if config is not None and group in config:
            entry = config[group].getint(item)
    return entry
def getConfigEntryOrDefault(group, item, defaultValue=None):
    entry = getConfigEntry(group, item)
    if entry is None:
        entry = defaultValue
    return entry
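A short usage sketch, assuming `Resources.getConfigFile()` points at a readable INI file; the `[database]` section and `port` key are invented for illustration:

```python
# Invented section/key; getConfigEntry returns ints via getint().
port = getConfigEntryOrDefault("database", "port", defaultValue=5432)
print(port)  # 5432 unless the config file overrides it
```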
stats: avg_line_length=29.2 | max_line_length=109 | alphanum_fraction=0.642466 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=30/0.041096

hexsha: 8378ea628ccc21371175ad9061b5e8ae8ef0a59b | size: 3,041 | ext: py | lang: Python
repo: Mouse-Diplodicus/H5-NewsTracker @ a771105463db6757171ea28e847208960c7ac598 | path: H5_News_Tracker/gui/ticker_window.py | licenses: ["BSD-2-Clause"]
stars: null | issues: 20 (2020-02-27T01:39:28.000Z .. 2021-12-13T20:39:17.000Z) | forks: null

"""
Program displays a window with text using Tkinter when run.
"""
import tkinter
import webbrowser
from tkinter import font
from tkinter import ttk
class TickerWindow(tkinter.Frame):
"""Main Object for creating and running the news ticker gui"""
max_label_width = 80
font_size = 12
updating_feed = []
def __init__(self, master=None):
"""Initializes the display window for the news ticker"""
print("constructing gui")
super().__init__(master)
self.master = master
self.label_ticker = ttk.Label(master)
self.button_exit = ttk.Button(master)
self.master.overrideredirect(1)
self.label_ticker.configure(padding=[0, -1, 0, -1])
self.button_exit.configure(text="X", padding=[2, -1, 2, -1], command=self.master.quit)
self.set_style()
self.default_font = font.nametofont("TkDefaultFont")
self.default_font.configure(size=self.font_size)
self.build()
print("Gui constructed")
def start(self):
"""Start gui main update loop """
print("starting main loop")
self.master.mainloop()
def set_style(self):
"""Sets styling for various Tkinter objects"""
print("setting styling")
style = ttk.Style()
style.configure("default.TLabel", foreground="#000000", background="#ffffff")
style.configure("WB.TLabel", foreground="#ffffff", background="#000000", relief="GROOVE")
style.configure("Exit.TLabel", foreground="#000000", background="#931113", relief="RAISED")
self.label_ticker.configure(style="WB.TLabel")
self.button_exit.configure(style="Exit.TLabel")
def build(self):
"""Sets organization for label and exit button"""
print("organizing gui layout")
self.label_ticker.grid(row=0, column=0)
self.button_exit.grid(row=0, column=1)
def update_headline(self, headline, url):
"""Function updates the headline and associated url being displayed"""
output = self.size_headline(headline)
print("updating ticker to headline: ", output, " url: ", url)
self.label_ticker.configure(text=output)
self.label_ticker.bind("<Button-1>", lambda e: webbrowser.open_new(url))
def size_headline(self, headline):
"""Function takes a string representing a headline and if it is longer than the maximum width allowed it will
shorten the string and append an ellipse"""
if headline is None:
return ""
        max_pixel_width = self.default_font.measure("n") * self.max_label_width
        if max_pixel_width < self.default_font.measure(headline):
            index = self.max_label_width
            max_pixel_width -= self.default_font.measure("...")
            while max_pixel_width > self.default_font.measure(headline[:index]):
index += 1
output = headline[:index-1]+"..."
else:
output = headline
return output
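A minimal launch sketch for the widget above; the headline and URL are placeholders:

```python
import tkinter

root = tkinter.Tk()
ticker = TickerWindow(master=root)
ticker.update_headline("A very long placeholder headline that may get truncated", "https://example.com")
ticker.start()  # blocks in Tk's main loop until the X button quits
```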
stats: avg_line_length=39.493506 | max_line_length=117 | alphanum_fraction=0.64584 | count/score: classes=2,888/0.949688, generators=0/0, decorators=0/0, async_functions=0/0, documentation=864/0.284117

hexsha: 837a6c35581467319f1075c05fa1224fd922d268 | size: 3,562 | ext: py | lang: Python
repo: Hiroshiba/KotohiraYu @ 1ab5a5376e01aae5c730ae163298e1c34980b586 | path: Yu/Web.py | licenses: ["MIT"]
stars: null | issues: 1 (2019-05-18T13:16:25.000Z .. 2019-05-18T13:16:25.000Z) | forks: null

# -*- coding: utf-8 -*-
import os
import configparser
import glob
import sqlite3
import traceback
import json
from bottle import route, run, auth_basic, abort, response
from sqlite3 import OperationalError
config = configparser.ConfigParser()
config.read('config/config.ini')
def VERIFY(username, password):
return username == config['web']['user'] and password == config['web']['pass']
@route('/')
@auth_basic(VERIFY)
def index():
return """<!DOCTYPE html>
<html>
<head>
<title>Yu-Console</title>
</head>
<body>
<ul>
<li><a href="panic-log">PANIC LOG</a></li>
</ul>
</body>
</html>
"""
@route('/panic-log')
@route('/panic-log/')
@auth_basic(VERIFY)
def list_panicLog():
panicLogList = glob.glob("panic-log/PANIC-*.TXT")
output = "<!DOCTYPE html><html><head><title>PANICLOG</title></head><body><h1>PANICLOG</h1><ul>"
link = ""
for l in panicLogList:
link = l.replace('panic-log/PANIC-', '').replace('.TXT', '')
output += f'<li><a href="/panic-log/{link}">{link}</a></li>'
output += '</ul></body></html>\n'
return output
@route('/db')
@route('/db/')
@auth_basic(VERIFY)
def list_table():
return "Underconstruction"
@route('/db/<table:re:[a-z_]+>')
@auth_basic(VERIFY)
def list_dbtable(table):
try:
conn = sqlite3.connect('Yu_{}.db'.format(config['instance']['address']))
c = conn.cursor()
output = f"<!DOCTYPE html><html><head><title>TABLE SHOW: {table}</title></head><body><h1>TABLE SHOW: {table}</h1><table>"
output += "<tr>"
for tn in c.execute(f"PRAGMA table_info('{table}')"):
output += f"<th>{tn[1]}</th>"
output += "</tr>"
for tb in c.execute(f"SELECT * FROM {table}"):
output += f"<tr>"
for i in tb:
output += f"<td>{i}</td>"
output += f"</tr>"
output += "</table></body>"
return output
except:
traceback.print_exc()
abort(404, "TABLE NOT FOUND")
finally:
conn.close()
@route('/user-memos/<date:re:[0-9_+]+>')
@auth_basic(VERIFY)
def list_usermemos(date):
try:
conn = sqlite3.connect('Yu_{}.db'.format(config['instance']['address']))
c = conn.cursor()
c.execute('SELECT * FROM user_memos WHERE memo_time = ?', (date, ))
memoRaw = c.fetchone()
        if memoRaw is None:
abort(404, "This memo time was not found")
else:
            memo = json.loads(memoRaw[2])
            output = f"<!DOCTYPE html><html><head><title>USER MEMO SHOW: {date}</title></head><body><h1>USER MEMO SHOW: {date}</h1><table><tr><th>User</th><th>Memo</th></tr>"
for me in memo:
output += f"<tr><td><a href=\"https://{config['instance']['address']}/@{me['from']}\">@{me['from']}</a></td><td>{me['body']}</td></tr>"
output += "</table></body>\n"
return output
except OperationalError:
traceback.print_exc()
abort(500, "INTERNAL SERVER ERROR")
finally:
conn.close()
@route('/panic-log/<panicdate:int>')
@auth_basic(VERIFY)
def show_panicLog(panicdate):
if os.path.isdir('panic-log') and os.path.isfile(f'panic-log/PANIC-{str(panicdate)}.TXT'):
with open(f'panic-log/PANIC-{str(panicdate)}.TXT', encoding="utf-8") as panic:
txtRaw = panic.read()
response.content_type = "text/plain"
return txtRaw
else:
abort(404, "PANIC LOG NOT FOUND")
def WEBRUN():
run(port=7878)
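Every route above sits behind HTTP basic auth checked against `config.ini`, and `WEBRUN()` serves on port 7878. A hedged client sketch with placeholder credentials:

```python
import requests

# Placeholder credentials; they must match [web] user/pass in config.ini.
resp = requests.get("http://localhost:7878/panic-log",
                    auth=("web-user", "web-pass"))
print(resp.status_code)
print(resp.text[:200])
```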
stats: avg_line_length=31.522124 | max_line_length=174 | alphanum_fraction=0.571028 | count/score: classes=0/0, generators=0/0, decorators=3,122/0.876474, async_functions=0/0, documentation=1,488/0.417743

hexsha: 837a8a805f3fa86050a9f939d897eba29f04412d | size: 1,713 | ext: py | lang: Python
repo: fahdrazavi/urduhack @ a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8 | path: urduhack/stop_words.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

# coding: utf8
"""
Complete collection of stopwords for the Urdu language.
Maintainer: Ikram Ali(mrikram1989@gmail.com)
version = 2019.04.07
Source = https://github.com/urduhack/urdu-stopwords
"""
# Urdu Language Stop words list
STOP_WORDS = frozenset("""
آ آئی آئیں آئے آتا آتی آتے آس آنا آنی آنے آپ آیا ابھی از اس اسی اسے البتہ
الف ان انہوں انہی انہیں اور اپ اپنا اپنی اپنے اکثر اگر اگرچہ ایسا ایسی ایسے ایک اے بار بارے
باوجود باہر بعض بغیر بلکہ بن بنا بناؤ بند بڑی بھر بھریں بھی بہت بیس بے تا تاکہ تب تجھ
تجھے تحت تر تم تمہارا تمہاری تمہارے تمہیں تو تک تھا تھی تھیں تھے تیری جا جاؤ جائیں جائے جاتا
جاتی جاتے جانی جانے جب جبکہ جس جن جنہوں جنہیں جو جہاں جیسا جیسوں جیسی جیسے حالانکہ حالاں حصہ خالی
خود درمیان دوران دوسرا دوسروں دوسری دوسرے دوں دکھائیں دی دیئے دیا دیتا دیتی دیتے دیر دینا دینی دینے
دیکھو دیں دیے دے ذریعے رکھا رکھتا رکھتی رکھتے رکھنا رکھنی رکھنے رکھو رکھی رکھے رہ رہا رہتا رہتی رہتے
رہنا رہنی رہنے رہو رہی رہیں رہے سا ساتھ سامنے سب سو سکا سکتا سکتے سی سے شاید صرف طرح
طرف طور علاوہ عین لئے لا لائی لائے لاتا لاتی لاتے لانا لانی لانے لایا لو لوجی لوگ لوگوں لگ
لگا لگتا لگتی لگی لگیں لگے لہذا لی لیا لیتا لیتی لیتے لیکن لیں لیے لے مجھ مجھے مزید مطابق
مل مگر میرا میری میرے میں نا نہ نہیں نے وار واقعی والا والوں والی والے
وجہ وغیرہ وہ وہاں وہی وہیں وی ویسے پایا پر پھر پیچھے چاہئے چاہتے چاہیئے چاہے چلا چلو چلیں چلے
چونکہ چکی چکیں چکے ڈالنی ڈالنے ڈالے کئے کا کب کبھی کر کرتا کرتی کرتے کرنا کرنے کرو کریں کرے
کس کسی کسے کم کو کوئی کون کونسا کچھ کہ کہا کہاں کہہ کہی کہیں کہے کی کیا کیسے کیونکہ
کیوں کیے کے گئی گئے گا گویا گی گیا گے ہاں ہر ہم ہمارا ہماری ہمارے ہو ہوئی ہوئیں ہوئے
ہوا ہوتا ہوتی ہوتیں ہوتے ہونا ہونگے ہونی ہونے ہوں ہی ہیں ہے یا یات یعنی یہ یہاں یہی یہیں
""".split())
stats: avg_line_length=57.1 | max_line_length=101 | alphanum_fraction=0.76474 | count/score: classes=0/0, generators=0/0, decorators=0/0, async_functions=0/0, documentation=2,787/0.986898

hexsha: 837ad353a450f945fc5a6b024a1362ed9689c173 | size: 3,982 | ext: py | lang: Python
path: src/sequencemodel_09.py | licenses: ["MIT"]
stars: PatrikValkovic/neural-networks-step-by-step @ 86f5f98de1dbeb3a69ba101f06e303dbaabe6b8e | 1 (2021-02-04T09:01:44.000Z .. 2021-02-04T09:01:44.000Z)
issues: alexdevero/neural-networks-step-by-step @ 55e12e82c78f9be2d942fc1bff252b92fb61c1dd | null
forks: alexdevero/neural-networks-step-by-step @ 55e12e82c78f9be2d942fc1bff252b92fb61c1dd | 2 (2021-01-30T15:17:50.000Z .. 2021-02-04T09:01:45.000Z)

import numpy as np
from progressbar import progressbar
class SequenceModel:
    def __init__(self, layers, loss, metrices=None, random_seed=None):
        self._rand = np.random.RandomState(random_seed)
        self.loss = loss
        self.metrices = metrices if metrices is not None else []
        self.layers = layers
def zero_grad(self):
for layer in self.layers:
for grad in layer.grads:
grad.fill(0)
def fit(self, X, y, optimizer, Xtest=None, ytest=None, epochs=100, batch_size=32, progress=False):
# get how many data we have
n_train_data = len(X)
n_test_data = 1 if ytest is None else len(ytest)
        # store per-epoch losses and metrics
train_losses = np.zeros((epochs,))
test_losses = np.zeros((epochs,))
train_metrices = np.zeros((len(self.metrices), epochs))
test_metrices = np.zeros((len(self.metrices), epochs))
        # decide whether to log progress
epoch_counter = progressbar(range(epochs)) if progress else range(epochs)
# Learning
outputs = [None] * (len(self.layers) + 1)
for epoch in epoch_counter:
# shuffle the data
permutation = self._rand.permutation(n_train_data)
# for each batch
for batch_start in range(0, n_train_data, batch_size):
# get batch
batch_data = X[permutation[batch_start:batch_start+batch_size]]
batch_target = y[permutation[batch_start:batch_start+batch_size]]
# forward pass
outputs[0] = batch_data
for layer, i in zip(self.layers, range(len(self.layers))):
outputs[i+1] = layer(outputs[i])
# backward pass
self.zero_grad()
current_grad = self.loss.gradient(batch_target, outputs[-1])
for layer, layer_input in zip(self.layers[::-1], outputs[-2::-1]):
current_grad = layer.gradient(layer_input, current_grad)
# update the weights
optimizer.optim(self)
# store loss
train_losses[epoch] += np.sum(self.loss(batch_target, outputs[-1]))
# compute the metrices
for metric in self.metrices:
metric(batch_target, outputs[-1])
# store train metrices
for num_metric, metric in enumerate(self.metrices):
train_metrices[num_metric, epoch] = metric.summary()
# evaluate on the test set
if Xtest is not None and ytest is not None:
# for each batch
for batch_start in range(0, n_test_data, batch_size):
# get batch
batch_data = Xtest[batch_start:batch_start+ batch_size]
batch_target = ytest[batch_start:batch_start + batch_size]
# predict the data
prediction = self.predict(batch_data)
# store loss
test_losses[epoch] += np.sum(self.loss(batch_target, prediction))
# compute the metrices
for metric in self.metrices:
metric(batch_target, prediction)
# store test metrices
for num_metric, metric in enumerate(self.metrices):
test_metrices[num_metric, epoch] = metric.summary()
results = {
"train_loss": train_losses / n_train_data,
"test_loss": test_losses / n_test_data,
}
results.update({f"train_{metric.name}": train_metrices[num_metric] for num_metric in range(len(self.metrices))})
results.update({f"test_{metric.name}": test_metrices[num_metric] for num_metric in range(len(self.metrices))})
return results
def predict(self, X):
for layer in self.layers:
X = layer(X)
return X
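The class above only assumes a small duck-typed interface: callable layers with a `gradient(input, grad)` method and a `grads` list, a callable loss with `gradient(y, pred)`, and an optimizer with `optim(model)`. The repo's real implementations live in other files, so the stubs below are a hedged sketch of that interface, not the project's actual layers:

```python
import numpy as np

class LinearLayer:
    """Stub layer exposing the interface SequenceModel expects."""
    def __init__(self, n_in, n_out):
        self.W = np.random.randn(n_in, n_out) * 0.1
        self.grads = [np.zeros_like(self.W)]  # zeroed by zero_grad()
    def __call__(self, X):
        return X @ self.W
    def gradient(self, layer_input, grad):
        self.grads[0] += layer_input.T @ grad  # accumulate over the batch
        return grad @ self.W.T                 # gradient w.r.t. the input

class MSELoss:
    def __call__(self, y, pred):
        return np.mean((pred - y) ** 2, axis=-1)
    def gradient(self, y, pred):
        return 2 * (pred - y) / pred.size      # batch-averaged gradient

class SGD:
    def __init__(self, lr=0.1):
        self.lr = lr
    def optim(self, model):
        for layer in model.layers:
            layer.W -= self.lr * layer.grads[0]

X = np.random.rand(64, 3)
y = X @ np.array([[1.0], [-2.0], [0.5]])       # noiseless linear target
model = SequenceModel([LinearLayer(3, 1)], MSELoss())
history = model.fit(X, y, SGD(lr=0.5), epochs=50, batch_size=16)
print(history["train_loss"][-1])               # should shrink toward 0
```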
stats: avg_line_length=45.770115 | max_line_length=120 | alphanum_fraction=0.566298 | count/score: classes=3,925/0.985686, generators=0/0, decorators=0/0, async_functions=0/0, documentation=440/0.110497

hexsha: 837c932ac45c8e6207580a84130808a7f51d5177 | size: 685 | ext: py | lang: Python
repo: paiuolo/django-sso-app @ 75b96c669dc0b176dc77e08f018a3e97d259f636 | path: django_sso_app/app/views.py | licenses: ["MIT"]
stars: 1 (2021-11-16T15:16:08.000Z .. 2021-11-16T15:16:08.000Z) | issues: null | forks: null

from django.http import HttpResponseRedirect
from django.views import View
from ..core import app_settings
class AppLoginView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_LOGIN_URL + next)
class AppSignupView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_SIGNUP_URL + next)
class AppLogoutView(View):
def get(self, request, *args, **kwargs):
next = request.GET.get('next', '')
return HttpResponseRedirect(app_settings.REMOTE_LOGOUT_URL + next)
stats: avg_line_length=26.346154 | max_line_length=74 | alphanum_fraction=0.694891 | count/score: classes=568/0.829197, generators=0/0, decorators=0/0, async_functions=0/0, documentation=24/0.035036

hexsha: 837cbe3de90b812a9c90cd64972dc52fe2924f87 | size: 8,989 | ext: py | lang: Python
repo: thomasgermain/home-assistant @ 69a8ba678e0276bc1bfde0f3d9e9d3682209f962 | path: tests/components/multimatic/__init__.py | licenses: ["Apache-2.0"]
stars: 7 (2019-08-15T13:36:58.000Z .. 2020-03-18T10:46:29.000Z) | issues: 73 (2020-10-01T06:39:39.000Z .. 2022-03-31T06:16:15.000Z) | forks: 4 (2019-10-26T14:25:13.000Z .. 2020-11-10T11:00:18.000Z)

"""The tests for multimatic integration."""
from __future__ import annotations
import datetime
from typing import Any
from unittest.mock import AsyncMock, patch
from pymultimatic.model import (
ActiveFunction,
BoilerStatus,
Circulation,
Device,
Dhw,
EmfReport,
Error,
FacilityDetail,
HolidayMode,
HotWater,
HvacStatus,
OperatingModes,
Report,
Room,
SettingModes,
TimePeriodSetting,
TimeProgram,
TimeProgramDay,
Ventilation,
Zone,
ZoneHeating,
)
from pymultimatic.systemmanager import SystemManager
from homeassistant import config_entries
from homeassistant.components.multimatic import COORDINATORS, DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.util import utcnow
from tests.common import async_fire_time_changed
VALID_MINIMAL_CONFIG = {CONF_USERNAME: "test", CONF_PASSWORD: "test"}
class SystemManagerMock(SystemManager):
"""Mock implementation of SystemManager."""
instance = None
_methods = [f for f in dir(SystemManager) if not f.startswith("_")]
data: dict[str, Any] = {}
@classmethod
def reset_mock(cls):
"""Reset mock, clearing instance and system."""
cls.instance = None
cls.data = {}
def __init__(self, **kwargs):
"""Mock the constructor."""
self._init()
self.__class__.instance = self
def _init(self):
def _handle_mock(name):
data = self.data.get(name)
if isinstance(data, BaseException):
raise data
return data
for method in self.__class__._methods:
setattr(
self,
method,
AsyncMock(
side_effect=lambda x=method, *args, **kwargs: _handle_mock(x)
),
)
@classmethod
def init_defaults(cls):
"""Init mock data with some default values."""
cls.update_data(
{
"get_zones": zones(True),
"get_rooms": rooms(),
"get_dhw": dhw(),
"get_live_reports": reports(),
"get_outdoor_temperature": 18,
"get_ventilation": ventilation(),
"get_quick_mode": None,
"get_holiday_mode": HolidayMode(False),
"get_hvac_status": hvac_status(),
"get_facility_detail": facility_detail(),
"get_gateway": "VR920",
"get_live_report": report(),
"DomesticHotWaterTankTemperature": report_dhw(),
"get_emf_devices": emf_reports(),
}
)
@classmethod
def update_data(cls, data):
"""Modify data."""
cls.data.update(data)
def zones(with_rb=True):
"""Get zones."""
zones = []
heating = ZoneHeating(
time_program=time_program(SettingModes.NIGHT, None),
operating_mode=OperatingModes.AUTO,
target_low=22,
target_high=30,
)
zones.append(
Zone(
id="zone_1",
name="Zone 1",
temperature=25,
active_function=ActiveFunction.HEATING,
rbr=False,
heating=heating,
)
)
if with_rb:
zones.append(
Zone(
id="zone_2",
name="Zone rbr",
temperature=25,
active_function=ActiveFunction.HEATING,
rbr=True,
heating=heating,
)
)
return zones
def rooms():
"""Get rooms."""
room_device = Device("Device 1", "123456789", "VALVE", False, False)
return [
Room(
id="1",
name="Room 1",
time_program=time_program(),
temperature=22,
target_high=24,
operating_mode=OperatingModes.AUTO,
child_lock=False,
window_open=False,
devices=[room_device],
)
]
def dhw():
"""Get dhw."""
hot_water = HotWater(
id="dhw",
name="Hot water",
time_program=time_program(temp=None),
temperature=None,
target_high=40,
operating_mode=OperatingModes.AUTO,
)
circulation = Circulation(
id="dhw",
name="Circulation",
time_program=time_program(temp=None),
operating_mode=OperatingModes.AUTO,
)
return Dhw(hotwater=hot_water, circulation=circulation)
def report():
"""Get report."""
return Report(
device_name="VRC700 MultiMatic",
device_id="Control_SYS_MultiMatic",
unit="bar",
value=1.9,
name="Water pressure",
id="WaterPressureSensor",
)
def report_dhw():
"""Get report for dhw."""
return Report(
device_name="Control_DHW",
device_id="DomesticHotWaterTankTemperature",
unit="°C",
value=45,
name="DomesticHotWaterTankTemperature",
id="DomesticHotWaterTankTemperature",
)
def reports():
"""Get reports."""
return [report()]
def ventilation():
"""Return ventilation."""
return Ventilation(
time_program=time_program(SettingModes.ON, 6),
operating_mode=OperatingModes.AUTO,
target_high=6,
target_low=2,
id="ventilation",
name="Ventilation",
)
def active_holiday_mode():
"""Return a active holiday mode."""
start = datetime.date.today() - datetime.timedelta(days=1)
end = datetime.date.today() + datetime.timedelta(days=1)
return HolidayMode(True, start, end, 15)
def time_program(heating_mode=SettingModes.OFF, temp=20):
"""Create a default time program."""
tp_day_setting = TimePeriodSetting("00:00", temp, heating_mode)
tp_day = TimeProgramDay([tp_day_setting])
tp_days = {
"monday": tp_day,
"tuesday": tp_day,
"wednesday": tp_day,
"thursday": tp_day,
"friday": tp_day,
"saturday": tp_day,
"sunday": tp_day,
}
return TimeProgram(tp_days)
def facility_detail():
"""Get facility detail."""
return FacilityDetail(
name="Home",
serial_number="12345",
firmware_version="1.2.3",
ethernet_mac="01:23:45:67:89:AB",
wifi_mac="23:45:67:89:0A:BC",
)
def hvac_status(with_error=False, with_status=True):
"""Get hvac status."""
boiler_status = None
if with_status:
boiler_status = BoilerStatus(
device_name="boiler",
title="Status",
status_code="1",
description="This is the status",
timestamp=datetime.datetime.now(),
hint="Do nothing",
)
errors = None
if with_error:
errors = [
Error(
device_name="Device",
title="Status",
status_code="99",
description="This is the error",
timestamp=datetime.datetime.now(),
)
]
return HvacStatus(
boiler_status=boiler_status,
errors=errors,
online="ONLINE",
update="UPDATE_NOT_PENDING",
)
def emf_reports():
"""Get emf reports."""
return [
EmfReport(
"flexoTHERM_PR_EBUS",
"VWF 117/4",
"HEAT_PUMP",
"COOLING",
"CONSUMED_ELECTRICAL_POWER",
1000,
datetime.date(2021, 1, 1),
datetime.date(2021, 1, 10),
)
]
async def goto_future(hass):
"""Move to future."""
future = utcnow() + datetime.timedelta(minutes=5)
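    # Patch utcnow so the jumped clock is visible while the time-changed event
    # fires, then force every multimatic coordinator to refresh.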
with patch("homeassistant.util.utcnow", return_value=future):
async_fire_time_changed(hass, future)
entry_id = hass.config_entries.async_entries(DOMAIN)[0].unique_id
coordinators = hass.data[DOMAIN][entry_id][COORDINATORS]
for coord in coordinators.values():
await coord.async_request_refresh()
await hass.async_block_till_done()
async def setup_multimatic(hass, config=None, with_defaults=True, data=None):
"""Set up multimatic component."""
if not config:
config = VALID_MINIMAL_CONFIG
if with_defaults:
SystemManagerMock.init_defaults()
if data:
SystemManagerMock.update_data(data)
with patch(
"homeassistant.components.multimatic.config_flow.validate_authentication",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=config
)
await hass.async_block_till_done()
return result
async def call_service(hass, domain, service, data):
"""Call hass service."""
await hass.services.async_call(domain, service, data)
await hass.async_block_till_done()
def assert_entities_count(hass, count):
"""Count entities owned by the component."""
assert len(hass.states.async_entity_ids()) == count
| 26.206997
| 82
| 0.585938
| 1,872
| 0.208231
| 0
| 0
| 1,064
| 0.118354
| 1,307
| 0.145384
| 1,652
| 0.18376
|
837cd4561ed86c61a564513e1e29e4b4eaead664
| 4,877
|
py
|
Python
|
test/e2e/test_200_header_invalid.py
|
elukey/mod_h2
|
3418fc31b8ffe9fe477899d60ccfdecdfac1df34
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/test_200_header_invalid.py
|
elukey/mod_h2
|
3418fc31b8ffe9fe477899d60ccfdecdfac1df34
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/test_200_header_invalid.py
|
elukey/mod_h2
|
3418fc31b8ffe9fe477899d60ccfdecdfac1df34
|
[
"Apache-2.0"
] | null | null | null |
#
# mod-h2 test suite
# check handling of invalid chars in headers
#
import copy
import os
import re
import sys
import time
import pytest
from datetime import datetime
from TestEnv import TestEnv
from TestHttpdConf import HttpdConf
def setup_module(module):
print("setup_module: %s" % module.__name__)
TestEnv.init()
HttpdConf().add_vhost_cgi().install()
assert TestEnv.apache_restart() == 0
def teardown_module(module):
print("teardown_module: %s" % module.__name__)
assert TestEnv.apache_stop() == 0
class TestStore:
def setup_method(self, method):
print("setup_method: %s" % method.__name__)
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
# let the hecho.py CGI echo chars < 0x20 in field name
# for almost all such characters, the stream gets aborted with a h2 error and
# there will be no http status, cr and lf are handled special
def test_200_01(self):
url = TestEnv.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
r = TestEnv.curl_post_data(url, "name=x%%%02xx&value=yz" % x)
if x in [ 10 ]:
assert 0 == r["rv"], "unexpected exit code for char 0x%02x" % x
assert 500 == r["response"]["status"], "unexpected status for char 0x%02x" % x
elif x in [ 13 ]:
assert 0 == r["rv"], "unexpected exit code for char 0x%02x" % x
assert 200 == r["response"]["status"], "unexpected status for char 0x%02x" % x
else:
assert 0 != r["rv"], "unexpected exit code for char 0x%02x" % x
# let the hecho.py CGI echo chars < 0x20 in field value
# for almost all such characters, the stream gets aborted with a h2 error and
# there will be no http status, cr and lf are handled special
def test_200_02(self):
url = TestEnv.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
if 9 != x:
r = TestEnv.curl_post_data(url, "name=x&value=y%%%02x" % x)
if x in [ 10, 13 ]:
assert 0 == r["rv"], "unexpected exit code for char 0x%02x" % x
assert 200 == r["response"]["status"], "unexpected status for char 0x%02x" % x
else:
assert 0 != r["rv"], "unexpected exit code for char 0x%02x" % x
# let the hecho.py CGI echo 0x10 and 0x7f in field name and value
def test_200_03(self):
url = TestEnv.mkurl("https", "cgi", "/hecho.py")
for hex in [ "10", "7f" ]:
r = TestEnv.curl_post_data(url, "name=x%%%s&value=yz" % hex)
assert 0 != r["rv"]
r = TestEnv.curl_post_data(url, "name=x&value=y%%%sz" % hex)
assert 0 != r["rv"]
# test header field lengths check, LimitRequestLine (default 8190)
def test_200_10(self):
url = TestEnv.mkurl("https", "cgi", "/")
val = "1234567890" # 10 chars
for i in range(3): # make a 10000 char string
val = "%s%s%s%s%s%s%s%s%s%s" % (val, val, val, val, val, val, val, val, val, val)
# LimitRequestLine 8190 ok, one more char -> 431
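        # ("x: " is 3 bytes, so 3 + 8187 = exactly 8190; one more char exceeds it)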
r = TestEnv.curl_get(url, options=[ "-H", "x: %s" % (val[:8187]) ])
assert 200 == r["response"]["status"]
r = TestEnv.curl_get(url, options=[ "-H", "x: %sx" % (val[:8188]) ])
assert 431 == r["response"]["status"]
# test header field lengths check, LimitRequestFieldSize (default 8190)
def test_200_11(self):
url = TestEnv.mkurl("https", "cgi", "/")
val = "1234567890" # 10 chars
for i in range(3): # make a 10000 char string
val = "%s%s%s%s%s%s%s%s%s%s" % (val, val, val, val, val, val, val, val, val, val)
# LimitRequestFieldSize 8190 ok, one more char -> 400 in HTTP/1.1
        # (we send 4000+4188 since they are concatenated by ", ")
r = TestEnv.curl_get(url, options=[ "-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4188]) ])
assert 200 == r["response"]["status"]
r = TestEnv.curl_get(url, options=[ "--http1.1", "-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4189]) ])
assert 400 == r["response"]["status"]
r = TestEnv.curl_get(url, options=[ "-H", "x: %s" % (val[:4000]), "-H", "x: %s" % (val[:4191]) ])
assert 431 == r["response"]["status"]
# test header field lengths check, LimitRequestFields (default 100)
def test_200_12(self):
url = TestEnv.mkurl("https", "cgi", "/")
opt=[]
for i in range(98): # curl sends 2 headers itself (user-agent and accept)
opt += [ "-H", "x: 1" ]
r = TestEnv.curl_get(url, options=opt)
assert 200 == r["response"]["status"]
r = TestEnv.curl_get(url, options=(opt + [ "-H", "y: 2" ]))
assert 431 == r["response"]["status"]
| 43.159292
| 119
| 0.565717
| 4,336
| 0.889071
| 0
| 0
| 0
| 0
| 0
| 0
| 1,994
| 0.408858
|
837d850bff1c24037cf6a37770c38618903819c0
| 7,529
|
py
|
Python
|
controller/controller.py
|
angelocarbone/MoDelS
|
5bfee8d0b6e719c1d2445acf4e332597427ac906
|
[
"MIT"
] | 1
|
2021-12-02T07:29:29.000Z
|
2021-12-02T07:29:29.000Z
|
controller/controller.py
|
angelocarbone/MoDelS
|
5bfee8d0b6e719c1d2445acf4e332597427ac906
|
[
"MIT"
] | null | null | null |
controller/controller.py
|
angelocarbone/MoDelS
|
5bfee8d0b6e719c1d2445acf4e332597427ac906
|
[
"MIT"
] | null | null | null |
from scenarios import helper
from scenarios.builder import Builder
from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType
from model.knowledge_base import kb
from model.entities import Entity, CausalFactor
from model.utils import BoundingBox
from model import rule
class Controller:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
print('Controller object has been created.')
cls._instance = super(Controller, cls).__new__(cls)
# Put any initialization here.
return cls._instance
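    # NOTE: __new__ only caches the instance; __init__ below still runs on
    # every Controller(...) call and re-binds _v_obj/_m_obj.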
def __init__(self, v_obj, m_obj):
self._v_obj = v_obj
self._m_obj = m_obj
def __str__(self):
return "Controller"
def withdraw_btn_callback(self):
val = float(self._v_obj.rmb_text.displayText())
self._m_obj.withdraw(val)
def deposit_btn_callback(self):
val = float(self._v_obj.rmb_text.displayText())
self._m_obj.deposit(val)
def get_experience_causal_factor(self):
res = e_ExperienceFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def get_mental_or_emotional_causal_factor(self):
res = e_MentalOrEmotionalFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def get_phy_or_phy_causal_factor(self):
res = e_PhyOrPhyFactor.all()
causal_factors = []
for _ in res:
causal_factors.append(_[0])
return causal_factors
def list_of_vehicle_causal_factor(self):
_ = {
"cf_driver": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
"cf_fellow_passenger": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
"cf_vehicle": {
"0": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"1": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
"3": {
"id": "0",
"name": "Name of causal factor",
"description": "Description of causal factor."
},
},
}
return _
def list_of_pedestrian_causal_factor(self):
pass
def list_of_scenarios(self):
        # todo: Load list of scenarios from a dedicated module.
_ = {
"0": {
"name": "scenario7",
"description": "Description of scenario 01 here",
"sub_scenarios": {
"0": {
"name": "scenario01_20211016-100245",
"description": "Description of scenario01_20211016-100245",
},
"1": {
"name": "scenario01_20211016-101607",
"description": "Description of scenario01_20211016-101607",
},
"2": {
"name": "scenario01_20211021-101607",
"description": "Description of scenario01_20211021-101607",
}
},
"ego_settings": {},
"vehicles_settings": {
"0": {
"cf_vehicle": {
"causal_factor": {}
},
"cf_driver": {
"causal_factor": {
"id": "1",
"value": "distraction",
"description": "Description of causal factor.",
}
},
"cf_fellow_passenger": {}
},
},
"pedestrian_settings": {},
},
"1": {"name": "Scenario02", "description": "Description of scenario 02 here"},
}
return helper.get_scenarios()
def run_scenario_callback(self, scenario_name: str):
print("pushButton_run_scenario {}".format(scenario_name))
helper.run_scenario(scenario_name)
def run_sub_scenario_callback(self, scenario):
import os
from scenariogeneration import esmini
from config import ESMINI_DIR
esmini(scenario, os.path.join(ESMINI_DIR))
def action_exit_callback(self):
return True
def update_scenario_callback(self, vehicle_id):
print("pushButton_update_scenario")
# done: create Driver (AndreA) instance in ontology
# done: link Driver to Vehicle (JamCar)
# done: assign CausalFactor to Driver (AndreA) instance
# done: SPARQL: Given a CausalFactor, give me DrivingError
# todo: SPARQL: Given current Action + DrivingError, give me next Actions.
# todo: For each actions returned, link each one to an alternative behavior in library
# todo: Present alternative to UI
entity = Entity('Andrea', 68, e_EntityType.Driver, BoundingBox(0.5, 0.6, 1.8, 1.3, 0.0, 0.8))
andrea = kb.insert_entity(entity)
vehicle = kb.get_entity_from_cache(vehicle_id)
kb.add_relation(andrea, vehicle, e_Relation.isOn.isOnVehicle.driverIsOnVehicle)
current_action = kb.get_current_action(vehicle)
cf = CausalFactor("test_name", e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction)
cf_i = kb.insert_entity(cf)
kb.add_relation(andrea, cf_i, e_Relation.isImpaired.driverIsImpaired)
driving_errors = rule.get_driving_error_to_causal_factor_rule(e_CausalFactorType.HumanFactor.MentalOrEmotionalFactor.Distraction)
builder = Builder()
sub_scenarios = builder.get_sub_scenario_foo()
import re
for d_error in driving_errors:
_ = re.sub(".*#", "", d_error['x'])
# builder.build("scenario10", _, current_action)
return sub_scenarios
| 37.272277
| 140
| 0.504582
| 7,174
| 0.952849
| 0
| 0
| 0
| 0
| 0
| 0
| 2,080
| 0.276265
|
837e4da85868086f6aef55e405fd04f2686a56f3
| 1,567
|
py
|
Python
|
stan/data/data_lex.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | 1
|
2015-01-06T11:10:24.000Z
|
2015-01-06T11:10:24.000Z
|
stan/data/data_lex.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | null | null | null |
stan/data/data_lex.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | null | null | null |
"""
The :mod:`stan.data_lex` module is the lexer for SAS-like language.
"""
from pyparsing import *
from stan.data.data_expr import EXPR_, ID_, DATA, SET, RENAME, RUN, DROP, KEEP, SEMI_, LOGICAL_
# set up logic
dataStepStmt = Forward()
# data/set inline options
rename_stmt = OneOrMore(Group(ID_ + Suppress("=") + ID_)).setResultsName('rename')
drop_stmt = OneOrMore(ID_).setResultsName('drop')
keep_stmt = OneOrMore(ID_).setResultsName('keep')
dataset_opt_stmt = Optional("(" +
                            Optional(Suppress(RENAME) + "=" + "(" + rename_stmt + ")") +
                            Optional(Suppress(DROP) + "=" + drop_stmt) +
                            Optional(Suppress(KEEP) + "=" + keep_stmt) + ")")
# data step options (not inline)
opt_stmt = (
(Suppress(RENAME) + rename_stmt + SEMI_) |
(Suppress(KEEP) + keep_stmt + SEMI_) |
(Suppress(DROP) + drop_stmt + SEMI_)
    # TODO: add BY statement
)
# data step logic
s_stmt = Group(ID_ + Suppress("=") + ( LOGICAL_.setResultsName('logical') | EXPR_ ) + SEMI_)
# data set statements
data_stmt = Group(Suppress(DATA) + ID_.setResultsName('name') + dataset_opt_stmt.setResultsName('data opt')).setResultsName('data') + SEMI_
set_stmt = Group(Suppress(SET) + ID_.setResultsName('name') + dataset_opt_stmt.setResultsName('set opt')).setResultsName('set') + SEMI_
dataStepStmt << (data_stmt +
set_stmt +
(ZeroOrMore(opt_stmt) &
ZeroOrMore(s_stmt).setResultsName('stmt')) +
RUN + SEMI_)
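# A minimal usage sketch (illustrative; it assumes the tokens imported from
# stan.data.data_expr behave like ordinary pyparsing identifiers/expressions):
#
#     src = "data mydata (rename=(a = b)); set olddata; x = a; run;"
#     parsed = dataStepStmt.parseString(src)
#     print(parsed['data']['name'])  # expected: "mydata"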
| 35.613636
| 139
| 0.613912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.194639
|
837e63fb36e90c2f7dc83ee4de463a8b38b3fbca
| 2,334
|
py
|
Python
|
setup.py
|
JayDwayne/Neopo
|
964e1a13ed016b5a74ccb33b7384a0f783100cd7
|
[
"MIT"
] | null | null | null |
setup.py
|
JayDwayne/Neopo
|
964e1a13ed016b5a74ccb33b7384a0f783100cd7
|
[
"MIT"
] | null | null | null |
setup.py
|
JayDwayne/Neopo
|
964e1a13ed016b5a74ccb33b7384a0f783100cd7
|
[
"MIT"
] | null | null | null |
import os
from platform import system
from setuptools import setup
from subprocess import run, PIPE, CalledProcessError
running_on_windows = system() == "Windows"
running_in_docker = os.path.isfile("/.dockerenv")
# Keep the version consistent with the AUR package
try:
count = run(["git", "rev-list", "--count", "HEAD"],
stdout=PIPE, check=True).stdout.splitlines()[0].decode('utf-8')
commit = run(["git", "rev-parse", "--short", "HEAD"],
stdout=PIPE, check=True).stdout.splitlines()[0].decode('utf-8')
VERSION = "%s.%s" % (count, commit)
except CalledProcessError:
print("Could not determine package version with Git! Exiting...")
raise
# Additional files for *nix: completion, man page, etc.
share_files = [
('/usr/share/man/man1', ['man/neopo.1']),
('/usr/share/licenses/neopo', ['LICENSE']),
('/usr/share/neopo/scripts', ['scripts/POSTINSTALL']),
('/usr/share/bash-completion/completions', ['completion/neopo'])
]
# Skip share_files on Windows, docker, or when installing as non-root
if running_on_windows or running_in_docker or os.geteuid() != 0:
    share_files = None
# Provide neopo, neopo-script, and particle commands
script_unix = ['scripts/unix/neopo',
'scripts/unix/neopo-script',
'scripts/unix/particle']
script_windows = ['scripts/windows/neopo.cmd',
'scripts/windows/neopo-script.cmd',
'scripts/windows/particle.cmd']
script_files = script_windows if running_on_windows else script_unix
# update version.py
with open(os.path.join('neopo', 'version.py'), 'w') as file:
file.writelines(['NEOPO_VERSION="%s"' % VERSION])
setup(
name='neopo',
version=VERSION,
description='A lightweight solution for local Particle development.',
long_description="""
Neopo is a Particle development management utility that simplifies the
installation and usage of Particle's toolchains on a variety of distributions.
It features options to build or flash projects, iterable commands, a scripting
interface, and Particle Workbench/CLI compatibility.""",
author='Nathan Robinson',
author_email='nrobinson2000@me.com',
url="https://neopo.xyz",
download_url='https://github.com/nrobinson2000/neopo',
license="MIT",
packages=['neopo'],
platforms=["Linux", "macOS", "Windows", "ARM"],
data_files=share_files,
scripts=script_files
)
| 35.363636
| 81
| 0.70437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,263
| 0.541131
|
837ecef31551741e285f87a84d5925f220afb694
| 2,045
|
py
|
Python
|
jmetal/core/operator.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | 1
|
2020-05-26T18:57:31.000Z
|
2020-05-26T18:57:31.000Z
|
jmetal/core/operator.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | null | null | null |
jmetal/core/operator.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | 2
|
2019-01-08T11:52:52.000Z
|
2020-05-25T13:21:26.000Z
|
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, List
S = TypeVar('S')
R = TypeVar('R')
"""
.. module:: Operator
:platform: Unix, Windows
:synopsis: Templates for operators.
.. moduleauthor:: Antonio J. Nebro <antonio@lcc.uma.es>
"""
class Operator(Generic[S, R], ABC):
    """ Class representing an operator. """
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
class Mutation(Operator[S, S]):
    """ Class representing a mutation operator. """
def __init__(self, probability: float):
if probability > 1.0:
raise Exception('The probability is greater than one: {}'.format(probability))
elif probability < 0.0:
raise Exception('The probability is lower than zero: {}'.format(probability))
self.probability = probability
@abstractmethod
    def execute(self, source: S) -> S:
pass
@abstractmethod
def get_name(self) -> str:
pass
class Crossover(Operator[List[S], List[R]]):
    """ Class representing a crossover operator. """
def __init__(self, probability: float):
if probability > 1.0:
raise Exception('The probability is greater than one: {}'.format(probability))
elif probability < 0.0:
raise Exception('The probability is lower than zero: {}'.format(probability))
self.probability = probability
@abstractmethod
def get_number_of_parents(self):
pass
@abstractmethod
    def execute(self, source: List[S]) -> List[R]:
pass
@abstractmethod
def get_name(self) -> str:
pass
class Selection(Operator[S, R]):
    """ Class representing a selection operator. """
def __init__(self):
pass
@abstractmethod
def execute(self, source: S) -> R:
pass
@abstractmethod
def get_name(self) -> str:
pass
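# A minimal sketch of a concrete operator (illustrative only; not part of the
# original module). It shows the pattern every subclass follows: call the base
# constructor with a probability, then implement execute() and get_name().
class NullMutation(Mutation):
    """Mutation operator that returns the solution unchanged."""

    def __init__(self):
        super().__init__(probability=0.0)

    def execute(self, source: S) -> S:
        # A real operator would perturb `source` here; this one is a no-op.
        return source

    def get_name(self) -> str:
        return 'Null mutation'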
| 21.989247
| 90
| 0.627873
| 1,762
| 0.861614
| 0
| 0
| 569
| 0.27824
| 0
| 0
| 492
| 0.240587
|
8383163f22959bd98885d5ed979d31561a7823ce
| 1,389
|
py
|
Python
|
foo/pictureR/wordsTemplate.py
|
MangetsuC/arkHelper
|
02705294f1bc3ecf926e0a9c62c59026494f62f8
|
[
"MIT"
] | 147
|
2020-05-06T10:36:13.000Z
|
2022-03-17T13:03:16.000Z
|
foo/pictureR/wordsTemplate.py
|
MangetsuC/arkHelper
|
02705294f1bc3ecf926e0a9c62c59026494f62f8
|
[
"MIT"
] | 34
|
2020-07-21T01:20:10.000Z
|
2022-01-30T06:38:11.000Z
|
foo/pictureR/wordsTemplate.py
|
MangetsuC/arkHelper
|
02705294f1bc3ecf926e0a9c62c59026494f62f8
|
[
"MIT"
] | 17
|
2020-12-10T14:42:34.000Z
|
2022-02-26T15:23:58.000Z
|
from PIL import Image, ImageDraw, ImageFont
from numpy import asarray
from cv2 import cvtColor, COLOR_RGB2BGR, imshow, waitKey
from os import getcwd
def getFontSize_name(resolution):
x = resolution[0]
if x <= 1024:
return (16, (1024,576))
elif x <= 1280:
return (21, (1280,720))
elif x <= 1440:
return (23, (1440,810))
elif x <= 1600:
return (26, (1600,900))
else:
return (31, (1920,1080))
def getTemplatePic_CH(words, fontsize):
    # typical font sizes: base operator names 23 and overview room names 28 at 1440*810; 30 and 38 respectively at 1920*1080
    ttf = ImageFont.truetype(getcwd() + "/res/fonts/SourceHanSansCN-Regular.otf", fontsize)  # use the Source Han Sans font
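    # NOTE: ImageFont.getsize() was deprecated in Pillow 9.2 and removed in
    # Pillow 10; on newer Pillow use getbbox()/getlength() instead.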
wordsPic = Image.new('RGB', ttf.getsize(words))
wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), words, font=ttf, fill=(255,255,255))  # render the template for the given text
#temp = cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
#imshow('test', temp)
#waitKey(0)
return cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
def getTemplatePic_NUM(num, fontsize):
    # typical font size: operator mood in the overview is 28
num = str(num)
ttf = ImageFont.truetype(getcwd() + "/res/fonts/Bender.otf", fontsize) #字体选用bender
wordsPic = Image.new('RGB', ttf.getsize(num), color = (255, 255, 255))
wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), num, font=ttf, fill=(0,0,0))  # render the template for the given number
return cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
| 37.540541
| 101
| 0.670266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 419
| 0.27332
|
8383af1ee3c86c7a8396f853fcb82a399a1772cb
| 1,185
|
py
|
Python
|
bin/concat_msa.py
|
HadrienG/arbetsprov
|
ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f
|
[
"MIT"
] | 5
|
2021-10-11T09:30:52.000Z
|
2022-01-03T07:03:17.000Z
|
bin/concat_msa.py
|
HadrienG/arbetsprov
|
ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f
|
[
"MIT"
] | null | null | null |
bin/concat_msa.py
|
HadrienG/arbetsprov
|
ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f
|
[
"MIT"
] | 1
|
2022-01-03T07:03:51.000Z
|
2022-01-03T07:03:51.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from Bio import AlignIO
def concat_msa(msas, output):
"""concatenate msas together"""
alignments = []
for msa in msas:
align = AlignIO.read(msa, "fasta")
# shorten id so the concatenated alignment keeps it
        for record in align:  # MultipleSeqAlignment is iterable; avoid the private _records
record.id = record.id.split("|")[0]
        if len(align) == 3:
alignments.append(align)
concatenated_alignment = alignments[0]
for alignment in alignments[1:]:
concatenated_alignment += alignment
with open(output, "w") as outfile:
AlignIO.write(concatenated_alignment, outfile, "fasta")
def main():
parser = argparse.ArgumentParser(
prog="concat_msa.py"
)
parser.add_argument(
"--msa",
type=str,
required=True,
nargs="*",
help="multiple sequence alignment to concatenate"
)
parser.add_argument(
"--output",
type=str,
required=True,
help="output file"
)
args = parser.parse_args()
concat_msa(args.msa, args.output)
if __name__ == "__main__":
main()
| 23.7
| 63
| 0.592405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.209283
|
8384d1480db51cc6251738da74aa3074adb07e4f
| 11,099
|
py
|
Python
|
rendez-vous.py
|
MrDarkness117/parseTsum
|
03f9f4d7c9e90a48eec5c689082a4274a160f501
|
[
"MIT"
] | null | null | null |
rendez-vous.py
|
MrDarkness117/parseTsum
|
03f9f4d7c9e90a48eec5c689082a4274a160f501
|
[
"MIT"
] | null | null | null |
rendez-vous.py
|
MrDarkness117/parseTsum
|
03f9f4d7c9e90a48eec5c689082a4274a160f501
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from os import path
import re
import json
import time
import datetime
import xlsxwriter
print("Start: " + str(datetime.datetime.now()))
options = Options()
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
# driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.implicitly_wait(0.5)
url_brands = "https://www.rendez-vous.ru/catalog/brands/"
brands = [
"Aldo Brue", "AGL", "BANU", "Bally", 'Bresciani', 'Brimarts', 'Carlo Visintini', 'Casadei', 'Casheart,',
'Cerruti 1881', 'Cesare Casadei', 'Coccinelle', 'DKNY', 'Doria Maria', 'Doucal\'s', 'F_WD', 'Fabi', 'Fabrizio Lesi',
'Ferre Milano', 'Flower Mountain', 'Franceschetti', 'Frasconi', 'Fratelli Rossetti', 'Fratelli Rossetti One',
'Gianni Chiarini', 'Goose Tech', 'GUM', 'HIDE&JACK', 'Ice Play', 'Iceberg', 'In The Box', 'Inuikii',
'John Galliano', 'John Richmond', 'Kalliste', 'Kat Maconie', 'Kate Spade', 'Lancaster', 'Landi', 'Le Silla',
'Lemon Jelly', "L'Impermeabile", 'Marsell', 'Merola Gloves', 'Moose Knuckles', 'Moreschi', 'Moschino', 'Panchic',
'Pantanetti', 'Parajumpers', 'Pasotti', 'Pertini', 'Pierre Cardin', 'Pollini', 'Portolano', 'Premiata',
'Principe Di Bologna', 'RBRSL', "Reptile's House", 'Roberto Cavalli', 'Rocco P', 'Sergio Rossi', 'SPRAYGROUND',
'Stemar', 'Stuart Weitzman', 'V SEASON', "VIC MATIE'", "Vic Matie", 'Voile Blanche', 'What For', 'Wolford', '3JUIN',
'Premiata will be', 'Sprayground', 'Domrebel', 'GIUSEPPE ZANOTTI DESIGN', 'Giuseppe Zanotti Design',
'GIUSEPPE ZANOTTI', 'Giuseppe Zanotti'
]
search_values = ['Wolford', 'RBRSL', "Rocco P", "DKNY", 'Flower Mountain', 'HIDE&JACK', 'Inuikii', 'Lancaster']
categories = [
"Женское",
'Мужское',
"Детское"
]
iframe_ids = ['fl-545545']
show = "//li[@class='next']/a"
pagination_class_selected = 'page selected'
last_page = '//ul[@id="pagination_bottom"]/li[@class="last"]'
search_btn = '//*[@id="search-toggle"]'
search_bar = '//*[@id="Search_q"]'
failed_pages = {'pages': []}
output = xlsxwriter.Workbook('C:\\Users\\admin\\Documents\\outputs\\Rendez-vous {}.xlsx'.format(str(datetime.date.today())))
sheet = output.add_worksheet('Rendez-vous')
sheet.write('A1', 'Артикул')
sheet.write('B1', 'Цена')
sheet.write('C1', 'Старая цена')
sheet.write('D1', 'Скидка')
sheet.write('E1', 'Бренд')
sheet.write('F1', 'Артикул производителя')
sheet.write('G1', 'Ссылка')
tables = {}
count = 0
row = 2
closed = False
scrolled = False
def open_brands():
driver.get(url_brands)
for el in brands:
global scrolled
scrolled = False
scroll_brands(el)
def open_brand(el):
driver.find_element(By.XPATH, '//div[@class="js-quick-search-source brand-popover"]'
'//a[contains(text(), "{}")]'.format(el.upper())).click()
write_data()
def scroll_brands(el):
driver.get(url_brands)
driver.execute_script('window.scrollBy(0, -7000)')
try:
actions = ActionChains(driver)
actions.move_to_element(driver.find_element_by_xpath('//div[@class="js-quick-search-source brand-popover"]'
'//a[contains(text(), "{}")]'.format(el.upper()))).perform()
open_brand(el)
except Exception as e:
print(el.upper() + " not found in the list, skipping.")
print(e)
global scrolled
scrolled = True
def search():
driver.get(url_brands)
for b in search_values:
# WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, '//button[@class="search-mobile__open-button"]')))
driver.find_element(By.XPATH, search_btn).click()
driver.find_element(By.XPATH, search_bar).click()
driver.find_element(By.XPATH, search_bar).clear()
driver.find_element(By.XPATH, search_bar).send_keys(b)
driver.find_element(By.XPATH, search_bar).send_keys(Keys.ENTER)
time.sleep(2)
try:
write_data()
        except Exception as e:
            print("Failed to collect results for '{}': {}".format(b, e))
def change_page():
try:
# driver.execute_script('window.scrollBy(0, -800)')
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, show)))
        # use a distinct local name so the enclosing change_page() function
        # is not shadowed
        next_btn = driver.find_element(By.XPATH, show)
        actions = ActionChains(driver)
        actions.move_to_element(next_btn).perform()
        next_btn.click()
time.sleep(2)
except Exception as e:
for iframe in iframe_ids:
try:
print("Attempting to close iframe")
                frame = driver.find_element(By.ID, iframe)  # iframe_ids holds element ids, not XPaths
driver.switch_to.frame(frame)
driver.find_element_by_xpath('//div[@class="widget__close"]').click()
driver.switch_to.default_content()
driver.find_element_by_xpath(show).click()
except Exception as e:
print(e)
print("Attempting to close lead form")
try:
driver.execute_script("document.querySelector('.lead-form__close').click();")
driver.find_element_by_xpath(show).click()
except Exception as e:
print(e)
print("Attempting to refresh the page")
driver.refresh()
time.sleep(1)
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, show)))
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.find_element_by_xpath(show).click()
time.sleep(2)
def get_data():
# driver.execute_script('window.scrollBy(0, -7000)')
print('Get prices')
elems_var = '//ul[@class="list-items list-view-1 js-list-items"]/li'
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, elems_var)))
elems = driver.find_elements(By.XPATH, elems_var)
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((
By.XPATH, '//ul[@class="list-items list-view-1 js-list-items"]/li[last()]'
)))
counter = 0
print('[begin gather loop]')
print(elems)
for el in elems:
counter += 1
driver.execute_script('window.scrollBy(0, {})'.format(counter * 20))
try:
title, price, brand, link, price_old = [None] * 5 # assign None to 5 variables
WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH,
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]'.format(counter))))
productinfo = json.loads(str(el.find_element_by_xpath(
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]'
.format(counter)).get_attribute('data-productinfo')).replace('\'', '"'))
            try:
                title = productinfo['name'].replace(productinfo['brand'] + ' ', '')
            except Exception:
                print('Failed to obtain title')
# id = productinfo['id']
try:
price = float(productinfo['price'])
            except Exception:
print('Failed to obtain price')
try:
brand = productinfo['brand']
            except Exception:
print("Failed to obtain brand")
try:
link = str(el.find_element(
By.XPATH,
'//ul[@class="list-items list-view-1 js-list-items"]/li[{}]//a[@class="item-link"]'.format(counter))
.get_attribute('href'))
            except Exception:
print("Failed to obtain link")
            try:
                # the original waited on EC.visibility_of_element_located without
                # a locator, which is a no-op; the lookup below already raises if
                # the old-price node is absent
                price_old = el.find_element(By.XPATH, '//ul[@class="list-items list-view-1 js-list-items"]'
                        '/li[{}]//span[@class="item-price-old"]/span'.format(counter)).get_attribute('content')
except Exception as e:
print("No discount for element {}".format(counter))
print(e)
tables[title] = [price, price_old, brand, link]
global row
            sheet.write('A' + str(row), title)
            sheet.write('B' + str(row), price)
            sheet.write('C' + str(row), price_old)
            # columns D ("Скидка") and F ("Артикул производителя") are never
            # computed here, so brand and link go under their own headers
            sheet.write('E' + str(row), brand)
            sheet.write('G' + str(row), link)
row += 1
except Exception as e:
print("Exception detected while parsing: ")
print(e)
global failed_pages
            page_num = re.sub('[^0-9]', '', str(driver.current_url)[-3:])
            failed_pages['pages'].append(page_num)
            print("Page {}".format(page_num))
print('Prices obtained')
def write_data():
try:
while driver.find_element(By.XPATH, last_page).get_attribute('class') != pagination_class_selected:
get_data()
change_page()
except:
get_data()
def write_file(url, filename, params=0):
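    """Scrape and dump results.

    params selects the crawl mode: 0 = full catalog, 1 = per-brand pages,
    2 = search-based (see the branches below).
    """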
try:
if params == 0:
""" ==== FULL ==== """
driver.get(url)
write_data()
driver.quit()
elif params == 1:
""" ==== BRANDS ==== """
open_brands()
driver.quit()
elif params == 2:
""" ==== SEARCH ==== """
search()
driver.quit()
output.close()
except Exception as e:
print("Error caught, terminating: " + str(e))
print('Writing file...')
if not path.exists('{}.json'.format(filename)):
with open('{}.json'.format(filename), 'w') as t:
json.dump({}, t)
t.close()
with open('{}.json'.format(filename), 'r+', encoding='utf-8') as t:
info = json.load(t)
t.seek(0)
info.update(tables)
json.dump(info, t, ensure_ascii=False, indent=4)
t.truncate()
print('...Completed writing')
t.close()
with open('{}_failed_pages.json'.format(filename), 'w', encoding='utf-8') as p:
json.dump(failed_pages, p, ensure_ascii=False, indent=4)
p.close()
def run():
write_file(url_brands, 'C:\\Users\\admin\\Documents\\outputs\\rendez-vous_brands_full', params=1)
write_file(url_brands, 'C:\\Users\\admin\\Documents\\outputs\\rendez-vous_brands_full', params=2)
print("End: " + str(datetime.datetime.now()))
if __name__ == '__main__':
run()
| 39.080986
| 136
| 0.594918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,549
| 0.317499
|
838511c8e3372a6ae2d5fbb109dbbc9156779d54
| 171
|
py
|
Python
|
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | 3
|
2015-11-20T14:30:53.000Z
|
2015-12-19T05:55:19.000Z
|
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | null | null | null |
stdlib/getpass_qs.py
|
bpuderer/python-snippets27
|
8d51ff34c48bee1247575536d8ed506eafde8631
|
[
"MIT"
] | 1
|
2016-01-05T20:54:49.000Z
|
2016-01-05T20:54:49.000Z
|
import getpass
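# NOTE: Python 2 print-statement syntax, consistent with the snippets27 repo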
# prompt user without echoing output
print getpass.getpass()
print getpass.getpass(prompt="Custom Prompt:")
print "user login name:", getpass.getuser()
| 17.1
| 46
| 0.766082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.409357
|
8385780ba677837b4c5c4c3d8cf272c764342143
| 385
|
py
|
Python
|
backend/todo/migrations/0008_auto_20190403_0812.py
|
Bhunesh2000/todoWithDjango
|
e5fa52a087180b66ae283e6b36fe790323d7b920
|
[
"MIT"
] | null | null | null |
backend/todo/migrations/0008_auto_20190403_0812.py
|
Bhunesh2000/todoWithDjango
|
e5fa52a087180b66ae283e6b36fe790323d7b920
|
[
"MIT"
] | 11
|
2019-04-03T09:49:17.000Z
|
2022-02-10T08:23:26.000Z
|
backend/todo/migrations/0008_auto_20190403_0812.py
|
Bhunesh2000/todoWithDjango
|
e5fa52a087180b66ae283e6b36fe790323d7b920
|
[
"MIT"
] | 1
|
2019-10-21T19:26:29.000Z
|
2019-10-21T19:26:29.000Z
|
# Generated by Django 2.2 on 2019-04-03 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0007_todo_dateevent'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='dateEvent',
field=models.DateField(default='2019-04-03'),
),
]
| 20.263158
| 57
| 0.58961
| 294
| 0.763636
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.262338
|
8385a072d6737fbd7ff6db50b44b8505e7dcadb3
| 1,797
|
py
|
Python
|
public/neumeeditor/models/fields/short_code_field.py
|
jacobsanz97/cantus
|
37d139ae20972c36d4abb96a2a5ac5106b0c1b47
|
[
"MIT"
] | null | null | null |
public/neumeeditor/models/fields/short_code_field.py
|
jacobsanz97/cantus
|
37d139ae20972c36d4abb96a2a5ac5106b0c1b47
|
[
"MIT"
] | null | null | null |
public/neumeeditor/models/fields/short_code_field.py
|
jacobsanz97/cantus
|
37d139ae20972c36d4abb96a2a5ac5106b0c1b47
|
[
"MIT"
] | null | null | null |
import re
from django.db import models
unacceptable_chars = "[^a-z0-9\._]"
duplicate_spaces_and_dots = "[\ .]+"
class ShortCodeField(models.CharField):
description = "A short string representing a glyph name"
def pre_save(self, model_instance, add):
model_instance.short_code = sanitize_short_code(model_instance.short_code)
return model_instance.short_code
def sanitize_short_code(input):
"""
    We want to filter out the undesirable characters.
"""
# Turn spaces and dots into single dots
new_code = re.sub(duplicate_spaces_and_dots, '.', input.strip().lower())
# Filter out everything bad
new_code = replace_common_words(re.sub(unacceptable_chars, '', new_code))
# Duplicates once more
return re.sub(duplicate_spaces_and_dots, '.', new_code)
def replace_common_words(input):
# Neumes that we will shorten
replacements = [
("torculus", "torc"),
("tractulus", "trac"),
("punctum", "pun"),
("stropha", "stro"),
("virga", "vir"),
("porrectus", "por"),
("ancus", "anc"),
("status", "stra"),
("quadratus", "q"),
("quassus", "quas"),
("oriscus", "ori"),
("episema", "e"),
("clivis", "cli"),
("rotundus", "r"),
("liquescent", "l"),
("quilismapes", "pes.quil"),
("two", "2"),
("three", "3"),
# Important to strip simple
(".simple", ""),
# Some other language stuff
("langer", "long"),
(".zweiter", ""),
(".abstrich", "")
]
return replace_words(input, replacements)
def replace_words(input, replacements):
    for old, new in replacements:
        # plain string replacement: several patterns contain ".", which re.sub
        # would otherwise treat as a wildcard
        input = input.replace(old, new)
    return input
| 28.983871
| 82
| 0.582638
| 270
| 0.15025
| 0
| 0
| 0
| 0
| 0
| 0
| 611
| 0.340011
|
8386475aa5d024fe1d36e6904efce3bbf70bc22b
| 328
|
py
|
Python
|
2/week2/c.py
|
briannice/logiscool-python
|
00cf772072f574d297ed487e8edc9bb0158b6c68
|
[
"Apache-2.0"
] | null | null | null |
2/week2/c.py
|
briannice/logiscool-python
|
00cf772072f574d297ed487e8edc9bb0158b6c68
|
[
"Apache-2.0"
] | null | null | null |
2/week2/c.py
|
briannice/logiscool-python
|
00cf772072f574d297ed487e8edc9bb0158b6c68
|
[
"Apache-2.0"
] | null | null | null |
def som(a, b):
"""Bereken de som van twee getallen. Als de som groter is dan nul return je de som.
Als de som kleiner is dan nul, dan return je nul.
Args:
a (int): het eerste getal
b (int): het tweede getal
"""
pass
assert som(1, 2) == 3
assert som(-1, -2) == -3
assert som(0, 0) == 0
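# NOTE: the second assertion (som(-1, -2) == -3) contradicts the docstring,
# which says a negative sum should be clamped to zero.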
| 20.5
| 87
| 0.570122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.692073
|
83882ea566cc14498c7c6f7269a02089a389aa86
| 2,862
|
py
|
Python
|
src/plugins/pipeline_plugins/utils/blob.py
|
google/cc4d
|
206543832368f96bac7f55c0de93c96e32127779
|
[
"Apache-2.0"
] | 11
|
2021-03-23T22:03:00.000Z
|
2022-03-30T17:12:38.000Z
|
src/plugins/pipeline_plugins/utils/blob.py
|
google/cc4d
|
206543832368f96bac7f55c0de93c96e32127779
|
[
"Apache-2.0"
] | 3
|
2021-07-21T10:13:24.000Z
|
2021-10-18T03:44:03.000Z
|
src/plugins/pipeline_plugins/utils/blob.py
|
google/cc4d
|
206543832368f96bac7f55c0de93c96e32127779
|
[
"Apache-2.0"
] | 5
|
2021-05-07T03:30:29.000Z
|
2021-11-03T21:05:00.000Z
|
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Blob class for data-in representation.
The Blob class contains all JSON events and all necessary metadata to the
operators.
"""
from typing import Any, Dict, List, Optional, Tuple
class Blob(object):
"""A Blob class for data-in representation.
The Blob class contains all JSON events and all necessary metadata to the
operators.
Attributes:
events: A list of JSON events to be sent.
location: The specific object location of the events within the source.
position: The events starting position within the object.
    failed_events: A list of (id, event, error_num) tuples containing the
      following info:
- id: start_id + the index of the event in events list.
- event: the JSON event.
- error: The errors.MonitoringIDsMap error ID.
num_rows: Number of events in blob. Defaults to length of events list.
reports: any additional optional information about the blob.
"""
def __init__(self,
events: List[Dict[str, Any]],
location: str,
reports: Optional[List[Any]] = None,
failed_events: Optional[List[Tuple[int, Dict[str, Any],
int]]] = None,
position: int = 0,
num_rows: Optional[int] = None) -> None:
"""Initiates Blob with events and location metadata."""
self.events = events
self.location = location
self.position = position
self.num_rows = num_rows if num_rows is not None else len(events)
self.failed_events = failed_events if failed_events else list()
self.reports = reports if reports else list()
def append_failed_events(
self, failed_events: List[Tuple[int, Dict[str, Any], int]]) -> None:
"""Appends the given events list to the blob's reports list."""
self.failed_events.extend(failed_events)
def append_failed_event(self, index: int, event: Dict[str, Any],
error_num: int) -> None:
"""Appends the given event to the blob's reports list."""
self.failed_events.append((index, event, error_num))
def extend_reports(self, report: Any) -> None:
"""Appends the given report to the blob's reports list."""
self.reports.extend(report)
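# A minimal usage sketch (illustrative only; not part of the original module):
#
#     blob = Blob(events=[{'id': 1}, {'id': 2}], location='bucket/object.json')
#     blob.append_failed_event(0, {'id': 1}, error_num=10)
#     assert blob.num_rows == 2 and len(blob.failed_events) == 1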
| 37.657895
| 75
| 0.677149
| 2,060
| 0.719776
| 0
| 0
| 0
| 0
| 0
| 0
| 1,692
| 0.591195
|
8388c207ef02a512832cd36b34b04ff91b5bc7e2
| 2,636
|
py
|
Python
|
LinearModel/scripts/three_classes_train.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
LinearModel/scripts/three_classes_train.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
LinearModel/scripts/three_classes_train.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# date: 2019/1/1, 19:38
# name: smz
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from LinearModel.modules.model3 import ModelThreeClasses
from LinearModel.configuration.options import opts
from LinearModel.scripts.gen_data import generate_data
def gen_train_data():
np.random.seed(10)
fields_num = 2
num_classes = 3
sample_size = 2000
mean = np.random.randn(fields_num)
cov = np.eye(fields_num)
    diffs = [[3.0], [3.0, 0.0]]  # class 3 differs from class 2 only along y; class 2 is offset from class 1 by 3.0 along both x and y
train_X, train_Y = generate_data(num_classes=num_classes, sample_size=sample_size, mean=mean, cov=cov, diffs=diffs)
np.save("../data/train_data_X3.npy", train_X)
np.save("../data/train_data_Y3.npy", train_Y)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
colors = ['r' if np.argmax(label) == 0 else 'b' if np.argmax(label) == 1 else 'y' for label in train_Y]
ax.scatter(train_X[:, 0], train_X[:, 1], c=colors)
ax.set_xlabel("Scaled age(in years)")
ax.set_ylabel("Tumor size(in cm)")
plt.show()
def train_3_classes():
"""这个有问题,因为使用softmax表示的结果和使用sigmoid的那个模型是不同的,需要重写模型"""
model3 = ModelThreeClasses(opts)
model3.build()
train_x3 = np.load("../data/train_data_X3.npy")
train_y3 = np.load("../data/train_data_Y3.npy")
model_name = "model3s.ckpt"
num_samples = len(train_x3)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(opts["epochs"]):
start_pointer = 0
train_x, train_y = shuffle(train_x3, train_y3)
while start_pointer < num_samples:
end_pointer = start_pointer + opts["batch_size"]
batch_x = train_x[start_pointer:end_pointer]
batch_y = train_y[start_pointer:end_pointer]
start_pointer = end_pointer
feed_dict = {model3.inputs: batch_x, model3.labels: batch_y}
loss_value, glob_step_value, merge_str, _ = sess.run(
                    fetches=[model3.loss, model3.global_step, model3.merge_op, model3.train_step],
feed_dict=feed_dict)
model3.writer.add_summary(merge_str, global_step=glob_step_value)
print("epoch:%d, step:%d, loss:%.6f"%(epoch, glob_step_value, loss_value))
if (epoch + 1) % 10 == 0:
model3.saver.save(sess, opts["checkpoints_dir"] + model_name, global_step=model3.global_step)
if __name__ == "__main__":
# gen_train_data()
train_3_classes()
| 35.621622
| 119
| 0.65478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 576
| 0.206452
|
83894f358de50ff81cde8fdfc6091027cb2fdbb8
| 21,108
|
py
|
Python
|
trio/_core/tests/test_multierror.py
|
JefffHofffman/trio
|
d8631117ce4ca19017bbe3850704dd5ce6cfaeb1
|
[
"Apache-2.0",
"MIT"
] | 4
|
2017-03-01T22:14:46.000Z
|
2020-07-31T07:18:18.000Z
|
trio/_core/tests/test_multierror.py
|
JefffHofffman/trio
|
d8631117ce4ca19017bbe3850704dd5ce6cfaeb1
|
[
"Apache-2.0",
"MIT"
] | 81
|
2017-01-22T11:58:29.000Z
|
2017-05-27T22:17:49.000Z
|
trio/_core/tests/test_multierror.py
|
JefffHofffman/trio
|
d8631117ce4ca19017bbe3850704dd5ce6cfaeb1
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-05-28T19:38:09.000Z
|
2020-05-28T19:38:09.000Z
|
import logging
import pytest
from traceback import extract_tb, print_exception, format_exception, _cause_message
import sys
import os
import re
from pathlib import Path
import subprocess
from .tutil import slow
from .._multierror import MultiError, concat_tb
from ..._core import open_nursery
class NotHashableException(Exception):
code = None
def __init__(self, code):
super().__init__()
self.code = code
def __eq__(self, other):
if not isinstance(other, NotHashableException):
return False
return self.code == other.code
async def raise_nothashable(code):
raise NotHashableException(code)
def raiser1():
raiser1_2()
def raiser1_2():
raiser1_3()
def raiser1_3():
raise ValueError("raiser1_string")
def raiser2():
raiser2_2()
def raiser2_2():
raise KeyError("raiser2_string")
def raiser3():
raise NameError
def get_exc(raiser):
try:
raiser()
except Exception as exc:
return exc
def get_tb(raiser):
return get_exc(raiser).__traceback__
def einfo(exc):
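    # Build the (type, value, traceback) triple expected by format_exception();
    # the same shape sys.exc_info() returns.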
return (type(exc), exc, exc.__traceback__)
def test_concat_tb():
tb1 = get_tb(raiser1)
tb2 = get_tb(raiser2)
# These return a list of (filename, lineno, fn name, text) tuples
# https://docs.python.org/3/library/traceback.html#traceback.extract_tb
entries1 = extract_tb(tb1)
entries2 = extract_tb(tb2)
tb12 = concat_tb(tb1, tb2)
assert extract_tb(tb12) == entries1 + entries2
tb21 = concat_tb(tb2, tb1)
assert extract_tb(tb21) == entries2 + entries1
# Check degenerate cases
assert extract_tb(concat_tb(None, tb1)) == entries1
assert extract_tb(concat_tb(tb1, None)) == entries1
assert concat_tb(None, None) is None
# Make sure the original tracebacks didn't get mutated by mistake
assert extract_tb(get_tb(raiser1)) == entries1
assert extract_tb(get_tb(raiser2)) == entries2
def test_MultiError():
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
assert MultiError([exc1]) is exc1
m = MultiError([exc1, exc2])
assert m.exceptions == [exc1, exc2]
assert "ValueError" in str(m)
assert "ValueError" in repr(m)
with pytest.raises(TypeError):
MultiError(object())
with pytest.raises(TypeError):
MultiError([KeyError(), ValueError])
def test_MultiErrorOfSingleMultiError():
    # For MultiError([MultiError]), ensure there is no bad recursion in the
    # constructor: __init__ is called even when __new__ returns an existing
    # bare MultiError.
exceptions = [KeyError(), ValueError()]
a = MultiError(exceptions)
b = MultiError([a])
assert b == a
assert b.exceptions == exceptions
async def test_MultiErrorNotHashable():
exc1 = NotHashableException(42)
exc2 = NotHashableException(4242)
exc3 = ValueError()
assert exc1 != exc2
assert exc1 != exc3
with pytest.raises(MultiError):
async with open_nursery() as nursery:
nursery.start_soon(raise_nothashable, 42)
nursery.start_soon(raise_nothashable, 4242)
def test_MultiError_filter_NotHashable():
excs = MultiError([NotHashableException(42), ValueError()])
def handle_ValueError(exc):
if isinstance(exc, ValueError):
return None
else:
return exc
filtered_excs = MultiError.filter(handle_ValueError, excs)
assert isinstance(filtered_excs, NotHashableException)
def test_traceback_recursion():
exc1 = RuntimeError()
exc2 = KeyError()
exc3 = NotHashableException(42)
# Note how this creates a loop, where exc1 refers to exc1
# This could trigger an infinite recursion; the 'seen' set is supposed to prevent
# this.
exc1.__cause__ = MultiError([exc1, exc2, exc3])
    # traceback.TracebackException on Python < 3.6.4 does not support
    # unhashable exceptions and raises a TypeError instead
if sys.version_info < (3, 6, 4):
with pytest.raises(TypeError):
format_exception(*einfo(exc1))
else:
format_exception(*einfo(exc1))
def make_tree():
# Returns an object like:
# MultiError([
# MultiError([
# ValueError,
# KeyError,
# ]),
# NameError,
# ])
# where all exceptions except the root have a non-trivial traceback.
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
exc3 = get_exc(raiser3)
# Give m12 a non-trivial traceback
try:
raise MultiError([exc1, exc2])
except BaseException as m12:
return MultiError([m12, exc3])
def assert_tree_eq(m1, m2):
if m1 is None or m2 is None:
assert m1 is m2
return
assert type(m1) is type(m2)
assert extract_tb(m1.__traceback__) == extract_tb(m2.__traceback__)
assert_tree_eq(m1.__cause__, m2.__cause__)
assert_tree_eq(m1.__context__, m2.__context__)
if isinstance(m1, MultiError):
assert len(m1.exceptions) == len(m2.exceptions)
for e1, e2 in zip(m1.exceptions, m2.exceptions):
assert_tree_eq(e1, e2)
def test_MultiError_filter():
def null_handler(exc):
return exc
m = make_tree()
assert_tree_eq(m, m)
assert MultiError.filter(null_handler, m) is m
assert_tree_eq(m, make_tree())
# Make sure we don't pick up any detritus if run in a context where
# implicit exception chaining would like to kick in
m = make_tree()
try:
raise ValueError
except ValueError:
assert MultiError.filter(null_handler, m) is m
assert_tree_eq(m, make_tree())
def simple_filter(exc):
if isinstance(exc, ValueError):
return None
if isinstance(exc, KeyError):
return RuntimeError()
return exc
new_m = MultiError.filter(simple_filter, make_tree())
assert isinstance(new_m, MultiError)
assert len(new_m.exceptions) == 2
# was: [[ValueError, KeyError], NameError]
# ValueError disappeared & KeyError became RuntimeError, so now:
assert isinstance(new_m.exceptions[0], RuntimeError)
assert isinstance(new_m.exceptions[1], NameError)
# implicit chaining:
assert isinstance(new_m.exceptions[0].__context__, KeyError)
# also, the traceback on the KeyError incorporates what used to be the
# traceback on its parent MultiError
orig = make_tree()
# make sure we have the right path
assert isinstance(orig.exceptions[0].exceptions[1], KeyError)
# get original traceback summary
orig_extracted = (
extract_tb(orig.__traceback__) +
extract_tb(orig.exceptions[0].__traceback__) +
extract_tb(orig.exceptions[0].exceptions[1].__traceback__)
)
def p(exc):
print_exception(type(exc), exc, exc.__traceback__)
p(orig)
p(orig.exceptions[0])
p(orig.exceptions[0].exceptions[1])
p(new_m.exceptions[0].__context__)
# compare to the new path
assert new_m.__traceback__ is None
new_extracted = extract_tb(new_m.exceptions[0].__context__.__traceback__)
assert orig_extracted == new_extracted
# check preserving partial tree
def filter_NameError(exc):
if isinstance(exc, NameError):
return None
return exc
m = make_tree()
new_m = MultiError.filter(filter_NameError, m)
# with the NameError gone, the other branch gets promoted
assert new_m is m.exceptions[0]
# check fully handling everything
def filter_all(exc):
return None
assert MultiError.filter(filter_all, make_tree()) is None
def test_MultiError_catch():
# No exception to catch
def noop(_):
pass # pragma: no cover
with MultiError.catch(noop):
pass
# Simple pass-through of all exceptions
m = make_tree()
with pytest.raises(MultiError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise m
assert excinfo.value is m
# Should be unchanged, except that we added a traceback frame by raising
# it here
assert m.__traceback__ is not None
assert m.__traceback__.tb_frame.f_code.co_name == "test_MultiError_catch"
assert m.__traceback__.tb_next is None
m.__traceback__ = None
assert_tree_eq(m, make_tree())
# Swallows everything
with MultiError.catch(lambda _: None):
raise make_tree()
def simple_filter(exc):
if isinstance(exc, ValueError):
return None
if isinstance(exc, KeyError):
return RuntimeError()
return exc
with pytest.raises(MultiError) as excinfo:
with MultiError.catch(simple_filter):
raise make_tree()
new_m = excinfo.value
assert isinstance(new_m, MultiError)
assert len(new_m.exceptions) == 2
# was: [[ValueError, KeyError], NameError]
# ValueError disappeared & KeyError became RuntimeError, so now:
assert isinstance(new_m.exceptions[0], RuntimeError)
assert isinstance(new_m.exceptions[1], NameError)
# Make sure that Python did not successfully attach the old MultiError to
# our new MultiError's __context__
assert not new_m.__suppress_context__
assert new_m.__context__ is None
# check preservation of __cause__ and __context__
v = ValueError()
v.__cause__ = KeyError()
with pytest.raises(ValueError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise v
assert isinstance(excinfo.value.__cause__, KeyError)
v = ValueError()
context = KeyError()
v.__context__ = context
with pytest.raises(ValueError) as excinfo:
with MultiError.catch(lambda exc: exc):
raise v
assert excinfo.value.__context__ is context
assert not excinfo.value.__suppress_context__
for suppress_context in [True, False]:
v = ValueError()
context = KeyError()
v.__context__ = context
v.__suppress_context__ = suppress_context
distractor = RuntimeError()
with pytest.raises(ValueError) as excinfo:
def catch_RuntimeError(exc):
if isinstance(exc, RuntimeError):
return None
else:
return exc
with MultiError.catch(catch_RuntimeError):
raise MultiError([v, distractor])
assert excinfo.value.__context__ is context
assert excinfo.value.__suppress_context__ == suppress_context
def assert_match_in_seq(pattern_list, string):
offset = 0
print("looking for pattern matches...")
for pattern in pattern_list:
print("checking pattern:", pattern)
reobj = re.compile(pattern)
match = reobj.search(string, offset)
assert match is not None
offset = match.end()
def test_assert_match_in_seq():
assert_match_in_seq(["a", "b"], "xx a xx b xx")
assert_match_in_seq(["b", "a"], "xx b xx a xx")
with pytest.raises(AssertionError):
assert_match_in_seq(["a", "b"], "xx b xx a xx")
def test_format_exception():
exc = get_exc(raiser1)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
exc = get_exc(raiser1)
exc.__cause__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" in formatted
assert "in raiser2_2" in formatted
assert "direct cause" in formatted
assert "During handling" not in formatted
# ensure cause included
assert _cause_message in formatted
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" in formatted
assert "in raiser2_2" in formatted
assert "direct cause" not in formatted
assert "During handling" in formatted
exc.__suppress_context__ = True
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
# chain=False
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc), chain=False))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
assert "direct cause" not in formatted
assert "During handling" not in formatted
# limit
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
# get_exc adds a frame that counts against the limit, so limit=2 means we
# get 1 deep into the raiser stack
formatted = "".join(format_exception(*einfo(exc), limit=2))
print(formatted)
assert "raiser1_string" in formatted
assert "in raiser1" in formatted
assert "in raiser1_2" not in formatted
assert "raiser2_string" in formatted
assert "in raiser2" in formatted
assert "in raiser2_2" not in formatted
exc = get_exc(raiser1)
exc.__context__ = get_exc(raiser2)
formatted = "".join(format_exception(*einfo(exc), limit=1))
print(formatted)
assert "raiser1_string" in formatted
assert "in raiser1" not in formatted
assert "raiser2_string" in formatted
assert "in raiser2" not in formatted
# handles loops
exc = get_exc(raiser1)
exc.__cause__ = exc
formatted = "".join(format_exception(*einfo(exc)))
assert "raiser1_string" in formatted
assert "in raiser1_3" in formatted
assert "raiser2_string" not in formatted
assert "in raiser2_2" not in formatted
# ensure duplicate exception is not included as cause
assert _cause_message not in formatted
# MultiError
formatted = "".join(format_exception(*einfo(make_tree())))
print(formatted)
assert_match_in_seq(
[
# Outer exception is MultiError
r"MultiError:",
# First embedded exception is the embedded MultiError
r"\nDetails of embedded exception 1",
# Which has a single stack frame from make_tree raising it
r"in make_tree",
# Then it has two embedded exceptions
r" Details of embedded exception 1",
r"in raiser1_2",
# for some reason ValueError has no quotes
r"ValueError: raiser1_string",
r" Details of embedded exception 2",
r"in raiser2_2",
# But KeyError does have quotes
r"KeyError: 'raiser2_string'",
# And finally the NameError, which is a sibling of the embedded
# MultiError
r"\nDetails of embedded exception 2:",
r"in raiser3",
r"NameError",
],
formatted
)
# Prints duplicate exceptions in sub-exceptions
exc1 = get_exc(raiser1)
def raise1_raiser1():
try:
raise exc1
except:
raise ValueError("foo")
def raise2_raiser1():
try:
raise exc1
except:
raise KeyError("bar")
exc2 = get_exc(raise1_raiser1)
exc3 = get_exc(raise2_raiser1)
try:
raise MultiError([exc2, exc3])
except MultiError as e:
exc = e
formatted = "".join(format_exception(*einfo(exc)))
print(formatted)
assert_match_in_seq(
[
r"Traceback",
# Outer exception is MultiError
r"MultiError:",
# First embedded exception is the embedded ValueError with cause of raiser1
r"\nDetails of embedded exception 1",
# Print details of exc1
r" Traceback",
r"in get_exc",
r"in raiser1",
r"ValueError: raiser1_string",
# Print details of exc2
r"\n During handling of the above exception, another exception occurred:",
r" Traceback",
r"in get_exc",
r"in raise1_raiser1",
r" ValueError: foo",
# Second embedded exception is the embedded KeyError with cause of raiser1
r"\nDetails of embedded exception 2",
# Print details of exc1 again
r" Traceback",
r"in get_exc",
r"in raiser1",
r"ValueError: raiser1_string",
# Print details of exc3
r"\n During handling of the above exception, another exception occurred:",
r" Traceback",
r"in get_exc",
r"in raise2_raiser1",
r" KeyError: 'bar'",
],
formatted
)
def test_logging(caplog):
exc1 = get_exc(raiser1)
exc2 = get_exc(raiser2)
m = MultiError([exc1, exc2])
message = "test test test"
try:
raise m
except MultiError as exc:
logging.getLogger().exception(message)
# Join lines together
formatted = "".join(
format_exception(type(exc), exc, exc.__traceback__)
)
assert message in caplog.text
assert formatted in caplog.text
def run_script(name, use_ipython=False):
import trio
trio_path = Path(trio.__file__).parent.parent
script_path = Path(__file__).parent / "test_multierror_scripts" / name
env = dict(os.environ)
print("parent PYTHONPATH:", env.get("PYTHONPATH"))
if "PYTHONPATH" in env: # pragma: no cover
pp = env["PYTHONPATH"].split(os.pathsep)
else:
pp = []
pp.insert(0, str(trio_path))
pp.insert(0, str(script_path.parent))
env["PYTHONPATH"] = os.pathsep.join(pp)
print("subprocess PYTHONPATH:", env.get("PYTHONPATH"))
if use_ipython:
lines = [script_path.open().read(), "exit()"]
cmd = [
sys.executable,
"-u",
"-m",
"IPython",
# no startup files
"--quick",
"--TerminalIPythonApp.code_to_run=" + '\n'.join(lines),
]
else:
cmd = [sys.executable, "-u", str(script_path)]
print("running:", cmd)
completed = subprocess.run(
cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
print("process output:")
print(completed.stdout.decode("utf-8"))
return completed
def check_simple_excepthook(completed):
assert_match_in_seq(
[
"in <module>",
"MultiError",
"Details of embedded exception 1",
"in exc1_fn",
"ValueError",
"Details of embedded exception 2",
"in exc2_fn",
"KeyError",
], completed.stdout.decode("utf-8")
)
def test_simple_excepthook():
completed = run_script("simple_excepthook.py")
check_simple_excepthook(completed)
def test_custom_excepthook():
# Check that user-defined excepthooks aren't overridden
completed = run_script("custom_excepthook.py")
assert_match_in_seq(
[
# The warning
"RuntimeWarning",
"already have a custom",
# The message printed by the custom hook, proving we didn't
# override it
"custom running!",
# The MultiError
"MultiError:",
],
completed.stdout.decode("utf-8")
)
# This warning is triggered by ipython 7.5.0 on python 3.8
import warnings
warnings.filterwarnings(
"ignore",
message=".*\"@coroutine\" decorator is deprecated",
category=DeprecationWarning,
module="IPython.*"
)
try:
import IPython
except ImportError: # pragma: no cover
have_ipython = False
else:
have_ipython = True
need_ipython = pytest.mark.skipif(not have_ipython, reason="need IPython")
@slow
@need_ipython
def test_ipython_exc_handler():
completed = run_script("simple_excepthook.py", use_ipython=True)
check_simple_excepthook(completed)
@slow
@need_ipython
def test_ipython_imported_but_unused():
completed = run_script("simple_excepthook_IPython.py")
check_simple_excepthook(completed)
@slow
@need_ipython
def test_ipython_custom_exc_handler():
# Check we get a nice warning (but only one!) if the user is using IPython
# and already has some other set_custom_exc handler installed.
completed = run_script("ipython_custom_exc.py", use_ipython=True)
assert_match_in_seq(
[
# The warning
"RuntimeWarning",
"IPython detected",
"skip installing Trio",
# The MultiError
"MultiError",
"ValueError",
"KeyError",
],
completed.stdout.decode("utf-8")
)
# Make sure our other warning doesn't show up
assert "custom sys.excepthook" not in completed.stdout.decode("utf-8")
| 29.646067
| 88
| 0.648664
| 287
| 0.013597
| 0
| 0
| 1,037
| 0.049128
| 449
| 0.021272
| 5,729
| 0.271414
|
838a825c230b5aebf0d63f09c997caea89e365c9
| 3,896
|
py
|
Python
|
servo/drv/ec3po_gpio.py
|
mmind/servo-hdctools
|
c7d50190837497dafc45f6efe18bf01d6e70cfd2
|
[
"BSD-3-Clause"
] | 2
|
2019-09-25T22:44:39.000Z
|
2020-07-26T22:29:20.000Z
|
servo/drv/ec3po_gpio.py
|
mmind/servo-hdctools
|
c7d50190837497dafc45f6efe18bf01d6e70cfd2
|
[
"BSD-3-Clause"
] | null | null | null |
servo/drv/ec3po_gpio.py
|
mmind/servo-hdctools
|
c7d50190837497dafc45f6efe18bf01d6e70cfd2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver for gpio controls through ec3po.
Provides the following console controlled function:
_Get_single, _Set_single, _Get_multi, _Set_multi
"""
import logging
import pty_driver
import servo
# EC console mask for enabling only command channel
COMMAND_CHANNEL_MASK = 0x1
# servod numeric translation for GPIO state.
GPIO_STATE = {
0: '0',
1: '1',
2: 'IN',
3: 'A',
4: 'ALT'
}
class ec3poGpioError(Exception):
"""Exception class for ec."""
class ec3poGpio(pty_driver.ptyDriver):
"""Object to access drv=ec3po_gpio controls.
Note, instances of this object get dispatched via base class,
HwDriver's get/set method. That method ultimately calls:
"_[GS]et_%s" % params['subtype'] below.
For example, a control to read kbd_en would be dispatched to
call _Get_kbd_en.
"""
def __init__(self, interface, params):
"""Constructor.
Args:
interface: ec3po interface object to handle low-level communication to
control
params: dictionary of params needed to perform operations on
devices. Must contain name:"GPIO_NAME" or names:"GPIO_b0,GPIO_b1"
Must contain subtype=single or multi.
Raises:
ec3poGpioError: on init failure
"""
super(ec3poGpio, self).__init__(interface, params)
if "name" in params:
self._gpio_name = params["name"]
elif "names" in params:
self._gpio_names = []
for name in params["names"].split(','):
self._gpio_names.insert(0, name.strip())
else:
raise ec3poGpioError("No GPIO name specified")
if "console" in params:
if params["console"] == "enhanced" and \
type(interface) is servo.ec3po_interface.EC3PO:
interface._console.oobm_queue.put('interrogate never enhanced')
else:
raise ec3poGpioError("Enhanced console must be ec3po!")
self._logger.debug("")
def set_gpio(self, name, value):
"""Set the named GPIO to the specified value.
Uses the console gpioset command.
Args:
name: name of the GPIO to modify
value: the state to set into the GPIO
"""
cmd = "gpioset %s %s\r" % (name, GPIO_STATE[value])
self._issue_cmd(cmd)
def get_gpio(self, name):
"""Get gpio logical value.
Args:
name: name of the GPIO to query
Returns:
0 or 1
"""
cmd = "gpioget %s\r" % name
regex = " ([01])[ *] .*%s" % name
results = self._issue_cmd_get_results(cmd, [regex])[0]
res_value = int(results[1])
return res_value
def _Set_single(self, value):
"""Set GPIO through gpioset console command.
Args:
value: the state to set into the GPIO
"""
self.set_gpio(self._gpio_name, value)
def _Get_single(self):
"""Get gpio logical value.
Returns:
0 or 1
"""
value = self.get_gpio(self._gpio_name)
return value
def _Set_multi(self, value):
"""Set several GPIOs according to a mask
Assigns the GPIOs specified in "names" to the bit values
specified in value.
Args:
value: An integer value, where each bit will be assigned to a GPIO.
"""
if value >> len(self._gpio_names):
raise ec3poGpioError("Extra bits left over in v:%d on %s" % (
value, self._gpio_names))
offset = len(self._gpio_names) - 1
for gpio in self._gpio_names:
bit = (value >> offset) & 0x1
self.set_gpio(gpio, bit)
offset -= 1
def _Get_multi(self):
"""Get each listed gpio and provide a bit array of values.
Returns:
an integer with each bit set according to the state of its GPIO.
"""
value = 0
for gpio in self._gpio_names:
bit = self.get_gpio(gpio)
value = value << 1
value = value | (bit & 0x1)
return value
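# ---------------------------------------------------------------------
# Illustrative sketch (not part of the driver): the bit packing used by
# _Set_multi/_Get_multi above, in plain Python so it runs without servod.
# The GPIO names and readback bits are hypothetical example values.
if __name__ == "__main__":
    names = ["GPIO_B1", "GPIO_B0"]  # most significant bit first
    value = 0b10
    offset = len(names) - 1
    for name in names:  # decompose, as _Set_multi does
        print("%s <- %d" % (name, (value >> offset) & 0x1))
        offset -= 1
    readback = [1, 0]  # pretend gpioget returned these bits, in order
    value = 0
    for bit in readback:  # recompose, as _Get_multi does
        value = (value << 1) | (bit & 0x1)
    print("value = %d" % value)  # -> 2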
| 25.973333
| 76
| 0.652977
| 3,330
| 0.854723
| 0
| 0
| 0
| 0
| 0
| 0
| 2,183
| 0.560318
|
838aa6ee041eb5a569fe74b2aa529125b8e2fda2
| 2,595
|
py
|
Python
|
scipy_util/util.py
|
idf/sci_util_py
|
53b4d961a1a8faeb444d2972ca7a2baf4a966f6e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy_util/util.py
|
idf/sci_util_py
|
53b4d961a1a8faeb444d2972ca7a2baf4a966f6e
|
[
"BSD-3-Clause"
] | 1
|
2016-02-10T19:17:20.000Z
|
2016-02-10T20:04:59.000Z
|
scipy_util/util.py
|
idf/scipy_util
|
53b4d961a1a8faeb444d2972ca7a2baf4a966f6e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Adapted from https://github.com/bytefish/facerec
"""
import os
from PIL import Image
import numpy as np
import random
def read_image(filename):
imarr = np.array([])
try:
im = Image.open(os.path.join(filename))
im = im.convert("L") # convert to greyscale
imarr = np.array(im, dtype=np.uint8)
except IOError as err:
print("I/O error({0}): {1}".format(err.errno, err.strerror))
except Exception:
print("Cannot open image.")
return imarr
def asRowMatrix(X):
"""
Creates a row-matrix from multi-dimensional data items in list l.
X [list] List with multi-dimensional data.
"""
if len(X) == 0:
return np.array([])
total = 1
for i in range(0, np.ndim(X[0])):
total = total * X[0].shape[i]
mat = np.empty([0, total], dtype=X[0].dtype)
for row in X:
mat = np.append(mat, row.reshape(1, -1), axis=0) # same as vstack
return np.asmatrix(mat)
def asColumnMatrix(X):
"""
Creates a column-matrix from multi-dimensional data items in list X.
X [list] List with multi-dimensional data.
"""
if len(X) == 0:
return np.array([])
total = 1
for i in range(0, np.ndim(X[0])):
total = total * X[0].shape[i]
mat = np.empty([total, 0], dtype=X[0].dtype)
for col in X:
mat = np.append(mat, col.reshape(-1, 1), axis=1) # same as hstack
return np.asmatrix(mat)
def minmax_normalize(X, low, high, minX=None, maxX=None, dtype=float):
""" min-max normalize a given matrix to given range [low,high].
Args:
X [rows x columns] input data
low [numeric] lower bound
high [numeric] upper bound
"""
if minX is None:
minX = np.min(X)
if maxX is None:
maxX = np.max(X)
minX = float(minX)
maxX = float(maxX)
# Normalize to [0...1].
X = X - minX
X = X / (maxX - minX)
# Scale to [low...high].
X = X * (high - low)
X = X + low
return np.asarray(X, dtype=dtype)
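# A minimal usage sketch for minmax_normalize (illustrative, not part of the
# adapted facerec code): map [0, 5, 10] onto the range [-1, 1].
if __name__ == "__main__":
    demo = np.array([0.0, 5.0, 10.0])
    print(minmax_normalize(demo, low=-1, high=1))  # -> [-1.  0.  1.]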
def shuffle(X, y):
idx = np.argsort([random.random() for i in range(y.shape[0])])
return X[:, idx], y[idx]
def shuffle_array(X, y):
""" Shuffles two arrays!
"""
idx = np.argsort([random.random() for i in range(len(y))])
X = [X[i] for i in idx]
y = [y[i] for i in idx]
return X, y
def to_col_vec(row_vec):
"""
:param row_vec: 1d np array
:return:
"""
return row_vec[:, np.newaxis]
def to_row_vec(col_vec):
"""
:param col_vec: 2d np array
:return:
"""
return col_vec.reshape(1, -1)
| 23.807339
| 74
| 0.570713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 813
| 0.313295
|
838c4a8c221ca4daa94ec9e1d608b97fed7bdb05
| 110
|
py
|
Python
|
cmsplugin_markdown/apps.py
|
glomium/cmstemplate
|
6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_markdown/apps.py
|
glomium/cmstemplate
|
6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_markdown/apps.py
|
glomium/cmstemplate
|
6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class CmspluginMarkdownConfig(AppConfig):
name = 'cmsplugin_markdown'
| 18.333333
| 41
| 0.8
| 73
| 0.663636
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.181818
|
838edfd8c862125c349c121325bcacc9bf203166
| 1,177
|
py
|
Python
|
tools/harness/tests/freemem.py
|
lambdaxymox/barrelfish
|
06a9f54721a8d96874a8939d8973178a562c342f
|
[
"MIT"
] | 111
|
2015-02-03T02:57:27.000Z
|
2022-03-01T23:57:09.000Z
|
tools/harness/tests/freemem.py
|
lambdaxymox/barrelfish
|
06a9f54721a8d96874a8939d8973178a562c342f
|
[
"MIT"
] | 12
|
2016-03-22T14:44:32.000Z
|
2020-03-18T13:30:29.000Z
|
tools/harness/tests/freemem.py
|
lambdaxymox/barrelfish
|
06a9f54721a8d96874a8939d8973178a562c342f
|
[
"MIT"
] | 55
|
2015-02-03T05:28:12.000Z
|
2022-03-31T05:00:03.000Z
|
##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import tests
from common import TestCommon
from results import PassFailResult
@tests.add_test
class MemTest(TestCommon):
'''prints out free and total memory after system boot up'''
name = "freemem"
def get_modules(self, build, machine):
modules = super(MemTest, self).get_modules(build, machine)
modules.add_module("freemem")
return modules
def get_finish_string(self):
return "freemem done!"
def process_data(self, testdir, rawiter):
# the test passed iff the last line is the finish string
lastline = ''
for line in rawiter:
lastline = line
passed = lastline.startswith(self.get_finish_string())
return PassFailResult(passed)
| 33.628571
| 80
| 0.605777
| 640
| 0.543755
| 0
| 0
| 656
| 0.557349
| 0
| 0
| 572
| 0.485981
|
8391c2e017e6f922119fae69c3e8b24e0d685ffc
| 2,959
|
py
|
Python
|
grad_cam.py
|
SamuelCahyawijaya/pytorch-smoothgrad
|
d9a5a359aab520a500e19359b309d1c030babb20
|
[
"MIT"
] | 77
|
2017-07-28T15:54:44.000Z
|
2018-04-21T08:25:36.000Z
|
grad_cam.py
|
SamuelCahyawijaya/pytorch-smoothgrad
|
d9a5a359aab520a500e19359b309d1c030babb20
|
[
"MIT"
] | null | null | null |
grad_cam.py
|
SamuelCahyawijaya/pytorch-smoothgrad
|
d9a5a359aab520a500e19359b309d1c030babb20
|
[
"MIT"
] | 12
|
2019-10-11T16:00:51.000Z
|
2021-12-10T03:21:54.000Z
|
import argparse
import os
import sys
import numpy as np
from scipy import misc
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg16, vgg19
from torchvision.utils import save_image
from lib.gradients import GradCam, GuidedBackpropGrad
from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image
from lib.labels import IMAGENET_LABELS
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--img', type=str, default='',
help='Input image path')
parser.add_argument('--out_dir', type=str, default='./result/cam/',
help='Result directory path')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if args.cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
if args.img:
print('Input image: {}'.format(args.img))
else:
print('Input image: raccoon face (scipy.misc.face())')
print('Output directory: {}'.format(args.out_dir))
print()
return args
def main():
args = parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
target_layer_names = ['35']
target_index = None
# Prepare input image
if args.img:
img = cv2.imread(args.img, 1)
else:
img = misc.face()
img = np.float32(cv2.resize(img, (224, 224))) / 255
preprocessed_img = preprocess_image(img, args.cuda)
model = vgg19(pretrained=True)
if args.cuda:
model.cuda()
# Prediction
output = model(preprocessed_img)
pred_index = np.argmax(output.data.cpu().numpy())
print('Prediction: {}'.format(IMAGENET_LABELS[pred_index]))
# Prepare grad cam
grad_cam = GradCam(
pretrained_model=model,
target_layer_names=target_layer_names,
cuda=args.cuda)
# Compute grad cam
mask = grad_cam(preprocessed_img, target_index)
save_cam_image(img, mask, os.path.join(args.out_dir, 'grad_cam.jpg'))
print('Saved Grad-CAM image')
# Reload preprocessed image
preprocessed_img = preprocess_image(img, args.cuda)  # keep input on the same device as the model
# Compute guided backpropagation
guided_backprop = GuidedBackpropGrad(
pretrained_model=model, cuda=args.cuda)
guided_backprop_saliency = guided_backprop(preprocessed_img, index=target_index)
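# Replicate the 2D Grad-CAM mask across every channel of the guided
# backprop saliency, then gate the saliency elementwise; the product is
# the guided Grad-CAM result.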
cam_mask = np.zeros(guided_backprop_saliency.shape)
for i in range(guided_backprop_saliency.shape[0]):
cam_mask[i, :, :] = mask
cam_guided_backprop = np.multiply(cam_mask, guided_backprop_saliency)
save_as_gray_image(
cam_guided_backprop,
os.path.join(args.out_dir, 'guided_grad_cam.jpg'))
print('Saved Guided Grad-CAM image')
if __name__ == '__main__':
main()
| 29.29703
| 84
| 0.677256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.172356
|
83927dd68fcacbf5d7c2a21161a37e8eb6342054
| 1,021
|
py
|
Python
|
Task2G.py
|
asew4/Flood-Warning-System-8
|
66f436caf8307232604b830e4dc4ab385de0556e
|
[
"MIT"
] | null | null | null |
Task2G.py
|
asew4/Flood-Warning-System-8
|
66f436caf8307232604b830e4dc4ab385de0556e
|
[
"MIT"
] | null | null | null |
Task2G.py
|
asew4/Flood-Warning-System-8
|
66f436caf8307232604b830e4dc4ab385de0556e
|
[
"MIT"
] | 1
|
2022-02-06T02:27:29.000Z
|
2022-02-06T02:27:29.000Z
|
from floodsystem import stationdata
stations = stationdata.build_station_list()
stationdata.update_water_levels(stations)
#Empty lists for each of the risk categories
severe_level_station = []
high_level_station = []
moderate_level_station = []
low_level_station = []
for station in stations: #Sorts out stations into different levels
level = station.relative_water_level()
if level is not None:
if level > 1.2:
severe_level_station.append(station)
elif level > 0.9:
high_level_station.append(station)
elif level > 0.7:
moderate_level_station.append(station)
else:
low_level_station.append(station)
#sets for the different categories
severe_town = {x.town for x in severe_level_station}
high_town = {x.town for x in high_level_station}
moderate_town = {x.town for x in moderate_level_station}
low_town = {x.town for x in low_level_station}
for town in severe_town:
print(town)
| 30.939394
| 66
| 0.712047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.119491
|
83929360847de74ce432577b4612ddd776a07618
| 3,472
|
py
|
Python
|
Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
#//-------------------------------------------------------------------
#/*/{Protheus.doc} ACDA035 -
#
#@author PEDRO ANTONIO MISSAGLIA
#@since 23/09/2019
#@version P12
#
# CT001 - Inclusion of an Inventory Entry
# CT002 - View of an inventory entry
# CT003 - Viewing of the legends
# CT004 - Modification of an Inventory Entry
# CT005 - Deletion of an Inventory Entry
# CT007 - Modification of an Inventory Entry without finishing the count
#
#/*/
#//-------------------------------------------------------------------
from tir import Webapp
import unittest
import time
class ACDA035(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAEST','11/07/2019','T1','D MG 01')
inst.oHelper.Program('ACDA035')
inst.oHelper.AddParameter("MV_CBPE012", "", ".T.", ".T.", ".T.")
inst.oHelper.SetParameters()
#CT001 - Generation of a picking order from a production order
#@author: Pedro Antonio Missaglia
#@date: 18/09/2019
def test_ACDA035_CT001(self):
self.oHelper.SetButton("Incluir")
self.oHelper.SetButton('Ok')
self.oHelper.SetValue('Codigo Inv.', '000000005')
self.oHelper.SetValue('Usuario', '000010')
self.oHelper.SetValue('Quantidade', '1', grid=True)
self.oHelper.SetValue('Endereco', 'ENDSE01', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Não')
self.oHelper.SetButton('Cancelar')
#Definition of the operation
self.oHelper.AssertTrue()
def test_ACDA035_CT002(self):
self.oHelper.SearchBrowse("D MG 01 000000003")
self.oHelper.SetButton("Visão")
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
def test_ACDA035_CT003(self):
self.oHelper.SetButton("Outras Ações", "Legenda")
self.oHelper.SetButton('Ok')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_ACDA035_CT004(self):
self.oHelper.SearchBrowse("D MG 01 000000030")
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue('Quantidade', '3', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Não')
self.oHelper.AssertTrue()
def test_ACDA035_CT005(self):
self.oHelper.SearchBrowse("D MG 01 000000005")
self.oHelper.SetButton("Outras Ações", "Excluir")
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Sim')
self.oHelper.AssertTrue()
def test_ACDA035_CT006(self):
self.oHelper.AddParameter("MV_WMSNEW ", "", ".F.", ".T.", ".T.")
self.oHelper.SetParameters()
self.oHelper.SearchBrowse("D MG 01 000000029")
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue('Qtd.Original', '3', grid=True)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Salvar')
self.oHelper.SetButton('Sim')
self.oHelper.AssertTrue()
def test_ACDA035_CT007(self):
self.oHelper.SearchBrowse("D MG 01 000000032")
self.oHelper.SetButton("Alterar")
self.oHelper.SetButton("Salvar")
self.oHelper.WaitShow("Deseja finalizar a contagem?")
self.oHelper.SetButton("Não")
self.oHelper.WaitHide("Deseja finalizar a contagem?")
time.sleep(3)
self.oHelper.SetButton("Visão")
self.oHelper.CheckResult("Produto","ACDACDA03500000000000000000001",grid=True, line=1)
self.oHelper.LoadGrid()
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| 27.125
| 88
| 0.692396
| 2,833
| 0.807123
| 0
| 0
| 318
| 0.090598
| 0
| 0
| 1,348
| 0.384046
|
8392df442da4b2f8acf4cb05c261720a7e2145a4
| 967
|
py
|
Python
|
tests/unit/test_cust_driver.py
|
abreu4/jina
|
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
|
[
"Apache-2.0"
] | 2
|
2021-01-22T07:34:35.000Z
|
2021-01-23T04:36:41.000Z
|
tests/unit/test_cust_driver.py
|
abreu4/jina
|
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
|
[
"Apache-2.0"
] | 1
|
2021-02-27T05:56:45.000Z
|
2021-02-27T05:57:03.000Z
|
tests/unit/test_cust_driver.py
|
abreu4/jina
|
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from jina import Flow, Document
from jina.executors import BaseExecutor
from jina.parsers import set_pea_parser
from jina.peapods.peas import BasePea
cur_dir = Path(__file__).parent
def test_load_executor_with_custom_driver():
with BaseExecutor.load_config(str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')) as be:
assert be._drivers['IndexRequest'][0].__class__.__name__ == 'DummyEncodeDriver'
def test_load_pod_with_custom_driver():
args = set_pea_parser().parse_args(['--uses', str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')])
with BasePea(args):
# load success with no error
pass
def validate(req):
assert len(req.docs) == 1
assert req.docs[0].text == 'hello from DummyEncodeDriver'
def test_load_flow_with_custom_driver():
with Flow().add(uses=str(cur_dir / 'yaml/test-executor-with-custom-driver.yml')) as f:
f.index([Document()], on_done=validate)
| 31.193548
| 110
| 0.733195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.235781
|
8393065c4b6aeffae6a10f048cd67e3a8fa65388
| 373
|
py
|
Python
|
02-19-Cuma/forLoop.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 9
|
2021-03-16T20:21:54.000Z
|
2022-01-08T09:15:10.000Z
|
02-19-Cuma/forLoop.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-02-28T21:27:17.000Z
|
2021-02-28T21:27:17.000Z
|
02-19-Cuma/forLoop.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-05-24T11:34:48.000Z
|
2021-05-24T11:34:48.000Z
|
# for loops
# for letter in "Cihat Salik":
# print(letter)
friends = ["Hasan", "Mahmut", "Ali", "Veli"]
for friend in friends:
print(friend)
for index in range(3, 10):
print(index)
for index in range(len(friends)):
print(friends[index])
for index in range(5):
if index == 0:
print("First Iteration")
else:
print("Not first")
| 15.541667
| 44
| 0.600536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.30563
|
839340ab08b4524ada1d06b4a611b58353ecf4dc
| 3,813
|
py
|
Python
|
ricga/ricga_server.py
|
MeteorKepler/laughing-invention
|
6f856d7ba27f956d8dceb18fe14ba2575beae6aa
|
[
"Apache-2.0"
] | 1
|
2018-04-12T01:44:32.000Z
|
2018-04-12T01:44:32.000Z
|
ricga/ricga_server.py
|
MeteorKepler/RICGA
|
6f856d7ba27f956d8dceb18fe14ba2575beae6aa
|
[
"Apache-2.0"
] | null | null | null |
ricga/ricga_server.py
|
MeteorKepler/RICGA
|
6f856d7ba27f956d8dceb18fe14ba2575beae6aa
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cgi
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import tensorflow as tf
from ricga import configuration
from ricga import inference_wrapper
from ricga.inference_utils import caption_generator
from ricga.inference_utils import vocabulary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "/home/meteorshub/code/RICGA/ricga/model/train",
"Model checkpoint file or directory containing a "
"model checkpoint file.")
tf.flags.DEFINE_string("vocab_file", "/home/meteorshub/code/RICGA/ricga/data/mscoco/word_counts.txt",
"Text file containing the vocabulary.")
tf.flags.DEFINE_string("server_ip", "59.66.143.35", "Server address")
tf.flags.DEFINE_integer("server_port", 8080, "server port")
tf.logging.set_verbosity(tf.logging.INFO)
class InferenceModel(object):
def __init__(self):
g = tf.Graph()
with g.as_default():
model = inference_wrapper.InferenceWrapper()
restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
FLAGS.checkpoint_path)
g.finalize()
# Create the vocabulary.
vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
sess = tf.Session(graph=g)
restore_fn(sess)
generator = caption_generator.CaptionGenerator(model, vocab)
self.vocab = vocab
self.sess = sess
self.generator = generator
def run_inf(self, image_data):
captions = self.generator.beam_search(self.sess, image_data)
caption = captions[0]
sentence = [self.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
return sentence
inf_model = InferenceModel()
class GetHandler(BaseHTTPRequestHandler):
def do_GET(self):
form_message = """<p>RICGA:please upload a picture(jpeg)</p>
<form method="post" action="http://%s:%s" enctype="multipart/form-data">
<input name="file" type="file" accept="image/jpeg" />
<input name="token" type="hidden" />
<input type="submit" value="upload" /></form>""" % (FLAGS.server_ip, FLAGS.server_port)
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(form_message.encode('utf-8'))
def do_POST(self):
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']
})
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
for field in form.keys():
if field == 'file':
image_file = form[field]
if image_file.filename:
image_data = image_file.file.read()
caption = inf_model.run_inf(image_data)
# caption = "success"
del image_data
message = "Caption: %s" % caption
self.wfile.write(message.encode("utf-8"))
return
self.wfile.write("failure!!".encode('utf-8'))
def main(_):
server = HTTPServer(('0.0.0.0', FLAGS.server_port), GetHandler)
print('Starting server, use <ctrl-c> to stop')
server.serve_forever()
if __name__ == "__main__":
tf.app.run()
| 37.019417
| 115
| 0.59402
| 2,617
| 0.686336
| 0
| 0
| 0
| 0
| 0
| 0
| 976
| 0.255966
|
839409e02d06e81916f52fdb0dab1efe39b69585
| 2,190
|
py
|
Python
|
rotation_analysis/analysis/probe/gui/gui.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | null | null | null |
rotation_analysis/analysis/probe/gui/gui.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | 3
|
2021-08-09T21:51:41.000Z
|
2021-08-09T21:51:45.000Z
|
rotation_analysis/analysis/probe/gui/gui.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | 3
|
2021-10-16T14:07:59.000Z
|
2021-10-16T17:09:03.000Z
|
import os
import sys
if sys.platform.startswith('linux'):
from OpenGL import GL
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtWidgets import QApplication
from analysis.probe.gui.backend_classes import PythonBackendClass1, Logger
from analysis.probe.gui.image_providers import PyplotImageProvider
DEBUG = False
if __name__ == '__main__':
app = QApplication(sys.argv)
appEngine = QQmlApplicationEngine()
context = appEngine.rootContext()
analysis_image_provider1 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider1", analysis_image_provider1)
analysis_image_provider2 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider2", analysis_image_provider2)
# ALL THE ADDIMAGEPROVIDER LINES BELOW ARE REQUIRED TO MAKE QML BELIEVE THE PROVIDER IS VALID BEFORE ITS CREATION
# appEngine.addImageProvider('viewerprovider', CvImageProvider())
# analysis_image_provider = PyplotImageProvider(fig=None)
# appEngine.addImageProvider("analysisprovider", analysis_image_provider)
conf = {
'shared_directory': './' # FIXME: this is obviously BS
}
qml_source_path = os.path.join(conf['shared_directory'], 'qml', 'gui_qtquick', 'gui_qtquick.qml')
if not os.path.isfile(qml_source_path):
raise ValueError("Qml code not found at {}, please verify your installation".format(qml_source_path))
appEngine.load(qml_source_path)
try:
win = appEngine.rootObjects()[0]
except IndexError:
raise ValueError("Could not start the QT GUI")
if not DEBUG:
logger = Logger(context, win, "log")
sys.stdout = logger
print('Hello world')
# icon = QIcon(os.path.join(conf.shared_directory, 'resources', 'icons', 'pyper.png'))
# win.setIcon(icon)
backend = PythonBackendClass1(app, context, win, analysis_image_provider1, analysis_image_provider2) # create instance of backend
context.setContextProperty('py_iface', backend) # register backend python object with qml code under variable name py_iface
win.show()
sys.exit(app.exec_())
| 35.322581
| 135
| 0.719635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 799
| 0.36484
|
83944542d560f5e410e723d98ca83aade353b2f1
| 3,701
|
py
|
Python
|
test/integration/test_forcemerge.py
|
jgough/opensearch-curator
|
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_forcemerge.py
|
jgough/opensearch-curator
|
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_forcemerge.py
|
jgough/opensearch-curator
|
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
|
[
"Apache-2.0"
] | null | null | null |
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
from time import sleep
import click
from click import testing as clicktest
from mock import patch, Mock
from . import CuratorTestCase
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200
class TestActionFileforceMerge(CuratorTestCase):
def test_merge(self):
count = 1
idx = 'my_index'
self.create_index(idx)
self.add_docs(idx)
ilo1 = curator.IndexList(self.client)
ilo1._get_segment_counts()
self.assertEqual(3, ilo1.index_info[idx]['segments'])
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.forcemerge_test.format(count, 0.9))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
ilo2 = curator.IndexList(self.client)
# This stupid block is only for the benefit of Travis CI
# With Python 2.7 and ES 7.0, it apparently can finish testing before
# the segments have _reported_ as fully merged. This is forcing
# 3 checks before giving up and reporting the result.
for _ in range(0, 3):
self.client.indices.refresh(index=idx)
ilo2._get_segment_counts()
if ilo2.index_info[idx]['segments'] == count:
break
else:
sleep(1)
self.assertEqual(count, ilo2.index_info[idx]['segments'])
def test_extra_option(self):
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.bad_option_proto_test.format('forcemerge'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEqual(1, result.exit_code)
class TestCLIforceMerge(CuratorTestCase):
def test_merge(self):
count = 1
idx = 'my_index'
self.create_index(idx)
self.add_docs(idx)
ilo1 = curator.IndexList(self.client)
ilo1._get_segment_counts()
self.assertEqual(3, ilo1.index_info[idx]['segments'])
args = self.get_runner_args()
args += [
'--config', self.args['configfile'],
'forcemerge',
'--max_num_segments', str(count),
'--delay', '0.9',
'--filter_list', '{"filtertype":"pattern","kind":"prefix","value":"my"}',
]
self.assertEqual(0, self.run_subprocess(args, logname='TestCLIforceMerge.test_merge'))
ilo2 = curator.IndexList(self.client)
# This stupid block is only for the benefit of Travis CI
# With Python 2.7 and ES 7.0, it apparently can finish testing before
# the segments have _reported_ as fully merged. This is forcing
# 3 checks before giving up and reporting the result.
for _ in range(0, 3):
self.client.indices.refresh(index=idx)
ilo2._get_segment_counts()
if ilo2.index_info[idx]['segments'] == count:
break
else:
sleep(1)
self.assertEqual(count, ilo2.index_info[idx]['segments'])
| 37.765306
| 94
| 0.604431
| 3,263
| 0.881654
| 0
| 0
| 0
| 0
| 0
| 0
| 893
| 0.241286
|
8394cbef054df0807f179df99652e99fb23bca5e
| 7,331
|
py
|
Python
|
datacube_alchemist/_utils.py
|
erin-telfer/datacube-alchemist
|
4c37b2243027769f01ce0729e5ff56d0f6354316
|
[
"Apache-2.0"
] | 15
|
2020-06-23T06:03:41.000Z
|
2021-12-23T00:19:01.000Z
|
datacube_alchemist/_utils.py
|
erin-telfer/datacube-alchemist
|
4c37b2243027769f01ce0729e5ff56d0f6354316
|
[
"Apache-2.0"
] | 69
|
2019-08-14T02:03:38.000Z
|
2022-03-04T03:38:20.000Z
|
datacube_alchemist/_utils.py
|
erin-telfer/datacube-alchemist
|
4c37b2243027769f01ce0729e5ff56d0f6354316
|
[
"Apache-2.0"
] | 3
|
2020-09-21T22:01:34.000Z
|
2021-09-22T03:02:26.000Z
|
import json
from pathlib import Path
import re
from typing import Dict
import boto3
import structlog
from datacube.model import Dataset
from datacube.virtual import Measurement, Transformation
from eodatasets3 import DatasetAssembler, serialise
from eodatasets3.model import DatasetDoc, ProductDoc
from eodatasets3.properties import StacPropertyView
from eodatasets3.scripts.tostac import dc_to_stac, json_fallback
from eodatasets3.verify import PackageChecksum
from toolz.dicttoolz import get_in
from datacube_alchemist.settings import AlchemistTask
# Regex for extracting region codes from tile IDs.
RE_TILE_REGION_CODE = re.compile(r".*A\d{6}_T(\w{5})_N\d{2}\.\d{2}")
class FakeTransformation(Transformation):
"""
Only writes input to output
"""
def measurements(self, input_measurements) -> Dict[str, Measurement]:
return input_measurements
def compute(self, data) -> Dataset:
return data
def _configure_logger():
processors = [
structlog.stdlib.add_log_level,
structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S"),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.dev.ConsoleRenderer(),
]
structlog.configure(
processors=processors,
context_class=dict,
cache_logger_on_first_use=True,
logger_factory=structlog.PrintLoggerFactory(),
)
def _write_thumbnail(task: AlchemistTask, dataset_assembler: DatasetAssembler):
if task.settings.output.preview_image is not None:
dataset_assembler.write_thumbnail(**task.settings.output.preview_image)
elif task.settings.output.preview_image_singleband is not None:
dataset_assembler.write_thumbnail_singleband(
**task.settings.output.preview_image_singleband
)
def _write_stac(
metadata_path: Path,
destination_path: str,
explorer_url: str,
dataset_assembler: DatasetAssembler,
):
out_dataset = serialise.from_path(metadata_path)
stac_path = Path(str(metadata_path).replace("odc-metadata.yaml", "stac-item.json"))
# Make sure destination path has a / at the end. Clumsy, but necessary.
stac = dc_to_stac(
out_dataset,
metadata_path,
stac_path,
destination_path.rstrip("/") + "/",
explorer_url,
False,
)
with stac_path.open("w") as f:
json.dump(stac, f, default=json_fallback)
dataset_assembler.add_accessory_file("metadata:stac", stac_path)
checksummer = PackageChecksum()
checksum_file = (
Path(dataset_assembler.names.dataset_location.lstrip("file:"))
/ dataset_assembler._accessories["checksum:sha1"].name
)
checksummer.read(checksum_file)
checksummer.add_file(stac_path)
checksummer.write(checksum_file)
return stac
def _stac_to_sns(sns_arn, stac):
"""
Publish our STAC document to an SNS
"""
bbox = stac["bbox"]
product_name = get_in(["properties", "odc:product"], stac, None)
if product_name is None:
product_name = stac.get("collection", None)
if product_name is None:
raise ValueError("No 'odc:product_name' or 'collection' found in STAC doc")
attributes = {
"action": {"DataType": "String", "StringValue": "ADDED"},
"datetime": {
"DataType": "String",
"StringValue": str(get_in(["properties", "datetime"], stac)),
},
"product": {
"DataType": "String",
"StringValue": product_name,
},
"bbox.ll_lon": {"DataType": "Number", "StringValue": str(bbox[0])},
"bbox.ll_lat": {"DataType": "Number", "StringValue": str(bbox[1])},
"bbox.ur_lon": {"DataType": "Number", "StringValue": str(bbox[2])},
"bbox.ur_lat": {"DataType": "Number", "StringValue": str(bbox[3])},
}
maturity = get_in(["properties", "dea:dataset_maturity"], stac)
if maturity is not None:
attributes["maturity"] = {"DataType": "String", "StringValue": maturity}
client = boto3.client("sns")
client.publish(
TopicArn=sns_arn,
Message=json.dumps(stac, indent=4, default=json_fallback),
MessageAttributes=attributes,
)
def _munge_dataset_to_eo3(ds: Dataset) -> DatasetDoc:
"""
Convert to the DatasetDoc format that eodatasets expects.
"""
if ds.metadata_type.name in {"eo_plus", "eo_s2_nrt", "gqa_eo"}:
# Handle S2 NRT metadata identically to eo_plus files.
# gqa_eo is the S2 ARD with extra quality check fields.
return _convert_eo_plus(ds)
if ds.metadata_type.name == "eo":
return _convert_eo(ds)
# Else we have an already mostly eo3 style dataset
product = ProductDoc(name=ds.type.name)
# Wrap properties to avoid typos and the like
properties = StacPropertyView(ds.metadata_doc.get("properties", {}))
if properties.get("eo:gsd"):
del properties["eo:gsd"]
return DatasetDoc(
id=ds.id,
product=product,
crs=str(ds.crs),
properties=properties,
geometry=ds.extent,
)
def _guess_region_code(ds: Dataset) -> str:
"""
Get the region code of a dataset.
"""
try:
# EO plus
return ds.metadata.region_code
except AttributeError:
# Not EO plus
pass
try:
# EO
return ds.metadata_doc["region_code"]
except KeyError:
# No region code!
pass
# Region code not specified, so get it from the tile ID.
# An example of such a tile ID for S2A NRT is:
# S2A_OPER_MSI_L1C_TL_VGS1_20201114T053541_A028185_T50JPP_N02.09
# The region code is 50JPP.
tile_match = RE_TILE_REGION_CODE.match(ds.metadata_doc["tile_id"])
if not tile_match:
raise ValueError("No region code for dataset {}".format(ds.id))
return tile_match.group(1)
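# Quick sanity check of RE_TILE_REGION_CODE against the example tile ID from
# the comment above (illustrative snippet, not part of the original module):
if __name__ == "__main__":
    _tile = "S2A_OPER_MSI_L1C_TL_VGS1_20201114T053541_A028185_T50JPP_N02.09"
    assert RE_TILE_REGION_CODE.match(_tile).group(1) == "50JPP"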
def _convert_eo_plus(ds) -> DatasetDoc:
# Definitely need:
# - 'datetime'
# - 'eo:instrument'
# - 'eo:platform'
# - 'odc:region_code'
region_code = _guess_region_code(ds)
properties = StacPropertyView(
{
"odc:region_code": region_code,
"datetime": ds.center_time,
"eo:instrument": ds.metadata.instrument,
"eo:platform": ds.metadata.platform,
"landsat:landsat_scene_id": ds.metadata_doc.get(
"tile_id", "??"
), # Used to find abbreviated instrument id
"sentinel:sentinel_tile_id": ds.metadata_doc.get("tile_id", "??"),
}
)
product = ProductDoc(name=ds.type.name)
return DatasetDoc(id=ds.id, product=product, crs=str(ds.crs), properties=properties)
def _convert_eo(ds) -> DatasetDoc:
# Definitely need:
# - 'datetime'
# - 'eo:instrument'
# - 'eo:platform'
# - 'odc:region_code'
region_code = _guess_region_code(ds)
properties = StacPropertyView(
{
"odc:region_code": region_code,
"datetime": ds.center_time,
"eo:instrument": ds.metadata.instrument,
"eo:platform": ds.metadata.platform,
"landsat:landsat_scene_id": ds.metadata.instrument, # Used to find abbreviated instrument id
}
)
product = ProductDoc(name=ds.type.name)
return DatasetDoc(id=ds.id, product=product, crs=str(ds.crs), properties=properties)
| 32.438053
| 105
| 0.657755
| 259
| 0.035329
| 0
| 0
| 0
| 0
| 0
| 0
| 2,010
| 0.274178
|
8394cda94ca23da8940ee7626693fe1126d8fab2
| 834
|
py
|
Python
|
HMBBF/migrations/0015_auto_20161202_1733.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
HMBBF/migrations/0015_auto_20161202_1733.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
HMBBF/migrations/0015_auto_20161202_1733.py
|
HLoveMe/HWMBBF_Serve
|
a11fb5b67c913b62df839ce3438a3be433e3865b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HMBBF', '0014_theme'),
]
operations = [
migrations.AddField(
model_name='theme',
name='time',
field=models.CharField(default='\u4e0d\u5fc5\u586b\u5199', max_length=256, verbose_name='\u5177\u4f53\u65f6\u95f4\u6bb5'),
),
migrations.AlterField(
model_name='theme',
name='date',
field=models.DateField(verbose_name='\u65f6\u95f4(\u54ea\u4e00\u5929)'),
),
migrations.AlterField(
model_name='theme',
name='time_end',
field=models.DateTimeField(verbose_name='\u7ed3\u675f\u65f6\u95f4'),
),
]
| 27.8
| 134
| 0.588729
| 725
| 0.869305
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.243405
|
83963bff306e66c0a55e66eed48eb8e977fd0dbd
| 4,649
|
py
|
Python
|
rb/processings/text_classifier/text_classifier.py
|
readerbench/ReaderBench
|
f0588a9a63ba21e3b8c2e5e5bc474904c07f6897
|
[
"Apache-2.0"
] | null | null | null |
rb/processings/text_classifier/text_classifier.py
|
readerbench/ReaderBench
|
f0588a9a63ba21e3b8c2e5e5bc474904c07f6897
|
[
"Apache-2.0"
] | 2
|
2021-10-17T14:00:52.000Z
|
2021-10-17T14:00:52.000Z
|
rb/processings/text_classifier/text_classifier.py
|
readerbench/ReaderBench
|
f0588a9a63ba21e3b8c2e5e5bc474904c07f6897
|
[
"Apache-2.0"
] | null | null | null |
from rb.core.lang import Lang
from rb.core.document import Document
from rb.complexity.complexity_index import ComplexityIndex, compute_indices
from rb.similarity.word2vec import Word2Vec
from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel
from rb.similarity.vector_model_factory import VECTOR_MODELS, create_vector_model
from typing import Tuple, List
from sklearn.svm import SVR
from collections import Counter
from sklearn import svm
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
import matplotlib.pyplot as plt
import pickle
import os
import csv
import random
from werkzeug.utils import secure_filename
import uuid
from rb.cna.cna_graph import CnaGraph
from rb.utils.rblogger import Logger
logger = Logger.get_logger()
class TextClassifier:
def __init__(self):
pass
def get_vector_model(self, lang: Lang = Lang.RO) -> VectorModel:
global logger
if lang is Lang.RO:
vector_model = create_vector_model(Lang.RO, VectorModelType.from_str('word2vec'), "readme")
elif lang is Lang.EN:
vector_model = create_vector_model(Lang.EN, VectorModelType.from_str("word2vec"), "coca")
else:
logger.error(f'Language {lang.value} is not supported for essay scoring task')
vector_model = None
return vector_model
def read_indices(self, base_folder: str = 'categories_readme', lang=Lang.RO) -> List[List[float]]:
categories = ['general_stats.csv', 'literature_stats.csv', 'science_stats.csv']
results = []
indices = []
if lang is Lang.RO:
with open('rb/processings/text_classifier/indices_ro_class.txt', 'rt', encoding='utf-8') as f:
for line in f:
indices.append(line.strip())
for j, cat in enumerate(categories):
essay_r = csv.reader(open(os.path.join(base_folder, cat), 'rt', encoding='utf-8'))
# the first column of each result row is the class label
for i, row in enumerate(essay_r):
if i == 0:
indices_row = row
continue
res = [j]
for findex in indices:
for k, rr in enumerate(row):
if indices_row[k].strip() == findex:
res.append(rr)
break
results.append(res)
return results
def train_svm(self, results: List[List], save_model_file=None):
total = len(results)
random.shuffle(results)
train_samples = int(total * 0.8)
train = results[:train_samples]
test = results[train_samples:]
y = [int(r[0]) for r in train]
X = [r[1:] for r in train]
clf = svm.SVC(kernel='poly', degree=14, class_weight={0: 0.1, 1: 0.6, 2: 0.3}).fit(X, y)
if save_model_file:
pickle.dump(clf, open(save_model_file, 'wb'))
dev_out, dev_in = [], []
for sample_x in test:
if int(sample_x[0]) == 0 and random.random() < 0.7:
continue
dev_out.append(int(sample_x[0]))
Xx = sample_x[1:]
dev_in.append(Xx)
print(Counter(dev_out))
disp = plot_confusion_matrix(clf, dev_in, dev_out, display_labels=['general', 'science', 'literature', ])
res = clf.predict(dev_in)
disp.ax_.set_title('Confusion Matrix')
right, wrong = 0, 0
for r, clss in zip(res, dev_out):
if r != clss:
wrong += 1
else:
right += 1
logger.info('Acc for classification : {}'.format(right/(wrong + right)))
plt.show()
def predict(self, content: str, file_to_svr_model: str, lang=Lang.RO) -> int:
svr = pickle.load(open(file_to_svr_model, "rb"))
doc = Document(lang=lang, text=content)
vector_model = self.get_vector_model(lang=lang)
cna_graph = CnaGraph(docs=doc, models=[vector_model])
compute_indices(doc=doc, cna_graph=cna_graph)
indices = []
if lang is Lang.RO:
with open('rb/processings/text_classifier/indices_ro_class.txt', 'rt', encoding='utf-8') as f:
for line in f:
indices.append(line.strip())
values_indices = []
for ind in indices:
for key, v in doc.indices.items():
if repr(key) == ind:
values_indices.append(v)
break
class_txt = svr.predict([values_indices])[0]
return class_txt
| 35.219697
| 113
| 0.591955
| 3,868
| 0.832007
| 0
| 0
| 0
| 0
| 0
| 0
| 437
| 0.093999
|
839812d03b6dbafa768b4338253f5ebbd452fe07
| 821
|
py
|
Python
|
vault_importer/csv.py
|
rpetti/vault-keepassxc-importer
|
7258a1062a52426e44fddce57d0f841f98f3c2c1
|
[
"Apache-2.0"
] | null | null | null |
vault_importer/csv.py
|
rpetti/vault-keepassxc-importer
|
7258a1062a52426e44fddce57d0f841f98f3c2c1
|
[
"Apache-2.0"
] | null | null | null |
vault_importer/csv.py
|
rpetti/vault-keepassxc-importer
|
7258a1062a52426e44fddce57d0f841f98f3c2c1
|
[
"Apache-2.0"
] | null | null | null |
import csv
class Csv:
def __init__(self, csv_file):
self.reader = csv.reader(open(csv_file, newline=''), delimiter=',')
def parse(self):
parsed = []
# Ignore the first line, it is the header
next(self.reader)
for row in self.reader:
secret = {
'path': self.path(row[0], row[1]),
'username': row[2],
'password': row[3],
'url': row[4],
'notes': row[5]
}
parsed.append(secret)
return parsed
def path(self, path, title):
path_a = path.split('/')
if path_a[0] == 'Root':
vault_path_a = path_a[1:]
else:
vault_path_a = path_a
vault_path_a.append(title)
return '/'.join(vault_path_a)
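# Tiny illustrative check of the path mapping above (hypothetical values):
# a KeePassXC group path 'Root/web/shops' with title 'amazon' maps to the
# vault path 'web/shops/amazon'; paths without the 'Root' prefix are used
# as-is.
if __name__ == '__main__':
    c = Csv.__new__(Csv)  # bypass __init__ so no CSV file is needed
    print(c.path('Root/web/shops', 'amazon'))  # -> web/shops/amazon
    print(c.path('work/vpn', 'office'))        # -> work/vpn/office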
| 26.483871
| 75
| 0.483557
| 807
| 0.982948
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.116931
|
839832c0e53eab95cbbd979af3ec19abef8086bb
| 3,069
|
py
|
Python
|
src/reader/_plugins/enclosure_tags.py
|
mirekdlugosz/reader
|
d929b88f1981085b68e82019aa59af126479d4a9
|
[
"BSD-3-Clause"
] | 205
|
2018-07-14T12:54:21.000Z
|
2022-03-29T06:47:13.000Z
|
src/reader/_plugins/enclosure_tags.py
|
mirekdlugosz/reader
|
d929b88f1981085b68e82019aa59af126479d4a9
|
[
"BSD-3-Clause"
] | 275
|
2018-01-28T20:57:13.000Z
|
2022-03-29T21:45:11.000Z
|
src/reader/_plugins/enclosure_tags.py
|
mirekdlugosz/reader
|
d929b88f1981085b68e82019aa59af126479d4a9
|
[
"BSD-3-Clause"
] | 12
|
2021-01-01T17:15:53.000Z
|
2022-03-22T09:38:12.000Z
|
"""
enclosure_tags
~~~~~~~~~~~~~~
Fix tags for MP3 enclosures (e.g. podcasts).
Adds a "with tags" link to a version of the file with tags set as follows:
* the entry title as title
* the feed title as album
* the entry/feed author as author
This plugin needs additional dependencies, use the ``unstable-plugins`` extra
to install them:
.. code-block:: bash
pip install reader[unstable-plugins]
To load::
READER_APP_PLUGIN='reader._plugins.enclosure_tags:init' \\
python -m reader serve
Implemented for https://github.com/lemon24/reader/issues/50.
Became a plugin in https://github.com/lemon24/reader/issues/52.
"""
import tempfile
from urllib.parse import urlparse
import mutagen.mp3
import requests
from flask import Blueprint
from flask import request
from flask import Response
from flask import stream_with_context
from flask import url_for
blueprint = Blueprint('enclosure_tags', __name__)
ALL_TAGS = ('album', 'title', 'artist')
SET_ONLY_IF_MISSING_TAGS = {'artist'}
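# 'artist' is written only when the file has no artist tag yet; 'album' and
# 'title' are overwritten whenever a value arrives in the query string
# (see update_tags() below).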
@blueprint.route('/enclosure-tags', defaults={'filename': None})
@blueprint.route('/enclosure-tags/<filename>')
def enclosure_tags(filename):
def update_tags(file):
emp3 = mutagen.mp3.EasyMP3(file)
changed = False
for key in ALL_TAGS:
if key in SET_ONLY_IF_MISSING_TAGS and emp3.get(key):
continue
value = request.args.get(key)
if not value:
continue
emp3[key] = [value]
changed = True
if changed:
emp3.save(file)
file.seek(0)
def chunks(req):
# Send the headers as soon as possible.
# Some browsers wait for the headers before showing the "Save As" dialog.
yield ''
tmp = tempfile.TemporaryFile()
for chunk in req.iter_content(chunk_size=2 ** 20):
tmp.write(chunk)
tmp.seek(0)
update_tags(tmp)
try:
while True:
data = tmp.read(2 ** 20)
if not data:
break
yield data
finally:
tmp.close()
url = request.args['url']
req = requests.get(url, stream=True)
headers = {}
for name in ('Content-Type', 'Content-Disposition'):
if name in req.headers:
headers[name] = req.headers[name]
return Response(stream_with_context(chunks(req)), headers=headers)
def enclosure_tags_filter(enclosure, entry):
filename = urlparse(enclosure.href).path.split('/')[-1]
if not filename.endswith('.mp3'):
return []
args = {'url': enclosure.href, 'filename': filename}
if entry.title:
args['title'] = entry.title
if entry.feed.title:
args['album'] = entry.feed.title
if entry.author or entry.feed.author:
args['artist'] = entry.author or entry.feed.author
return [('with tags', url_for('enclosure_tags.enclosure_tags', **args))]
def init(app):
app.register_blueprint(blueprint)
app.reader_additional_enclosure_links.append(enclosure_tags_filter)
| 26.008475
| 81
| 0.640274
| 0
| 0
| 1,306
| 0.425546
| 1,418
| 0.46204
| 0
| 0
| 979
| 0.318996
|
8398b2a65cd51b95d6dff4f5e09806cedb08e588
| 454
|
py
|
Python
|
sqllite/delete_the_data.py
|
arjunjanamatti/pymongo_practise
|
d69153f6a0cce9416b10c0adf300986bfe9dfe22
|
[
"Apache-2.0"
] | null | null | null |
sqllite/delete_the_data.py
|
arjunjanamatti/pymongo_practise
|
d69153f6a0cce9416b10c0adf300986bfe9dfe22
|
[
"Apache-2.0"
] | null | null | null |
sqllite/delete_the_data.py
|
arjunjanamatti/pymongo_practise
|
d69153f6a0cce9416b10c0adf300986bfe9dfe22
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
mydb = sqlite3.connect(database='namelist')
with mydb:
cur = mydb.cursor()
name = 'update_name_placeholder'
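# The '?' placeholder below is bound from the (name,) tuple by sqlite3
# itself, which avoids building the SQL string by hand (and SQL injection).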
cur.execute('DELETE FROM users WHERE First_name = ?', (name,))
mydb.commit()
print('Data deleted!!!')
cur = mydb.cursor()
selectquery = 'SELECT * FROM users'
cur.execute(selectquery)
results = cur.fetchall()
print('Remaining data: ')
for row in results:
print(row)
| 16.814815
| 66
| 0.627753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.286344
|
8398ea34b25e65cea834c631c5374561393da5cc
| 35,415
|
py
|
Python
|
tools/messages/aura4_messages.py
|
AuraUAS/aura-core
|
4711521074db72ba9089213e14455d89dc5306c0
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 8
|
2016-08-03T19:35:03.000Z
|
2019-12-15T06:25:05.000Z
|
tools/messages/aura4_messages.py
|
AuraUAS/aura-core
|
4711521074db72ba9089213e14455d89dc5306c0
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 4
|
2018-09-27T15:48:56.000Z
|
2018-11-05T12:38:10.000Z
|
tools/messages/aura4_messages.py
|
AuraUAS/aura-core
|
4711521074db72ba9089213e14455d89dc5306c0
|
[
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5
|
2017-06-28T19:15:36.000Z
|
2020-02-19T19:31:24.000Z
|
import struct
# Message id constants
command_ack_id = 10
config_airdata_id = 11
config_board_id = 12
config_ekf_id = 13
config_imu_id = 14
config_mixer_id = 15
config_mixer_matrix_id = 16
config_power_id = 17
config_pwm_id = 18
config_stability_damping_id = 19
command_inceptors_id = 20
command_zero_gyros_id = 21
command_reset_ekf_id = 22
command_cycle_inceptors_id = 23
pilot_id = 24
imu_id = 25
aura_nav_pvt_id = 26
airdata_id = 27
power_id = 28
status_id = 29
ekf_id = 30
# Constants
pwm_channels = 8 # number of pwm output channels
sbus_channels = 16 # number of sbus channels
ap_channels = 6 # number of sbus channels
mix_matrix_size = 64 # 8 x 8 mix matrix
# Enums
enum_nav_none = 0 # None
enum_nav_nav15 = 1 # None
enum_nav_nav15_mag = 2 # None
# Message: command_ack
# Id: 10
class command_ack():
id = 10
_pack_string = "<BB"
def __init__(self, msg=None):
# public fields
self.command_id = 0
self.subcommand_id = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.command_id,
self.subcommand_id)
return msg
def unpack(self, msg):
(self.command_id,
self.subcommand_id) = struct.unpack(self._pack_string, msg)
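# Illustrative round trip for the pack/unpack pattern shared by every
# message class in this generated file (example field values only):
if __name__ == "__main__":
    ack = command_ack()
    ack.command_id = command_inceptors_id
    ack.subcommand_id = 0
    wire = ack.pack()             # 2 bytes, little endian ("<BB")
    echo = command_ack(msg=wire)  # unpacks on construction
    assert (echo.command_id, echo.subcommand_id) == (command_inceptors_id, 0)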
# Message: config_airdata
# Id: 11
class config_airdata():
id = 11
_pack_string = "<BBBB"
def __init__(self, msg=None):
# public fields
self.barometer = 0
self.pitot = 0
self.swift_baro_addr = 0
self.swift_pitot_addr = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.barometer,
self.pitot,
self.swift_baro_addr,
self.swift_pitot_addr)
return msg
def unpack(self, msg):
(self.barometer,
self.pitot,
self.swift_baro_addr,
self.swift_pitot_addr) = struct.unpack(self._pack_string, msg)
# Message: config_board
# Id: 12
class config_board():
id = 12
_pack_string = "<BB"
def __init__(self, msg=None):
# public fields
self.board = 0
self.led_pin = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.board,
self.led_pin)
return msg
def unpack(self, msg):
(self.board,
self.led_pin) = struct.unpack(self._pack_string, msg)
# Message: config_ekf
# Id: 13
class config_ekf():
id = 13
_pack_string = "<Bfffffffffff"
def __init__(self, msg=None):
# public fields
self.select = 0
self.sig_w_accel = 0.0
self.sig_w_gyro = 0.0
self.sig_a_d = 0.0
self.tau_a = 0.0
self.sig_g_d = 0.0
self.tau_g = 0.0
self.sig_gps_p_ne = 0.0
self.sig_gps_p_d = 0.0
self.sig_gps_v_ne = 0.0
self.sig_gps_v_d = 0.0
self.sig_mag = 0.0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.select,
self.sig_w_accel,
self.sig_w_gyro,
self.sig_a_d,
self.tau_a,
self.sig_g_d,
self.tau_g,
self.sig_gps_p_ne,
self.sig_gps_p_d,
self.sig_gps_v_ne,
self.sig_gps_v_d,
self.sig_mag)
return msg
def unpack(self, msg):
(self.select,
self.sig_w_accel,
self.sig_w_gyro,
self.sig_a_d,
self.tau_a,
self.sig_g_d,
self.tau_g,
self.sig_gps_p_ne,
self.sig_gps_p_d,
self.sig_gps_v_ne,
self.sig_gps_v_d,
self.sig_mag) = struct.unpack(self._pack_string, msg)
# Message: config_imu
# Id: 14
class config_imu():
id = 14
_pack_string = "<BBfffffffffffffffffffffffffffffff"
def __init__(self, msg=None):
# public fields
self.interface = 0
self.pin_or_address = 0
self.strapdown_calib = [0.0] * 9
self.accel_scale = [0.0] * 3
self.accel_translate = [0.0] * 3
self.mag_affine = [0.0] * 16
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.interface,
self.pin_or_address,
self.strapdown_calib[0],
self.strapdown_calib[1],
self.strapdown_calib[2],
self.strapdown_calib[3],
self.strapdown_calib[4],
self.strapdown_calib[5],
self.strapdown_calib[6],
self.strapdown_calib[7],
self.strapdown_calib[8],
self.accel_scale[0],
self.accel_scale[1],
self.accel_scale[2],
self.accel_translate[0],
self.accel_translate[1],
self.accel_translate[2],
self.mag_affine[0],
self.mag_affine[1],
self.mag_affine[2],
self.mag_affine[3],
self.mag_affine[4],
self.mag_affine[5],
self.mag_affine[6],
self.mag_affine[7],
self.mag_affine[8],
self.mag_affine[9],
self.mag_affine[10],
self.mag_affine[11],
self.mag_affine[12],
self.mag_affine[13],
self.mag_affine[14],
self.mag_affine[15])
return msg
def unpack(self, msg):
(self.interface,
self.pin_or_address,
self.strapdown_calib[0],
self.strapdown_calib[1],
self.strapdown_calib[2],
self.strapdown_calib[3],
self.strapdown_calib[4],
self.strapdown_calib[5],
self.strapdown_calib[6],
self.strapdown_calib[7],
self.strapdown_calib[8],
self.accel_scale[0],
self.accel_scale[1],
self.accel_scale[2],
self.accel_translate[0],
self.accel_translate[1],
self.accel_translate[2],
self.mag_affine[0],
self.mag_affine[1],
self.mag_affine[2],
self.mag_affine[3],
self.mag_affine[4],
self.mag_affine[5],
self.mag_affine[6],
self.mag_affine[7],
self.mag_affine[8],
self.mag_affine[9],
self.mag_affine[10],
self.mag_affine[11],
self.mag_affine[12],
self.mag_affine[13],
self.mag_affine[14],
self.mag_affine[15]) = struct.unpack(self._pack_string, msg)
# Message: config_mixer
# Id: 15
class config_mixer():
id = 15
_pack_string = "<BBBBBBBfffffffffff"
def __init__(self, msg=None):
# public fields
self.mix_autocoord = False
self.mix_throttle_trim = False
self.mix_flap_trim = False
self.mix_elevon = False
self.mix_flaperon = False
self.mix_vtail = False
self.mix_diff_thrust = False
self.mix_Gac = 0.0
self.mix_Get = 0.0
self.mix_Gef = 0.0
self.mix_Gea = 0.0
self.mix_Gee = 0.0
self.mix_Gfa = 0.0
self.mix_Gff = 0.0
self.mix_Gve = 0.0
self.mix_Gvr = 0.0
self.mix_Gtt = 0.0
self.mix_Gtr = 0.0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.mix_autocoord,
self.mix_throttle_trim,
self.mix_flap_trim,
self.mix_elevon,
self.mix_flaperon,
self.mix_vtail,
self.mix_diff_thrust,
self.mix_Gac,
self.mix_Get,
self.mix_Gef,
self.mix_Gea,
self.mix_Gee,
self.mix_Gfa,
self.mix_Gff,
self.mix_Gve,
self.mix_Gvr,
self.mix_Gtt,
self.mix_Gtr)
return msg
def unpack(self, msg):
(self.mix_autocoord,
self.mix_throttle_trim,
self.mix_flap_trim,
self.mix_elevon,
self.mix_flaperon,
self.mix_vtail,
self.mix_diff_thrust,
self.mix_Gac,
self.mix_Get,
self.mix_Gef,
self.mix_Gea,
self.mix_Gee,
self.mix_Gfa,
self.mix_Gff,
self.mix_Gve,
self.mix_Gvr,
self.mix_Gtt,
self.mix_Gtr) = struct.unpack(self._pack_string, msg)
# Message: config_mixer_matrix
# Id: 16
class config_mixer_matrix():
id = 16
_pack_string = "<hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh"
def __init__(self, msg=None):
# public fields
self.matrix = [0.0] * mix_matrix_size
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
int(round(self.matrix[0] * 16384)),
int(round(self.matrix[1] * 16384)),
int(round(self.matrix[2] * 16384)),
int(round(self.matrix[3] * 16384)),
int(round(self.matrix[4] * 16384)),
int(round(self.matrix[5] * 16384)),
int(round(self.matrix[6] * 16384)),
int(round(self.matrix[7] * 16384)),
int(round(self.matrix[8] * 16384)),
int(round(self.matrix[9] * 16384)),
int(round(self.matrix[10] * 16384)),
int(round(self.matrix[11] * 16384)),
int(round(self.matrix[12] * 16384)),
int(round(self.matrix[13] * 16384)),
int(round(self.matrix[14] * 16384)),
int(round(self.matrix[15] * 16384)),
int(round(self.matrix[16] * 16384)),
int(round(self.matrix[17] * 16384)),
int(round(self.matrix[18] * 16384)),
int(round(self.matrix[19] * 16384)),
int(round(self.matrix[20] * 16384)),
int(round(self.matrix[21] * 16384)),
int(round(self.matrix[22] * 16384)),
int(round(self.matrix[23] * 16384)),
int(round(self.matrix[24] * 16384)),
int(round(self.matrix[25] * 16384)),
int(round(self.matrix[26] * 16384)),
int(round(self.matrix[27] * 16384)),
int(round(self.matrix[28] * 16384)),
int(round(self.matrix[29] * 16384)),
int(round(self.matrix[30] * 16384)),
int(round(self.matrix[31] * 16384)),
int(round(self.matrix[32] * 16384)),
int(round(self.matrix[33] * 16384)),
int(round(self.matrix[34] * 16384)),
int(round(self.matrix[35] * 16384)),
int(round(self.matrix[36] * 16384)),
int(round(self.matrix[37] * 16384)),
int(round(self.matrix[38] * 16384)),
int(round(self.matrix[39] * 16384)),
int(round(self.matrix[40] * 16384)),
int(round(self.matrix[41] * 16384)),
int(round(self.matrix[42] * 16384)),
int(round(self.matrix[43] * 16384)),
int(round(self.matrix[44] * 16384)),
int(round(self.matrix[45] * 16384)),
int(round(self.matrix[46] * 16384)),
int(round(self.matrix[47] * 16384)),
int(round(self.matrix[48] * 16384)),
int(round(self.matrix[49] * 16384)),
int(round(self.matrix[50] * 16384)),
int(round(self.matrix[51] * 16384)),
int(round(self.matrix[52] * 16384)),
int(round(self.matrix[53] * 16384)),
int(round(self.matrix[54] * 16384)),
int(round(self.matrix[55] * 16384)),
int(round(self.matrix[56] * 16384)),
int(round(self.matrix[57] * 16384)),
int(round(self.matrix[58] * 16384)),
int(round(self.matrix[59] * 16384)),
int(round(self.matrix[60] * 16384)),
int(round(self.matrix[61] * 16384)),
int(round(self.matrix[62] * 16384)),
int(round(self.matrix[63] * 16384)))
return msg
def unpack(self, msg):
(self.matrix[0],
self.matrix[1],
self.matrix[2],
self.matrix[3],
self.matrix[4],
self.matrix[5],
self.matrix[6],
self.matrix[7],
self.matrix[8],
self.matrix[9],
self.matrix[10],
self.matrix[11],
self.matrix[12],
self.matrix[13],
self.matrix[14],
self.matrix[15],
self.matrix[16],
self.matrix[17],
self.matrix[18],
self.matrix[19],
self.matrix[20],
self.matrix[21],
self.matrix[22],
self.matrix[23],
self.matrix[24],
self.matrix[25],
self.matrix[26],
self.matrix[27],
self.matrix[28],
self.matrix[29],
self.matrix[30],
self.matrix[31],
self.matrix[32],
self.matrix[33],
self.matrix[34],
self.matrix[35],
self.matrix[36],
self.matrix[37],
self.matrix[38],
self.matrix[39],
self.matrix[40],
self.matrix[41],
self.matrix[42],
self.matrix[43],
self.matrix[44],
self.matrix[45],
self.matrix[46],
self.matrix[47],
self.matrix[48],
self.matrix[49],
self.matrix[50],
self.matrix[51],
self.matrix[52],
self.matrix[53],
self.matrix[54],
self.matrix[55],
self.matrix[56],
self.matrix[57],
self.matrix[58],
self.matrix[59],
self.matrix[60],
self.matrix[61],
self.matrix[62],
self.matrix[63],) = struct.unpack(self._pack_string, msg)
self.matrix[0] /= 16384
self.matrix[1] /= 16384
self.matrix[2] /= 16384
self.matrix[3] /= 16384
self.matrix[4] /= 16384
self.matrix[5] /= 16384
self.matrix[6] /= 16384
self.matrix[7] /= 16384
self.matrix[8] /= 16384
self.matrix[9] /= 16384
self.matrix[10] /= 16384
self.matrix[11] /= 16384
self.matrix[12] /= 16384
self.matrix[13] /= 16384
self.matrix[14] /= 16384
self.matrix[15] /= 16384
self.matrix[16] /= 16384
self.matrix[17] /= 16384
self.matrix[18] /= 16384
self.matrix[19] /= 16384
self.matrix[20] /= 16384
self.matrix[21] /= 16384
self.matrix[22] /= 16384
self.matrix[23] /= 16384
self.matrix[24] /= 16384
self.matrix[25] /= 16384
self.matrix[26] /= 16384
self.matrix[27] /= 16384
self.matrix[28] /= 16384
self.matrix[29] /= 16384
self.matrix[30] /= 16384
self.matrix[31] /= 16384
self.matrix[32] /= 16384
self.matrix[33] /= 16384
self.matrix[34] /= 16384
self.matrix[35] /= 16384
self.matrix[36] /= 16384
self.matrix[37] /= 16384
self.matrix[38] /= 16384
self.matrix[39] /= 16384
self.matrix[40] /= 16384
self.matrix[41] /= 16384
self.matrix[42] /= 16384
self.matrix[43] /= 16384
self.matrix[44] /= 16384
self.matrix[45] /= 16384
self.matrix[46] /= 16384
self.matrix[47] /= 16384
self.matrix[48] /= 16384
self.matrix[49] /= 16384
self.matrix[50] /= 16384
self.matrix[51] /= 16384
self.matrix[52] /= 16384
self.matrix[53] /= 16384
self.matrix[54] /= 16384
self.matrix[55] /= 16384
self.matrix[56] /= 16384
self.matrix[57] /= 16384
self.matrix[58] /= 16384
self.matrix[59] /= 16384
self.matrix[60] /= 16384
self.matrix[61] /= 16384
self.matrix[62] /= 16384
self.matrix[63] /= 16384
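# Note (added): matrix values are transported as int16 in a signed 2.14
# fixed-point encoding -- multiplied by 16384 in pack() and divided by 16384
# in unpack(). That gives a representable range of roughly [-2.0, +2.0) at a
# resolution of 1/16384; e.g. 0.5 packs to 8192 and decodes back exactly.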
# Message: config_power
# Id: 17
class config_power():
id = 17
_pack_string = "<B"
def __init__(self, msg=None):
# public fields
self.have_attopilot = False
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.have_attopilot)
return msg
def unpack(self, msg):
(self.have_attopilot,) = struct.unpack(self._pack_string, msg)
# Message: config_pwm
# Id: 18
class config_pwm():
id = 18
_pack_string = "<Hffffffff"
def __init__(self, msg=None):
# public fields
self.pwm_hz = 0
self.act_gain = [0.0] * pwm_channels
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.pwm_hz,
self.act_gain[0],
self.act_gain[1],
self.act_gain[2],
self.act_gain[3],
self.act_gain[4],
self.act_gain[5],
self.act_gain[6],
self.act_gain[7])
return msg
def unpack(self, msg):
(self.pwm_hz,
self.act_gain[0],
self.act_gain[1],
self.act_gain[2],
self.act_gain[3],
self.act_gain[4],
self.act_gain[5],
self.act_gain[6],
self.act_gain[7]) = struct.unpack(self._pack_string, msg)
# Message: config_stability_damping
# Id: 19
class config_stability_damping():
id = 19
_pack_string = "<BBBBffff"
def __init__(self, msg=None):
# public fields
self.sas_rollaxis = False
self.sas_pitchaxis = False
self.sas_yawaxis = False
self.sas_tune = False
self.sas_rollgain = 0.0
self.sas_pitchgain = 0.0
self.sas_yawgain = 0.0
self.sas_max_gain = 0.0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.sas_rollaxis,
self.sas_pitchaxis,
self.sas_yawaxis,
self.sas_tune,
self.sas_rollgain,
self.sas_pitchgain,
self.sas_yawgain,
self.sas_max_gain)
return msg
def unpack(self, msg):
(self.sas_rollaxis,
self.sas_pitchaxis,
self.sas_yawaxis,
self.sas_tune,
self.sas_rollgain,
self.sas_pitchgain,
self.sas_yawgain,
self.sas_max_gain) = struct.unpack(self._pack_string, msg)
# Message: command_inceptors
# Id: 20
class command_inceptors():
id = 20
_pack_string = "<hhhhhh"
def __init__(self, msg=None):
# public fields
self.channel = [0.0] * ap_channels
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
int(round(self.channel[0] * 16384)),
int(round(self.channel[1] * 16384)),
int(round(self.channel[2] * 16384)),
int(round(self.channel[3] * 16384)),
int(round(self.channel[4] * 16384)),
int(round(self.channel[5] * 16384)))
return msg
def unpack(self, msg):
(self.channel[0],
self.channel[1],
self.channel[2],
self.channel[3],
self.channel[4],
self.channel[5],) = struct.unpack(self._pack_string, msg)
self.channel[0] /= 16384
self.channel[1] /= 16384
self.channel[2] /= 16384
self.channel[3] /= 16384
self.channel[4] /= 16384
self.channel[5] /= 16384
# Message: command_zero_gyros
# Id: 21
class command_zero_gyros():
id = 21
_pack_string = "<"
def __init__(self, msg=None):
# public fields
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
    msg = struct.pack(self._pack_string)
    return msg
def unpack(self, msg):
    pass
# Message: command_reset_ekf
# Id: 22
class command_reset_ekf():
id = 22
_pack_string = "<"
def __init__(self, msg=None):
# public fields
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
    msg = struct.pack(self._pack_string)
    return msg
def unpack(self, msg):
    pass
# Message: command_cycle_inceptors
# Id: 23
class command_cycle_inceptors():
id = 23
_pack_string = "<"
def __init__(self, msg=None):
# public fields
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
    msg = struct.pack(self._pack_string)
    return msg
def unpack(self, msg):
    pass
# Message: pilot
# Id: 24
class pilot():
id = 24
_pack_string = "<hhhhhhhhhhhhhhhhB"
def __init__(self, msg=None):
# public fields
self.channel = [0.0] * sbus_channels
self.flags = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
int(round(self.channel[0] * 16384)),
int(round(self.channel[1] * 16384)),
int(round(self.channel[2] * 16384)),
int(round(self.channel[3] * 16384)),
int(round(self.channel[4] * 16384)),
int(round(self.channel[5] * 16384)),
int(round(self.channel[6] * 16384)),
int(round(self.channel[7] * 16384)),
int(round(self.channel[8] * 16384)),
int(round(self.channel[9] * 16384)),
int(round(self.channel[10] * 16384)),
int(round(self.channel[11] * 16384)),
int(round(self.channel[12] * 16384)),
int(round(self.channel[13] * 16384)),
int(round(self.channel[14] * 16384)),
int(round(self.channel[15] * 16384)),
self.flags)
return msg
def unpack(self, msg):
(self.channel[0],
self.channel[1],
self.channel[2],
self.channel[3],
self.channel[4],
self.channel[5],
self.channel[6],
self.channel[7],
self.channel[8],
self.channel[9],
self.channel[10],
self.channel[11],
self.channel[12],
self.channel[13],
self.channel[14],
self.channel[15],
self.flags) = struct.unpack(self._pack_string, msg)
self.channel[0] /= 16384
self.channel[1] /= 16384
self.channel[2] /= 16384
self.channel[3] /= 16384
self.channel[4] /= 16384
self.channel[5] /= 16384
self.channel[6] /= 16384
self.channel[7] /= 16384
self.channel[8] /= 16384
self.channel[9] /= 16384
self.channel[10] /= 16384
self.channel[11] /= 16384
self.channel[12] /= 16384
self.channel[13] /= 16384
self.channel[14] /= 16384
self.channel[15] /= 16384
# Message: imu
# Id: 25
class imu():
id = 25
_pack_string = "<Lhhhhhhhhhhhhhhhh"
def __init__(self, msg=None):
# public fields
self.millis = 0
self.raw = [0] * 6
self.cal = [0] * 10
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.millis,
self.raw[0],
self.raw[1],
self.raw[2],
self.raw[3],
self.raw[4],
self.raw[5],
self.cal[0],
self.cal[1],
self.cal[2],
self.cal[3],
self.cal[4],
self.cal[5],
self.cal[6],
self.cal[7],
self.cal[8],
self.cal[9])
return msg
def unpack(self, msg):
(self.millis,
self.raw[0],
self.raw[1],
self.raw[2],
self.raw[3],
self.raw[4],
self.raw[5],
self.cal[0],
self.cal[1],
self.cal[2],
self.cal[3],
self.cal[4],
self.cal[5],
self.cal[6],
self.cal[7],
self.cal[8],
self.cal[9]) = struct.unpack(self._pack_string, msg)
# Message: aura_nav_pvt
# Id: 26
class aura_nav_pvt():
id = 26
_pack_string = "<LhBBBBBBLlBBBBllllLLlllLlLLHBBBBBBlhH"
def __init__(self, msg=None):
# public fields
self.iTOW = 0
self.year = 0
self.month = 0
self.day = 0
self.hour = 0
self.min = 0
self.sec = 0
self.valid = 0
self.tAcc = 0
self.nano = 0
self.fixType = 0
self.flags = 0
self.flags2 = 0
self.numSV = 0
self.lon = 0
self.lat = 0
self.height = 0
self.hMSL = 0
self.hAcc = 0
self.vAcc = 0
self.velN = 0
self.velE = 0
self.velD = 0
self.gSpeed = 0
self.heading = 0
self.sAcc = 0
self.headingAcc = 0
self.pDOP = 0
self.reserved = [0] * 6
self.headVeh = 0
self.magDec = 0
self.magAcc = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.iTOW,
self.year,
self.month,
self.day,
self.hour,
self.min,
self.sec,
self.valid,
self.tAcc,
self.nano,
self.fixType,
self.flags,
self.flags2,
self.numSV,
self.lon,
self.lat,
self.height,
self.hMSL,
self.hAcc,
self.vAcc,
self.velN,
self.velE,
self.velD,
self.gSpeed,
self.heading,
self.sAcc,
self.headingAcc,
self.pDOP,
self.reserved[0],
self.reserved[1],
self.reserved[2],
self.reserved[3],
self.reserved[4],
self.reserved[5],
self.headVeh,
self.magDec,
self.magAcc)
return msg
def unpack(self, msg):
(self.iTOW,
self.year,
self.month,
self.day,
self.hour,
self.min,
self.sec,
self.valid,
self.tAcc,
self.nano,
self.fixType,
self.flags,
self.flags2,
self.numSV,
self.lon,
self.lat,
self.height,
self.hMSL,
self.hAcc,
self.vAcc,
self.velN,
self.velE,
self.velD,
self.gSpeed,
self.heading,
self.sAcc,
self.headingAcc,
self.pDOP,
self.reserved[0],
self.reserved[1],
self.reserved[2],
self.reserved[3],
self.reserved[4],
self.reserved[5],
self.headVeh,
self.magDec,
self.magAcc) = struct.unpack(self._pack_string, msg)
# Message: airdata
# Id: 27
class airdata():
id = 27
_pack_string = "<ffffffH"
def __init__(self, msg=None):
# public fields
self.baro_press_pa = 0.0
self.baro_temp_C = 0.0
self.baro_hum = 0.0
self.ext_diff_press_pa = 0.0
self.ext_static_press_pa = 0.0
self.ext_temp_C = 0.0
self.error_count = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.baro_press_pa,
self.baro_temp_C,
self.baro_hum,
self.ext_diff_press_pa,
self.ext_static_press_pa,
self.ext_temp_C,
self.error_count)
return msg
def unpack(self, msg):
(self.baro_press_pa,
self.baro_temp_C,
self.baro_hum,
self.ext_diff_press_pa,
self.ext_static_press_pa,
self.ext_temp_C,
self.error_count) = struct.unpack(self._pack_string, msg)
# Message: power
# Id: 28
class power():
id = 28
_pack_string = "<HHHH"
def __init__(self, msg=None):
# public fields
self.int_main_v = 0.0
self.avionics_v = 0.0
self.ext_main_v = 0.0
self.ext_main_amp = 0.0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
int(round(self.int_main_v * 100)),
int(round(self.avionics_v * 100)),
int(round(self.ext_main_v * 100)),
int(round(self.ext_main_amp * 100)))
return msg
def unpack(self, msg):
(self.int_main_v,
self.avionics_v,
self.ext_main_v,
self.ext_main_amp) = struct.unpack(self._pack_string, msg)
self.int_main_v /= 100
self.avionics_v /= 100
self.ext_main_v /= 100
self.ext_main_amp /= 100
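# Note (added): power readings travel as uint16 scaled by 100, i.e. centivolt
# and centiamp resolution, so the representable range is 0.00 to 655.35.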
# Message: status
# Id: 29
class status():
id = 29
_pack_string = "<HHHLHH"
def __init__(self, msg=None):
# public fields
self.serial_number = 0
self.firmware_rev = 0
self.master_hz = 0
self.baud = 0
self.byte_rate = 0
self.timer_misses = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.serial_number,
self.firmware_rev,
self.master_hz,
self.baud,
self.byte_rate,
self.timer_misses)
return msg
def unpack(self, msg):
(self.serial_number,
self.firmware_rev,
self.master_hz,
self.baud,
self.byte_rate,
self.timer_misses) = struct.unpack(self._pack_string, msg)
# Message: ekf
# Id: 30
class ekf():
id = 30
_pack_string = "<LddfffffffffffffHHHB"
def __init__(self, msg=None):
# public fields
self.millis = 0
self.lat_rad = 0.0
self.lon_rad = 0.0
self.altitude_m = 0.0
self.vn_ms = 0.0
self.ve_ms = 0.0
self.vd_ms = 0.0
self.phi_rad = 0.0
self.the_rad = 0.0
self.psi_rad = 0.0
self.p_bias = 0.0
self.q_bias = 0.0
self.r_bias = 0.0
self.ax_bias = 0.0
self.ay_bias = 0.0
self.az_bias = 0.0
self.max_pos_cov = 0.0
self.max_vel_cov = 0.0
self.max_att_cov = 0.0
self.status = 0
# unpack if requested
if msg: self.unpack(msg)
def pack(self):
msg = struct.pack(self._pack_string,
self.millis,
self.lat_rad,
self.lon_rad,
self.altitude_m,
self.vn_ms,
self.ve_ms,
self.vd_ms,
self.phi_rad,
self.the_rad,
self.psi_rad,
self.p_bias,
self.q_bias,
self.r_bias,
self.ax_bias,
self.ay_bias,
self.az_bias,
int(round(self.max_pos_cov * 100)),
int(round(self.max_vel_cov * 1000)),
int(round(self.max_att_cov * 10000)),
self.status)
return msg
def unpack(self, msg):
(self.millis,
self.lat_rad,
self.lon_rad,
self.altitude_m,
self.vn_ms,
self.ve_ms,
self.vd_ms,
self.phi_rad,
self.the_rad,
self.psi_rad,
self.p_bias,
self.q_bias,
self.r_bias,
self.ax_bias,
self.ay_bias,
self.az_bias,
self.max_pos_cov,
self.max_vel_cov,
self.max_att_cov,
self.status) = struct.unpack(self._pack_string, msg)
self.max_pos_cov /= 100
self.max_vel_cov /= 1000
self.max_att_cov /= 10000
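# Note (added): the covariance fields use different uint16 fixed-point scales:
# position is divided by 100 (max ~655 m), velocity by 1000 (max ~65.5 m/s)
# and attitude by 10000 (max ~6.55 rad).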
| 30.742188
| 86
| 0.472766
| 33,918
| 0.95773
| 0
| 0
| 0
| 0
| 0
| 0
| 1,892
| 0.053424
|
839c15c319f76f2a4d3c57f98d73d5a1cfa11959
| 1,563
|
py
|
Python
|
influxdbnagiosplugin/tests/test_query.py
|
paulboot/influxdb-nagios-plugin
|
945eeb518d22863a7878b3726b24d8a2e9e485bb
|
[
"Apache-2.0"
] | 5
|
2016-05-16T22:35:09.000Z
|
2020-11-13T16:03:45.000Z
|
influxdbnagiosplugin/tests/test_query.py
|
paulboot/influxdb-nagios-plugin
|
945eeb518d22863a7878b3726b24d8a2e9e485bb
|
[
"Apache-2.0"
] | 2
|
2017-11-28T00:01:19.000Z
|
2022-01-21T14:04:58.000Z
|
influxdbnagiosplugin/tests/test_query.py
|
paulboot/influxdb-nagios-plugin
|
945eeb518d22863a7878b3726b24d8a2e9e485bb
|
[
"Apache-2.0"
] | 9
|
2015-12-03T00:37:57.000Z
|
2021-09-08T09:23:05.000Z
|
"""
Query construction tests.
"""
from hamcrest import assert_that, is_, equal_to
from influxdbnagiosplugin.query import ExplicitQueryBuilder, SingleMeasurementQueryBuilder
def test_explicit_query():
query = ExplicitQueryBuilder("SHOW MEASUREMENTS")
assert_that(query().query, is_(equal_to(
"SHOW MEASUREMENTS"
)))
def test_single_measurement_query():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=[],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
)))
def test_single_measurement_query_where_clause():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=["path=/"],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
" AND path = '/'"
)))
def test_single_measurement_query_where_clause_quoted():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=["path='/'"],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
" AND path = '/'"
)))
| 26.948276
| 90
| 0.627639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 459
| 0.293666
|
839cf93a477b1ceb19582913fdf13770dea82220
| 27,056
|
py
|
Python
|
sfftk_migrate/test_sfftk_migrate.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
sfftk_migrate/test_sfftk_migrate.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | 2
|
2020-04-02T15:25:10.000Z
|
2020-04-03T14:32:12.000Z
|
sfftk_migrate/test_sfftk_migrate.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import types
import unittest
import inspect
from lxml import etree
from . import XSL, XML, VERSION_LIST
from .core import get_module, get_stylesheet, get_source_version, get_migration_path, list_versions
from .main import parse_args
from .migrate import migrate_by_stylesheet, do_migration, get_params
from .utils import _print, _check, _decode_data
replace_list = [
('\n', ''),
('\t', ''),
(' ', ''),
]
def _replace(s, vals=replace_list):
if s is None:
return ''
_s = s
for u, v in vals:
_s = _s.replace(u, v)
return _s
def compare_elements(el1, el2):
"""Compare two elements and all their children
:return: True or False
"""
_check(el1, (etree._Element), TypeError)
_check(el2, (etree._Element), TypeError)
# https://stackoverflow.com/questions/7905380/testing-equivalence-of-xml-etree-elementtree
if el1.tag != el2.tag:
return False
if _replace(el1.text) != _replace(el2.text):
return False
if _replace(el1.tail) != _replace(el2.tail):
return False
if el1.attrib != el2.attrib:
return False
if len(el1) != len(el2):
return False
return all(compare_elements(e1, e2) for e1, e2 in zip(el1, el2))
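# Illustration (added note): because text and tails go through _replace(),
# compare_elements treats whitespace-only differences as equal, e.g.
# compare_elements(etree.XML("<a>x</a>"), etree.XML("<a>\nx\n</a>")) is True,
# while differing tags, attributes or child counts make it False.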
class TestUtils(unittest.TestCase):
def test_check(self):
"""Test that _check works"""
with self.assertRaisesRegex(TypeError, r"object '1' is not of class <class 'str'>"):
_check(1, str, TypeError)
with self.assertRaises(TypeError):
_check(1, str, TypeError, message="")
def test_migrate(self):
"""Test that migrate works"""
# exceptions
with self.assertRaises(TypeError):
migrate_by_stylesheet(1, 2)
with self.assertRaises(IOError):
migrate_by_stylesheet('file.xml', 'file.xsl')
def test_parse_args(self):
"""Test correct arguments"""
# default with -t/--target-version
args = parse_args("file.xml -v -t 1.0")
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.target_version, "1.0")
self.assertEqual(args.outfile, "file_v1.0.xml")
self.assertFalse(args.list_versions)
# specify outfile
args = parse_args("file.xml -v -t 1.0 -o my_output.xml")
self.assertEqual(args.outfile, "my_output.xml")
# list valid versions
args = parse_args("-l")
self.assertEqual(args.infile, '')
self.assertEqual(args.target_version, VERSION_LIST[-1])
self.assertIsNone(args.outfile)
self.assertTrue(args.list_versions)
self.assertFalse(args.show_version)
# show version in file
args = parse_args("-v -s file.xml")
self.assertEqual(args.infile, 'file.xml')
self.assertEqual(args.target_version, VERSION_LIST[-1])
# self.assertEqual(args.outfile, 'file_v0.8.0.dev1.xml')
self.assertIsNone(args.outfile)
self.assertFalse(args.list_versions)
self.assertTrue(args.show_version)
# show package version
args = parse_args("-v -V")
self.assertEqual(args.infile, '')
self.assertIsNone(args.outfile)
self.assertTrue(args.version)
self.assertFalse(args.list_versions)
self.assertFalse(args.show_version)
def test_get_stylesheet(self):
"""Given versions return the correct stylesheet to use"""
stylesheet = get_stylesheet("1", "2")
self.assertEqual(os.path.basename(stylesheet), 'migrate_v1_to_v2.xsl')
self.assertTrue(os.path.exists(stylesheet))
original = os.path.join(XML, 'original.xml')
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_details="Nothing much")
migrated = etree.ElementTree(etree.XML(_migrated))
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
# self.assertTrue(False)
with self.assertRaises(OSError):
get_stylesheet("nothing", "something")
def test_get_source_version(self):
"""Obtain the version in the original"""
source_version = get_source_version(os.path.join(XML, 'original.xml'))
self.assertEqual(source_version, '1')
fn_v07 = os.path.join(XML, 'test2.sff')
source_version_v07 = get_source_version(fn_v07)
self.assertEqual(source_version_v07, '0.7.0.dev0')
fn_v08 = os.path.join(XML, 'test2_v0.8.0.dev1.sff')
source_version_v08 = get_source_version(fn_v08)
self.assertEqual(source_version_v08, '0.8.0.dev1')
def test_get_migration_path(self):
"""Determine the sequence of migrations to perform"""
version_list = ['1', '2', '3', '4', '5', '6']
migration_path = get_migration_path('2', '6', version_list=version_list)
self.assertEqual(migration_path, [('2', '3'), ('3', '4'), ('4', '5'), ('5', '6')])
# cannot find start
with self.assertRaisesRegex(ValueError, r".*invalid migration start.*"):
get_migration_path('0', '6', version_list=version_list)
# cannot find end
with self.assertRaisesRegex(ValueError, r".*invalid migration end.*"):
get_migration_path('1', '9', version_list=version_list)
def test_do_migration_example(self):
"""Toy migration example"""
version_list = ['1', '2']
cmd = "{infile} -v --target-version 2 --outfile {outfile}".format(
infile=os.path.join(XML, "original.xml"),
outfile=os.path.join(XML, "my_output.xml")
)
args = parse_args(cmd)
_text = "48ec3e2ab568763658fc3f5430b851ceaf1593d6" # secrets.token_hex(20)
status = do_migration(
args,
value_list=[_text],
version_list=version_list,
)
_output = os.path.join(XML, "original_v2.xml")
self.assertTrue(os.path.exists(_output))
self.assertEqual(status, os.EX_OK)
output = etree.parse(_output)
self.assertEqual(output.xpath('/segmentation/details/text()')[0], _text)
os.remove(args.outfile)
def test_do_migration(self):
"""Do an actual migration using the convenience function"""
# try a null migration
target_version = "0.8.0.dev1"
outfile = os.path.join(XML, 'my_file_out.sff')
cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
infile=os.path.join(XML, 'test2_v0.8.0.dev1.sff'),
target_version=target_version,
outfile=outfile,
)
args = parse_args(cmd)
status = do_migration(args)
self.assertEqual(status, os.EX_OK)
self.assertFalse(os.path.exists(outfile)) # the file was not created
# try an actual migration
cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
infile=os.path.join(XML, 'test2.sff'),
target_version=target_version,
outfile=outfile
)
args = parse_args(cmd)
status = do_migration(args)
self.assertEqual(status, os.EX_OK)
self.assertTrue(os.path.exists(outfile))  # the file was created this time
in_version = get_source_version(args.infile)
out_version = get_source_version(outfile)
self.assertNotEqual(in_version, out_version)
self.assertEqual(out_version, target_version)
os.remove(outfile)
def test_get_module(self):
"""Check that we can get the right module for this migration"""
module = get_module('1', '2')
self.assertIsInstance(module, types.ModuleType)
def test_get_params(self):
"""Test getting params"""
module = get_module('1', '2')
_text = "ce3c90151bb3c803c8e6570ee7d5845ac3c96c38" # secrets.token_hex(20)
params = get_params(module.PARAM_LIST, value_list=[_text])
self.assertIsInstance(params, dict)
self.assertEqual(len(params), 1)
with self.assertRaises(ValueError):
get_params(module.PARAM_LIST, value_list=[_text, _text])
def test_list_versions(self):
"""Test that we can list the supported versions"""
args = parse_args("-v -l")
status, version_count = list_versions()
self.assertEqual(status, os.EX_OK)
self.assertEqual(version_count, 2)
class TestMigrations(unittest.TestCase):
def test_original_to_add_field(self):
"""Test adding a field to the original"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'add_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_add_field.xsl')
# we pass the value of the `details` param as follows:
# A = reference.xpath(<xpath>)[0]
# etree.XSLT.strparam(A) - handle a possibly quoted string
details_text = reference.xpath('/segmentation/details/text()')[0]
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_details=details_text) # bytes
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_to_drop_field(self):
"""Test dropping a field from the original"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'drop_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_drop_field.xsl')
with self.assertWarns(UserWarning):
_migrated = migrate_by_stylesheet(original, stylesheet, verbose=True)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_rename_field(self):
"""Test changing a field by renaming it"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_rename_field.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_rename_field.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
# sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
# sys.stderr.write('\n')
# sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_add_attribute(self):
"""Test changing a field by adding an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_add_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_add_attribute.xsl')
lang_text = reference.xpath('/segmentation/name/@lang')[0]
_migrated = migrate_by_stylesheet(original, stylesheet,
segmentation_name_lang=lang_text)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
# sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
# sys.stderr.write('\n')
# sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_drop_attribute(self):
"""Test changing a field by dropping an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_drop_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_drop_attribute.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
self.assertTrue(same)
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
def test_original_to_change_field_change_value(self):
"""Test changing a field by changing the value"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_change_value.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_change_value.xsl')
_segment_name = reference.xpath('/segmentation/segment[@id=1]/name/text()')[0]
_migrated = migrate_by_stylesheet(original, stylesheet, segment_name=_segment_name)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_to_change_field_rename_attribute(self):
"""Test changing a field by renaming an attribute"""
original = os.path.join(XML, 'original.xml')
reference = etree.parse(os.path.join(XML, 'change_field_rename_attribute.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_field_rename_attribute.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
def test_original_list_to_change_value_list(self):
"""Test changing all the values for a list"""
original = os.path.join(XML, 'original_list.xml')
reference = etree.parse(os.path.join(XML, 'change_value_list.xml'))
stylesheet = os.path.join(XSL, 'original_to_change_value_list.xsl')
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
same = compare_elements(reference.getroot(), migrated.getroot())
sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
sys.stderr.write('\n')
sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
self.assertTrue(same)
class TestEMDBSFFMigrations(unittest.TestCase):
def test_migrate_mesh_exceptions(self):
"""Test that we capture exceptions"""
module = get_module('0.7.0.dev0', '0.8.0.dev1')
# create an empty mesh
mesh = etree.Element("mesh")
with self.assertRaisesRegex(ValueError, r".*invalid endianness.*"):
module.migrate_mesh(mesh, endianness='other')
with self.assertRaisesRegex(ValueError, r".*invalid triangles mode.*"):
module.migrate_mesh(mesh, triangles_mode='other')
with self.assertRaisesRegex(ValueError, r".*invalid vertices mode.*"):
module.migrate_mesh(mesh, vertices_mode='other')
# no geometry
verts, norms, tris = module.migrate_mesh(mesh)
self.assertIsInstance(verts, etree._Element)
self.assertEqual(int(verts.get("num_vertices")), 0)
# let's get the signature of the migrate_mesh function to get the default values for kwargs
signature = inspect.signature(module.migrate_mesh)
# verts
self.assertEqual(verts.get("mode"), signature.parameters['vertices_mode'].default)
self.assertEqual(verts.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(verts.get("data"), "")
# norms
self.assertEqual(norms.get("mode"), signature.parameters['vertices_mode'].default)
self.assertEqual(norms.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(norms.get("data"), "")
# tris
self.assertEqual(tris.get("mode"), signature.parameters['triangles_mode'].default)
self.assertEqual(tris.get("endianness"), signature.parameters['endianness'].default)
self.assertEqual(tris.get("data"), "")
def test_v0_7_0_dev0_to_v0_8_0_dev0(self):
"""Test migration from v0.7.0.dev0 to v0.8.0.dev1"""
original = os.path.join(XML, 'test2.sff')
stylesheet = get_stylesheet("0.7.0.dev0", "0.8.0.dev1")
# phase I migration using stylesheet
_migrated = migrate_by_stylesheet(original, stylesheet)
# convert migration to an ElementTree object
migrated = etree.ElementTree(etree.XML(_migrated))
_original = etree.parse(original)
segments = _original.xpath('/segmentation/segmentList/segment')
_print(segments)
segment_meshes = dict()
module = get_module('0.7.0.dev0', '0.8.0.dev1')
for segment in segments:
segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('meshList/mesh'):
_vertices, _normals, _triangles = module.migrate_mesh(
mesh)
segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = _vertices, _normals, _triangles
migrated_segments = migrated.xpath('/segmentation/segment_list/segment')
for migrated_segment in migrated_segments:
for migrated_mesh in migrated_segment.xpath('mesh_list/mesh'):
_vertices, _normals, _triangles = segment_meshes[int(migrated_segment.get("id"))][
int(migrated_mesh.get("id"))]
migrated_mesh.insert(0, _vertices)
migrated_mesh.insert(1, _normals)
migrated_mesh.insert(2, _triangles)
# let's see what it looks like
migrated_decoded = etree.tostring(migrated, xml_declaration=True, encoding='UTF-8', pretty_print=True).decode(
'utf-8')
# sys.stderr.write('migrated:\n' + migrated_decoded)
# with open(os.path.join(XML, 'test2_v0.8.0.dev1.sff'), 'w') as f:
# f.write(migrated_decoded)
def test_meshes_equal_v0_7_0_dev0_vs_v0_8_0_dev0(self):
"""Test that the mesh data is the same
We only compare surface vertices. Normal vertices correspond one-to-one to surface vertices and are not relevant
to triangles.
"""
v7 = os.path.join(XML, 'test7.sff')
v8 = os.path.join(XML, 'test7_v0.8.0.dev1.sff')
fv7 = etree.parse(v7)
fv8 = etree.parse(v8)
fv7_segments = fv7.xpath('/segmentation/segmentList/segment')
# extract vertices, normals and triangles
fv7_segment_meshes = dict()
for segment in fv7_segments:
fv7_segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('meshList/mesh'):
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = {
'surface_vertices': dict(),
'normal_vertices': dict(),
'triangles': dict(),
}
vertex_list = next(mesh.iter('vertexList'))
for vertex in vertex_list:
if vertex.get('designation') == 'surface' or vertex.get('designation') is None:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['surface_vertices'][
int(vertex.get('vID'))] = tuple(map(lambda v: float(v.text), vertex.xpath('*')))
elif vertex.get('designation') == 'normal':
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['normal_vertices'][
int(vertex.get('vID'))] = tuple(map(lambda v: float(v.text), vertex.xpath('*')))
triangle_list = next(mesh.iter('polygonList'))
for triangle in triangle_list:
# _print(tuple(map(lambda t: t.text, triangle.xpath('v'))))
vertex_ids = list(map(lambda p: int(p.text), triangle.xpath('v')))
if len(vertex_ids) == 3:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'][
int(triangle.get('PID'))] = tuple(vertex_ids), tuple()
elif len(vertex_ids) == 6:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'][
int(triangle.get('PID'))] = tuple(vertex_ids[::2]), tuple(vertex_ids[1::2])
else:
pass
# _print(fv7_segment_meshes)
fv8_segments = fv8.xpath('/segmentation/segment_list/segment')
# extract vertices, normals and triangles
fv8_segment_meshes = dict()
for segment in fv8_segments:
fv8_segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('mesh_list/mesh'):
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = dict()
vertices = next(mesh.iter('vertices'))
# _print(vertices.keys())
# _print(vertices.get("data").encode("ASCII"))
vertex_list = _decode_data(vertices.get("data").encode('ASCII'),
int(vertices.get("num_vertices")), vertices.get("mode"),
vertices.get("endianness"))
vertex_tuples = list(zip(vertex_list[::3], vertex_list[1::3], vertex_list[2::3]))
# _print(data_vectors)
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['surface_vertices'] = dict(
zip(range(len(vertex_tuples)), vertex_tuples))
# _print(data_dict)
normals = next(mesh.iter('normals'))
normal_list = _decode_data(normals.get("data").encode('ASCII'), int(normals.get("num_normals")),
normals.get("mode"), normals.get('endianness'))
normal_tuples = list(zip(normal_list[::3], normal_list[1::3], normal_list[2::3]))
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['normal_vertices'] = dict(
zip(range(len(normal_tuples)), normal_tuples))
triangles = next(mesh.iter('triangles'))
triangle_list = _decode_data(triangles.get("data").encode('ASCII'),
int(triangles.get("num_triangles")),
triangles.get("mode"), triangles.get('endianness'))
triangle_tuples = list(zip(triangle_list[::3], triangle_list[1::3], triangle_list[2::3]))
fv8_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['triangles'] = dict(
zip(range(len(triangle_tuples)), triangle_tuples))
# _print(fv8_segment_meshes)
# compare
fv7_surface_vertices = list()
for segment_id in fv7_segment_meshes:
for mesh_id in fv7_segment_meshes[segment_id]:
for triangle_id in fv7_segment_meshes[segment_id][mesh_id]['triangles']:
triangle = fv7_segment_meshes[segment_id][mesh_id]['triangles'][triangle_id]
# _print(triangle)
# _print(triangle)
s0, s1, s2 = triangle[0]
# n0, n1, n2 = triangle[1]
fv7_surface_vertices += [fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s0],
fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s1],
fv7_segment_meshes[segment_id][mesh_id]['surface_vertices'][s2]]
fv8_surface_vertices = list()
for segment_id in fv8_segment_meshes:
for mesh_id in fv8_segment_meshes[segment_id]:
for triangle_id in fv8_segment_meshes[segment_id][mesh_id]['triangles']:
triangle = fv8_segment_meshes[segment_id][mesh_id]['triangles'][triangle_id]
# _print(triangle)
s0, s1, s2 = triangle
fv8_surface_vertices += [fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s0],
fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s1],
fv8_segment_meshes[segment_id][mesh_id]['surface_vertices'][s2]]
# _print(fv7_surface_vertices[1283])
# _print(fv8_surface_vertices[1283])
self.assertEqual(len(fv7_surface_vertices), len(fv8_surface_vertices))
for u, v in zip(fv7_surface_vertices, fv8_surface_vertices):
self.assertAlmostEqual(u[0], v[0])
self.assertAlmostEqual(u[1], v[1])
self.assertAlmostEqual(u[2], v[2])
def test_v0_7_0_dev0_to_v0_8_0_dev0_shapes(self):
"""Test that we can migrate shapes"""
original = os.path.join(XML, 'test_shape_segmentation.sff')
stylesheet = get_stylesheet("0.7.0.dev0", "0.8.0.dev1")
_migrated = migrate_by_stylesheet(original, stylesheet)
migrated = etree.ElementTree(etree.XML(_migrated))
migrated_decoded = etree.tostring(migrated, xml_declaration=True, encoding='UTF-8', pretty_print=True).decode(
'utf-8')
sys.stderr.write(migrated_decoded)
with open(os.path.join(XML, 'test_shape_segmentation_v0.8.0.dev1.sff'), 'w') as f:
f.write(migrated_decoded)
class TestMain(unittest.TestCase):
def test_parse_args(self):
"""Test parse_args function"""
cmd = "file.xml -v"
args = parse_args(cmd)
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.outfile, "file_v{}.xml".format(VERSION_LIST[-1]))
def test_parse_args_outfile(self):
"""Test that outfile arg is honoured"""
cmd = "file.xml -v -o nothing.xml"
args = parse_args(cmd)
self.assertEqual(args.outfile, "nothing.xml")
def test_no_shlex(self):
"""Test not using shlex"""
cmd = ["file.xml", "-v", "-o", "nothing.xml"]
args = parse_args(cmd, use_shlex=False)
self.assertEqual(args.infile, "file.xml")
self.assertEqual(args.outfile, "nothing.xml")
| 50.383613
| 120
| 0.623965
| 25,762
| 0.952173
| 0
| 0
| 0
| 0
| 0
| 0
| 6,718
| 0.2483
|
839e9ac3360c11d26c97e8b4677e721f25a025a1
| 11,298
|
py
|
Python
|
ross/stochastic/st_shaft_element.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 69
|
2018-12-26T19:21:26.000Z
|
2022-02-10T08:48:03.000Z
|
ross/stochastic/st_shaft_element.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 639
|
2018-12-18T16:44:11.000Z
|
2022-03-27T16:46:41.000Z
|
ross/stochastic/st_shaft_element.py
|
hssaabbl/ross
|
5e548d24c8522c8a9a294479c580c21b4eb3bb65
|
[
"MIT"
] | 136
|
2019-01-08T12:37:32.000Z
|
2022-03-30T07:14:35.000Z
|
"""Shaft element module for STOCHASTIC ROSS.
This module creates an instance of random shaft element for stochastic
analysis.
"""
from ross.shaft_element import ShaftElement
from ross.stochastic.st_materials import ST_Material
from ross.stochastic.st_results_elements import plot_histogram
from ross.units import Q_, check_units
__all__ = ["ST_ShaftElement", "st_shaft_example"]
class ST_ShaftElement:
"""Random shaft element.
Creates an object containing a generator with random instances of
ShaftElement.
Parameters
----------
L : float, pint.Quantity, list
Element length.
Input a list to make it random.
idl : float, pint.Quantity, list
Inner diameter of the element at the left position.
Input a list to make it random.
odl : float, pint.Quantity, list
Outer diameter of the element at the left position.
Input a list to make it random.
idr : float, pint.Quantity, list, optional
Inner diameter of the element at the right position
Default is equal to idl value (cylindrical element)
Input a list to make it random.
odr : float, pint.Quantity, list, optional
Outer diameter of the element at the right position.
Default is equal to odl value (cylindrical element)
Input a list to make it random.
material : ross.material, list of ross.material
Shaft material.
Input a list to make it random.
n : int, optional
Element number (coincident with its first node).
If not given, it will be set when the rotor is assembled
according to the element's position in the list supplied to
the rotor constructor.
axial_force : float, list, optional
Axial force.
Input a list to make it random.
Default is 0.
torque : float, list, optional
Torque
Input a list to make it random.
Default is 0.
shear_effects : bool, optional
Determine if shear effects are taken into account.
Default is True.
rotary_inertia : bool, optional
Determine if rotary_inertia effects are taken into account.
Default is True.
gyroscopic : bool, optional
Determine if gyroscopic effects are taken into account.
Default is True.
shear_method_calc : str, optional
Determines which shear calculation method the user will adopt.
Default is 'cowper'
is_random : list
List of the object attributes to become random.
Possibilities:
["L", "idl", "odl", "idr", "odr", "material", "axial_force", "torque"]
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> len(list(iter(elms)))
5
"""
@check_units
def __init__(
self,
L,
idl,
odl,
idr=None,
odr=None,
material=None,
n=None,
axial_force=0,
torque=0,
shear_effects=True,
rotary_inertia=True,
gyroscopic=True,
shear_method_calc="cowper",
is_random=None,
):
if is_random is None:
    is_random = []
if idr is None:
    idr = idl
if "idl" in is_random and "idr" not in is_random:
is_random.append("idr")
if odr is None:
odr = odl
if "odl" in is_random and "odr" not in is_random:
is_random.append("odr")
if isinstance(material, ST_Material):
material = list(iter(material))
attribute_dict = dict(
L=L,
idl=idl,
odl=odl,
idr=idr,
odr=odr,
material=material,
n=n,
axial_force=axial_force,
torque=torque,
shear_effects=shear_effects,
rotary_inertia=rotary_inertia,
gyroscopic=gyroscopic,
shear_method_calc=shear_method_calc,
tag=None,
)
self.is_random = is_random
self.attribute_dict = attribute_dict
def __iter__(self):
"""Return an iterator for the container.
Returns
-------
An iterator over random shaft elements.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> len(list(iter(elm)))
2
"""
return iter(self.random_var(self.is_random, self.attribute_dict))
def __getitem__(self, key):
"""Return the value for a given key from attribute_dict.
Parameters
----------
key : str
A class parameter as string.
Raises
------
KeyError
Raises an error if the parameter doesn't belong to the class.
Returns
-------
Return the value for the given key.
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> elms["L"]
1
"""
if key not in self.attribute_dict.keys():
raise KeyError("Object does not have parameter: {}.".format(key))
return self.attribute_dict[key]
def __setitem__(self, key, value):
"""Set new parameter values for the object.
Function to change a parameter value.
It's not allowed to add new parameters to the object.
Parameters
----------
key : str
A class parameter as string.
value : The corresponding value for the attribute_dict's key.
***check the correct type for each key in ST_ShaftElement
docstring.
Raises
------
KeyError
Raises an error if the parameter doesn't belong to the class.
Example
-------
>>> import numpy as np
>>> import ross.stochastic as srs
>>> size = 5
>>> E = np.random.uniform(208e9, 211e9, size)
>>> st_steel = srs.ST_Material(name="Steel", rho=7810, E=E, G_s=81.2e9)
>>> elms = srs.ST_ShaftElement(L=1,
... idl=0,
... odl=np.random.uniform(0.1, 0.2, size),
... material=st_steel,
... is_random=["odl", "material"],
... )
>>> elms["odl"] = np.linspace(0.1, 0.2, 5)
>>> elms["odl"]
array([0.1 , 0.125, 0.15 , 0.175, 0.2 ])
"""
if key not in self.attribute_dict.keys():
raise KeyError("Object does not have parameter: {}.".format(key))
self.attribute_dict[key] = value
def random_var(self, is_random, *args):
"""Generate a list of objects as random attributes.
This function creates a list of objects with random values for selected
attributes from ross.ShaftElement.
Parameters
----------
is_random : list
List of the object attributes to become stochastic.
*args : dict
Dictionary for instantiating the ross.ShaftElement class.
The attributes that are supposed to be stochastic should be
set as lists of random variables.
Returns
-------
f_list : generator
Generator of random objects.
"""
args_dict = args[0]
new_args = []
for i in range(len(args_dict[is_random[0]])):
arg = []
for key, value in args_dict.items():
if key in is_random:
arg.append(value[i])
else:
arg.append(value)
new_args.append(arg)
f_list = (ShaftElement(*arg) for arg in new_args)
return f_list
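# Illustration (added note): with is_random=["L"] and an attribute_dict
# containing L=[1.0, 1.1], random_var yields two ShaftElement instances, one
# built with L=1.0 and one with L=1.1; attributes not listed in is_random
# are passed unchanged to every instance.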
def plot_random_var(self, var_list=None, histogram_kwargs=None, plot_kwargs=None):
"""Plot histogram and the PDF.
This function creates a histogram to display the random variable
distribution.
Parameters
----------
var_list : list, optional
List of random variables, in string format, to plot.
Default is plotting all the random variables.
histogram_kwargs : dict, optional
Additional keyword arguments can be passed to change
the plotly.go.histogram (e.g. histnorm="probability density", nbinsx=20...).
*See the Plotly API for more information.
plot_kwargs : dict, optional
Additional keyword arguments can be passed to change the plotly go.figure
(e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...).
*See the Plotly API for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
A figure with the histogram plots.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> fig = elm.plot_random_var(["odl"])
>>> # fig.show()
"""
label = dict(
L="Length",
idl="Left inner diameter",
odl="Left outer diameter",
idr="Right inner diameter",
odr="Right outer diameter",
)
is_random = list(self.is_random)  # work on a copy so the object is not mutated
if "material" in is_random:
is_random.remove("material")
if var_list is None:
var_list = is_random
elif not all(var in is_random for var in var_list):
raise ValueError(
    "Not a random variable. Select variables from {}".format(
        is_random
    )
)
histogram_kwargs = {} if histogram_kwargs is None else histogram_kwargs
plot_kwargs = {} if plot_kwargs is None else plot_kwargs
return plot_histogram(
    self.attribute_dict, label, var_list,
    histogram_kwargs=histogram_kwargs, plot_kwargs=plot_kwargs,
)
def st_shaft_example():
"""Return an instance of a simple random shaft element.
The purpose is to make available a simple model so that doctest can be
written using it.
Returns
-------
elm : ross.stochastic.ST_ShaftElement
An instance of a random shaft element object.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> len(list(iter(elm)))
2
"""
from ross.materials import steel
elm = ST_ShaftElement(
L=[1.0, 1.1],
idl=0.0,
odl=[0.1, 0.2],
material=steel,
is_random=["L", "odl"],
)
return elm
| 32.28
| 88
| 0.547354
| 10,259
| 0.908037
| 0
| 0
| 1,258
| 0.111347
| 0
| 0
| 7,978
| 0.706143
|
839ec849aea4ca2defce43d38650cfab96daff56
| 2,873
|
py
|
Python
|
sympy/benchmarks/bench_symbench.py
|
vprusso/sympy
|
d5aa27ec88bb076f59087aada97d99bfff8b2f4c
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/benchmarks/bench_symbench.py
|
vprusso/sympy
|
d5aa27ec88bb076f59087aada97d99bfff8b2f4c
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/benchmarks/bench_symbench.py
|
vprusso/sympy
|
d5aa27ec88bb076f59087aada97d99bfff8b2f4c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function, division
from sympy.core.compatibility import xrange
from random import random
from sympy import factor, I, Integer, pi, simplify, sin, sqrt, Symbol, sympify
from sympy.abc import x, y, z
from timeit import default_timer as clock
def bench_R1():
"real(f(f(f(f(f(f(f(f(f(f(i/2)))))))))))"
def f(z):
return sqrt(Integer(1)/3)*z**2 + I/3
e = f(f(f(f(f(f(f(f(f(f(I/2)))))))))).as_real_imag()[0]
def bench_R2():
"Hermite polynomial hermite(15, y)"
def hermite(n, y):
if n == 1:
return 2*y
if n == 0:
return 1
return (2*y*hermite(n - 1, y) - 2*(n - 1)*hermite(n - 2, y)).expand()
a = hermite(15, y)
def bench_R3():
"a = [bool(f==f) for _ in range(10)]"
f = x + y + z
a = [bool(f == f) for _ in range(10)]
def bench_R4():
# we don't have Tuples
pass
def bench_R5():
"blowup(L, 8); L=uniq(L)"
def blowup(L, n):
for i in range(n):
L.append( (L[i] + L[i + 1]) * L[i + 2] )
def uniq(x):
v = set(x)
return v
L = [x, y, z]
blowup(L, 8)
L = uniq(L)
def bench_R6():
"sum(simplify((x+sin(i))/x+(x-sin(i))/x) for i in xrange(100))"
s = sum(simplify((x + sin(i))/x + (x - sin(i))/x) for i in xrange(100))
def bench_R7():
"[f.subs(x, random()) for _ in xrange(10**4)]"
f = x**24 + 34*x**12 + 45*x**3 + 9*x**18 + 34*x**10 + 32*x**21
a = [f.subs(x, random()) for _ in xrange(10**4)]
def bench_R8():
"right(x^2,0,5,10^4)"
def right(f, a, b, n):
a = sympify(a)
b = sympify(b)
n = sympify(n)
x = f.atoms(Symbol).pop()
Deltax = (b - a)/n
c = a
est = 0
for i in range(n):
c += Deltax
est += f.subs(x, c)
return est*Deltax
a = right(x**2, 0, 5, 10**4)
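# Note (added): right() is a right-endpoint Riemann sum, so bench_R8
# approximates the integral of x**2 over [0, 5], whose exact value is
# 5**3/3 = 125/3, using 10**4 rectangles.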
def _bench_R9():
"factor(x^20 - pi^5*y^20)"
factor(x**20 - pi**5*y**20)
def bench_R10():
"v = [-pi,-pi+1/10..,pi]"
def srange(min, max, step):
v = [min]
while (max - v[-1]).evalf() > 0:
v.append(v[-1] + step)
return v[:-1]
v = srange(-pi, pi, sympify(1)/10)
def bench_R11():
"a = [random() + random()*I for w in [0..1000]]"
a = [random() + random()*I for w in range(1000)]
def bench_S1():
"e=(x+y+z+1)**7;f=e*(e+1);f.expand()"
e = (x + y + z + 1)**7
f = e*(e + 1)
f = f.expand()
if __name__ == '__main__':
benchmarks = [
bench_R1,
bench_R2,
bench_R3,
bench_R5,
bench_R6,
bench_R7,
bench_R8,
#_bench_R9,
bench_R10,
bench_R11,
#bench_S1,
]
report = []
for b in benchmarks:
t = clock()
b()
t = clock() - t
print("%s%65s: %f" % (b.__name__, b.__doc__, t))
| 21.765152
| 78
| 0.484859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 490
| 0.170553
|
839f729c16c6176bd93a48ef474f3a2349aae65f
| 774
|
py
|
Python
|
tests/test_capture.py
|
atac/c10-tools
|
278acfaab8bb42dff448fe1fbe08e7b7f75b1752
|
[
"BSD-3-Clause"
] | 5
|
2021-06-10T01:32:06.000Z
|
2021-12-22T23:05:52.000Z
|
tests/test_capture.py
|
atac/c10-tools
|
278acfaab8bb42dff448fe1fbe08e7b7f75b1752
|
[
"BSD-3-Clause"
] | 17
|
2020-08-03T16:35:26.000Z
|
2022-03-30T17:29:41.000Z
|
tests/test_capture.py
|
atac/c10-tools
|
278acfaab8bb42dff448fe1fbe08e7b7f75b1752
|
[
"BSD-3-Clause"
] | null | null | null |
from tempfile import NamedTemporaryFile
import os
import pytest
from c10_tools.capture import main
@pytest.fixture
def args():
return {'<infile>': pytest.PCAP,
'<outfile>': NamedTemporaryFile('wb').name,
'-f': True,
'-q': True,
'-t': pytest.TMATS}
def test_overwrite(args):
main(args)
assert os.stat(args['<outfile>']).st_size == 7904
def test_checks_exists(args):
args['-f'] = False
with open(args['<outfile>'], 'w+b'), pytest.raises(SystemExit):
main(args)
def test_tmats(args):
args['-t'] = pytest.TMATS
main(args)
expected = open(pytest.TMATS, 'rb').read().replace(b'\r\n', b'\n')
with open(args['<outfile>'], 'rb') as f:
assert f.read(6351)[28:] == expected
| 22.114286
| 70
| 0.595607
| 0
| 0
| 0
| 0
| 200
| 0.258398
| 0
| 0
| 103
| 0.133075
|
83a0c16822a34d798946b9dc4c088ff91cd8ad8d
| 330
|
py
|
Python
|
e-olymp/p15/373.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 25
|
2015-01-21T16:39:18.000Z
|
2021-05-24T07:01:24.000Z
|
e-olymp/p15/373.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 2
|
2020-09-30T19:39:36.000Z
|
2020-10-01T17:15:16.000Z
|
e-olymp/p15/373.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 15
|
2015-01-21T16:39:27.000Z
|
2020-10-01T17:00:22.000Z
|
# TODO: tle
import re
import sys
m = {"?": ".?", "*": ".*?"}
def score(s):
return sum(ord(e) - ord("a") + 1 for e in s)
def tr(s):
return "(?=({}))".format("".join(m.get(e, e) for e in s))
for ln in sys.stdin:
p, s = ln.rstrip().split()
res = re.findall(tr(p), s)
print(score(min(res)) if res else -1)
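# Note (a sketch of the trick used in tr above): wrapping a pattern in a
# zero-width lookahead "(?=(...))" makes re.findall return every overlapping
# match as the captured group, one per starting position, e.g.
#   re.findall("(?=(a.*?b))", "aabb")  # -> ["aab", "ab"]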
| 15.714286
| 61
| 0.50303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.124242
|
83a1781aa9bd0a390115621e83bce23ea229c38b
| 1,025
|
py
|
Python
|
dino.py
|
panpepson/DinoBot-chroma-offline
|
a6587555bf52c1545e69d79a4d30f19ad911eff2
|
[
"MIT"
] | null | null | null |
dino.py
|
panpepson/DinoBot-chroma-offline
|
a6587555bf52c1545e69d79a4d30f19ad911eff2
|
[
"MIT"
] | 3
|
2021-06-08T21:14:50.000Z
|
2022-03-12T00:22:40.000Z
|
dino.py
|
panpepson/DinoBot-chroma-offline
|
a6587555bf52c1545e69d79a4d30f19ad911eff2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import numpy as np
import cv2
from mss.linux import MSS as mss
from PIL import Image
import time
import pyautogui as pg
#mon = {'top': 480, 'left': 130, 'width': 70, 'height': 35}
mon = {'top': 200, 'left': 410, 'width': 50, 'height': 30} #git-b01
def process_image(original_image):
processed_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_image = cv2.Canny(processed_image, threshold1=200, threshold2=300)
return processed_image
def screen_record():
sct = mss()
last_time = time.time()
    while True:
img = sct.grab(mon)
print('loop took {} seconds'.format(time.time() - last_time))
last_time = time.time()
img = np.array(img)
processed_image = process_image(img)
mean = np.mean(processed_image)
print('mean = ', mean)
        if mean != 0:
pg.press('space')
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
screen_record()
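# Note (a sketch of the jump test above): Canny returns an all-zero image for
# an edge-free region, so its mean is 0.0; any edge inside the monitored box
# makes the mean positive and triggers the spacebar press:
#   np.mean(np.zeros((30, 50))) == 0.0  # no edges detected -> no jump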
| 26.282051
| 80
| 0.62439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.148293
|
83a1f16b819638b10f8073878aae0693547c3238
| 5,085
|
py
|
Python
|
trove/tests/scenario/groups/instance_create_group.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 1
|
2019-09-20T08:31:54.000Z
|
2019-09-20T08:31:54.000Z
|
trove/tests/scenario/groups/instance_create_group.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
trove/tests/scenario/groups/instance_create_group.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_create_group"
class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'instance_create_runners'
_runner_cls = 'InstanceCreateRunner'
@test(depends_on_groups=["services.initialize"],
runs_after_groups=[PRE_INSTANCES],
groups=[GROUP, groups.INST_CREATE])
class InstanceCreateGroup(TestGroup):
"""Test Instance Create functionality."""
def __init__(self):
super(InstanceCreateGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def create_empty_instance(self):
"""Create an empty instance."""
self.test_runner.run_empty_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_INIT_CREATE])
class InstanceInitCreateGroup(TestGroup):
"""Test Instance Init Create functionality."""
def __init__(self):
super(InstanceInitCreateGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def create_initial_configuration(self):
"""Create a configuration group for a new initialized instance."""
self.test_runner.run_initial_configuration_create()
@test(runs_after=[create_initial_configuration])
def create_initialized_instance(self):
"""Create an instance with initial properties."""
self.test_runner.run_initialized_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_CREATE_WAIT],
runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE,
groups.INST_ERROR_DELETE])
class InstanceCreateWaitGroup(TestGroup):
"""Test that Instance Create Completes."""
def __init__(self):
super(InstanceCreateWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_instance(self):
"""Waiting for main instance to become active."""
self.test_runner.run_wait_for_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE],
groups=[GROUP, groups.INST_INIT_CREATE_WAIT],
runs_after_groups=[groups.INST_CREATE_WAIT])
class InstanceInitCreateWaitGroup(TestGroup):
"""Test that Instance Init Create Completes."""
def __init__(self):
super(InstanceInitCreateWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_init_instance(self):
"""Waiting for init instance to become active."""
self.test_runner.run_wait_for_init_instance()
@test(depends_on=[wait_for_init_instance])
def add_initialized_instance_data(self):
"""Add data to the initialized instance."""
self.test_runner.run_add_initialized_instance_data()
@test(runs_after=[add_initialized_instance_data])
def validate_initialized_instance(self):
"""Validate the initialized instance data and properties."""
self.test_runner.run_validate_initialized_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE_WAIT],
groups=[GROUP, groups.INST_INIT_DELETE])
class InstanceInitDeleteGroup(TestGroup):
"""Test Initialized Instance Delete functionality."""
def __init__(self):
super(InstanceInitDeleteGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def delete_initialized_instance(self):
"""Delete the initialized instance."""
self.test_runner.run_initialized_instance_delete()
@test(depends_on_groups=[groups.INST_INIT_DELETE],
runs_after_groups=[groups.INST_ERROR_DELETE],
groups=[GROUP, groups.INST_INIT_DELETE_WAIT])
class InstanceInitDeleteWaitGroup(TestGroup):
"""Test that Initialized Instance Delete Completes."""
def __init__(self):
super(InstanceInitDeleteWaitGroup, self).__init__(
InstanceCreateRunnerFactory.instance())
@test
def wait_for_init_delete(self):
"""Wait for the initialized instance to be gone."""
self.test_runner.run_wait_for_init_delete()
@test(runs_after=[wait_for_init_delete])
def delete_initial_configuration(self):
"""Delete the initial configuration group."""
self.test_runner.run_initial_configuration_delete()
| 34.828767
| 78
| 0.73117
| 3,322
| 0.653294
| 0
| 0
| 4,026
| 0.79174
| 0
| 0
| 1,476
| 0.290265
|
83a21e9300920f882ecbddf58f262c3769b6771a
| 20,066
|
py
|
Python
|
reco_utils/recommender/deeprec/models/dkn.py
|
suhoy901/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 28
|
2021-11-12T08:26:40.000Z
|
2022-03-27T07:21:24.000Z
|
reco_utils/recommender/deeprec/models/dkn.py
|
shobhit-agarwal/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 5
|
2021-11-10T02:58:32.000Z
|
2022-03-21T16:13:11.000Z
|
reco_utils/recommender/deeprec/models/dkn.py
|
shobhit-agarwal/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 9
|
2021-11-03T07:14:47.000Z
|
2022-02-22T13:42:04.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import tensorflow as tf
from reco_utils.recommender.deeprec.models.base_model import BaseModel
__all__ = ["DKN"]
class DKN(BaseModel):
"""DKN model (Deep Knowledge-Aware Network)
H. Wang, F. Zhang, X. Xie and M. Guo, "DKN: Deep Knowledge-Aware Network for News
Recommendation", in Proceedings of the 2018 World Wide Web Conference on World
Wide Web, 2018.
"""
def __init__(self, hparams, iterator_creator):
"""Initialization steps for DKN.
Compared with the BaseModel, DKN requires two different pre-computed embeddings,
i.e. word embedding and entity embedding.
After creating these two embedding variables, BaseModel's __init__ method will be called.
Args:
hparams (obj): Global hyper-parameters.
iterator_creator (obj): DKN data loader class.
"""
self.graph = tf.Graph()
with self.graph.as_default():
with tf.name_scope("embedding"):
word2vec_embedding = self._init_embedding(hparams.wordEmb_file)
self.embedding = tf.Variable(
word2vec_embedding, trainable=True, name="word"
)
if hparams.use_entity:
e_embedding = self._init_embedding(hparams.entityEmb_file)
W = tf.Variable(
tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1)
)
b = tf.Variable(tf.zeros([hparams.dim]))
e_embedding_transformed = tf.nn.tanh(tf.matmul(e_embedding, W) + b)
self.entity_embedding = tf.Variable(
e_embedding_transformed, trainable=True, name="entity"
)
else:
self.entity_embedding = tf.Variable(
tf.constant(
0.0,
shape=[hparams.entity_size, hparams.dim],
dtype=tf.float32,
),
trainable=True,
name="entity",
)
if hparams.use_context:
c_embedding = self._init_embedding(hparams.contextEmb_file)
W = tf.Variable(
tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1)
)
b = tf.Variable(tf.zeros([hparams.dim]))
c_embedding_transformed = tf.nn.tanh(tf.matmul(c_embedding, W) + b)
self.context_embedding = tf.Variable(
c_embedding_transformed, trainable=True, name="context"
)
else:
self.context_embedding = tf.Variable(
tf.constant(
0.0,
shape=[hparams.entity_size, hparams.dim],
dtype=tf.float32,
),
trainable=True,
name="context",
)
super().__init__(hparams, iterator_creator, graph=self.graph)
def _init_embedding(self, file_path):
"""Load pre-trained embeddings as a constant tensor.
Args:
file_path (str): the pre-trained embeddings filename.
Returns:
obj: A constant tensor.
"""
return tf.constant(np.load(file_path).astype(np.float32))
def _l2_loss(self):
hparams = self.hparams
l2_loss = tf.zeros([1], dtype=tf.float32)
# embedding_layer l2 loss
l2_loss = tf.add(
l2_loss, tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.embedding))
)
if hparams.use_entity:
l2_loss = tf.add(
l2_loss,
tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.entity_embedding)),
)
if hparams.use_entity and hparams.use_context:
l2_loss = tf.add(
l2_loss,
tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.context_embedding)),
)
params = self.layer_params
for param in params:
l2_loss = tf.add(
l2_loss, tf.multiply(hparams.layer_l2, tf.nn.l2_loss(param))
)
return l2_loss
def _l1_loss(self):
hparams = self.hparams
l1_loss = tf.zeros([1], dtype=tf.float32)
# embedding_layer l2 loss
l1_loss = tf.add(
l1_loss, tf.multiply(hparams.embed_l1, tf.norm(self.embedding, ord=1))
)
if hparams.use_entity:
l1_loss = tf.add(
l1_loss,
tf.multiply(hparams.embed_l1, tf.norm(self.entity_embedding, ord=1)),
)
if hparams.use_entity and hparams.use_context:
l1_loss = tf.add(
l1_loss,
tf.multiply(hparams.embed_l1, tf.norm(self.context_embedding, ord=1)),
)
params = self.layer_params
for param in params:
l1_loss = tf.add(
l1_loss, tf.multiply(hparams.layer_l1, tf.norm(param, ord=1))
)
return l1_loss
def _build_graph(self):
hparams = self.hparams
self.keep_prob_train = 1 - np.array(hparams.dropout)
self.keep_prob_test = np.ones_like(hparams.dropout)
with tf.compat.v1.variable_scope("DKN") as scope:
logit = self._build_dkn()
return logit
def _build_dkn(self):
"""The main function to create DKN's logic.
Returns:
obj: Prediction score made by the DKN model.
"""
hparams = self.hparams
# build attention model for clicked news and candidate news
click_news_embed_batch, candidate_news_embed_batch = self._build_pair_attention(
self.iterator.candidate_news_index_batch,
self.iterator.candidate_news_entity_index_batch,
self.iterator.click_news_index_batch,
self.iterator.click_news_entity_index_batch,
hparams,
)
nn_input = tf.concat(
[click_news_embed_batch, candidate_news_embed_batch], axis=1
)
dnn_channel_part = 2
last_layer_size = dnn_channel_part * self.num_filters_total
layer_idx = 0
hidden_nn_layers = []
hidden_nn_layers.append(nn_input)
with tf.compat.v1.variable_scope(
"nn_part", initializer=self.initializer
) as scope:
for idx, layer_size in enumerate(hparams.layer_sizes):
curr_w_nn_layer = tf.compat.v1.get_variable(
name="w_nn_layer" + str(layer_idx),
shape=[last_layer_size, layer_size],
dtype=tf.float32,
)
curr_b_nn_layer = tf.compat.v1.get_variable(
name="b_nn_layer" + str(layer_idx),
shape=[layer_size],
dtype=tf.float32,
)
curr_hidden_nn_layer = tf.compat.v1.nn.xw_plus_b(
hidden_nn_layers[layer_idx], curr_w_nn_layer, curr_b_nn_layer
)
if hparams.enable_BN is True:
curr_hidden_nn_layer = tf.layers.batch_normalization(
curr_hidden_nn_layer,
momentum=0.95,
epsilon=0.0001,
training=self.is_train_stage,
)
activation = hparams.activation[idx]
curr_hidden_nn_layer = self._active_layer(
logit=curr_hidden_nn_layer, activation=activation
)
hidden_nn_layers.append(curr_hidden_nn_layer)
layer_idx += 1
last_layer_size = layer_size
self.layer_params.append(curr_w_nn_layer)
self.layer_params.append(curr_b_nn_layer)
w_nn_output = tf.compat.v1.get_variable(
name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32
)
b_nn_output = tf.compat.v1.get_variable(
name="b_nn_output", shape=[1], dtype=tf.float32
)
self.layer_params.append(w_nn_output)
self.layer_params.append(b_nn_output)
nn_output = tf.compat.v1.nn.xw_plus_b(
hidden_nn_layers[-1], w_nn_output, b_nn_output
)
return nn_output
def _build_pair_attention(
self,
candidate_word_batch,
candidate_entity_batch,
click_word_batch,
click_entity_batch,
hparams,
):
"""This function learns the candidate news article's embedding and user embedding.
User embedding is generated from click history and also depends on the candidate news article via attention mechanism.
Article embedding is generated via KCNN module.
Args:
candidate_word_batch (obj): tensor word indices for constructing news article
candidate_entity_batch (obj): tensor entity values for constructing news article
click_word_batch (obj): tensor word indices for constructing user clicked history
click_entity_batch (obj): tensor entity indices for constructing user clicked history
hparams (obj): global hyper-parameters
Returns:
click_field_embed_final_batch: user embedding
news_field_embed_final_batch: candidate news article embedding
"""
doc_size = hparams.doc_size
attention_hidden_sizes = hparams.attention_layer_sizes
clicked_words = tf.reshape(click_word_batch, shape=[-1, doc_size])
clicked_entities = tf.reshape(click_entity_batch, shape=[-1, doc_size])
with tf.compat.v1.variable_scope(
"attention_net", initializer=self.initializer
) as scope:
# use kims cnn to get conv embedding
with tf.compat.v1.variable_scope(
"kcnn", initializer=self.initializer, reuse=tf.compat.v1.AUTO_REUSE
) as cnn_scope:
news_field_embed = self._kims_cnn(
candidate_word_batch, candidate_entity_batch, hparams
)
click_field_embed = self._kims_cnn(
clicked_words, clicked_entities, hparams
)
click_field_embed = tf.reshape(
click_field_embed,
shape=[
-1,
hparams.history_size,
hparams.num_filters * len(hparams.filter_sizes),
],
)
avg_strategy = False
if avg_strategy:
click_field_embed_final = tf.reduce_mean(
click_field_embed, axis=1, keepdims=True
)
else:
news_field_embed = tf.expand_dims(news_field_embed, 1)
news_field_embed_repeat = tf.add(
tf.zeros_like(click_field_embed), news_field_embed
)
attention_x = tf.concat(
axis=-1, values=[click_field_embed, news_field_embed_repeat]
)
attention_x = tf.reshape(
attention_x, shape=[-1, self.num_filters_total * 2]
)
attention_w = tf.compat.v1.get_variable(
name="attention_hidden_w",
shape=[self.num_filters_total * 2, attention_hidden_sizes],
dtype=tf.float32,
)
attention_b = tf.compat.v1.get_variable(
name="attention_hidden_b",
shape=[attention_hidden_sizes],
dtype=tf.float32,
)
curr_attention_layer = tf.compat.v1.nn.xw_plus_b(
attention_x, attention_w, attention_b
)
if hparams.enable_BN is True:
curr_attention_layer = tf.layers.batch_normalization(
curr_attention_layer,
momentum=0.95,
epsilon=0.0001,
training=self.is_train_stage,
)
activation = hparams.attention_activation
curr_attention_layer = self._active_layer(
logit=curr_attention_layer, activation=activation
)
attention_output_w = tf.compat.v1.get_variable(
name="attention_output_w",
shape=[attention_hidden_sizes, 1],
dtype=tf.float32,
)
attention_output_b = tf.compat.v1.get_variable(
name="attention_output_b", shape=[1], dtype=tf.float32
)
attention_weight = tf.compat.v1.nn.xw_plus_b(
curr_attention_layer, attention_output_w, attention_output_b
)
attention_weight = tf.reshape(
attention_weight, shape=[-1, hparams.history_size, 1]
)
norm_attention_weight = tf.nn.softmax(attention_weight, axis=1)
click_field_embed_final = tf.reduce_sum(
tf.multiply(click_field_embed, norm_attention_weight),
axis=1,
keepdims=True,
)
if attention_w not in self.layer_params:
self.layer_params.append(attention_w)
if attention_b not in self.layer_params:
self.layer_params.append(attention_b)
if attention_output_w not in self.layer_params:
self.layer_params.append(attention_output_w)
if attention_output_b not in self.layer_params:
self.layer_params.append(attention_output_b)
self.news_field_embed_final_batch = tf.squeeze(news_field_embed)
click_field_embed_final_batch = tf.squeeze(click_field_embed_final)
return click_field_embed_final_batch, self.news_field_embed_final_batch
def _kims_cnn(self, word, entity, hparams):
"""The KCNN module. KCNN is an extension of traditional CNN that incorporates symbolic knowledge from
a knowledge graph into sentence representation learning.
Args:
word (obj): word indices for the sentence.
entity (obj): entity indices for the sentence. Entities are aligned with words in the sentence.
hparams (obj): global hyper-parameters.
Returns:
obj: Sentence representation.
"""
# kims cnn parameter
filter_sizes = hparams.filter_sizes
num_filters = hparams.num_filters
dim = hparams.dim
embedded_chars = tf.nn.embedding_lookup(self.embedding, word)
if hparams.use_entity and hparams.use_context:
entity_embedded_chars = tf.nn.embedding_lookup(
self.entity_embedding, entity
)
context_embedded_chars = tf.nn.embedding_lookup(
self.context_embedding, entity
)
concat = tf.concat(
[embedded_chars, entity_embedded_chars, context_embedded_chars], axis=-1
)
elif hparams.use_entity:
entity_embedded_chars = tf.nn.embedding_lookup(
self.entity_embedding, entity
)
concat = tf.concat([embedded_chars, entity_embedded_chars], axis=-1)
else:
concat = embedded_chars
concat_expanded = tf.expand_dims(concat, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.compat.v1.variable_scope(
"conv-maxpool-%s" % filter_size, initializer=self.initializer
):
# Convolution Layer
if hparams.use_entity and hparams.use_context:
filter_shape = [filter_size, dim * 3, 1, num_filters]
elif hparams.use_entity:
filter_shape = [filter_size, dim * 2, 1, num_filters]
else:
filter_shape = [filter_size, dim, 1, num_filters]
W = tf.compat.v1.get_variable(
name="W" + "_filter_size_" + str(filter_size),
shape=filter_shape,
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(uniform=False),
)
b = tf.compat.v1.get_variable(
name="b" + "_filter_size_" + str(filter_size),
shape=[num_filters],
dtype=tf.float32,
)
if W not in self.layer_params:
self.layer_params.append(W)
if b not in self.layer_params:
self.layer_params.append(b)
conv = tf.nn.conv2d(
concat_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv",
)
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool2d(
h,
ksize=[1, hparams.doc_size - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding="VALID",
name="pool",
)
pooled_outputs.append(pooled)
# Combine all the pooled features
# self.num_filters_total is the kims cnn output dimension
self.num_filters_total = num_filters * len(filter_sizes)
h_pool = tf.concat(pooled_outputs, axis=-1)
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
return h_pool_flat
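    # Shape sketch for _kims_cnn above (hypothetical hyper-parameters): with
    # doc_size=10, dim=100, filter_sizes=[1, 2, 3], num_filters=100 and
    # use_entity=False, each conv output is [batch, 10 - k + 1, 1, 100],
    # max-pooling reduces it to [batch, 1, 1, 100], and h_pool_flat becomes
    # [batch, 300], i.e. num_filters * len(filter_sizes).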
def infer_embedding(self, sess, feed_dict):
"""Infer document embedding in feed_dict with current model.
Args:
sess (obj): The model session object.
feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values.
Returns:
list: news embedding in a batch
"""
feed_dict[self.layer_keeps] = self.keep_prob_test
feed_dict[self.is_train_stage] = False
return sess.run([self.news_field_embed_final_batch], feed_dict=feed_dict)
def run_get_embedding(self, infile_name, outfile_name):
"""infer document embedding with current model.
Args:
infile_name (str): Input file name, format is [Newsid] [w1,w2,w3...] [e1,e2,e3...]
outfile_name (str): Output file name, format is [Newsid] [embedding]
Returns:
obj: An instance of self.
"""
load_sess = self.sess
with tf.io.gfile.GFile(outfile_name, "w") as wt:
for (
batch_data_input,
newsid_list,
data_size,
) in self.iterator.load_infer_data_from_file(infile_name):
news_embedding = self.infer_embedding(load_sess, batch_data_input)[0]
for i in range(data_size):
wt.write(
newsid_list[i]
+ " "
+ ",".join(
[
str(embedding_value)
for embedding_value in news_embedding[i]
]
)
+ "\n"
)
return self
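# A minimal usage sketch (not part of the original file; the file names are
# hypothetical, and hparams / the iterator class come from the deeprec
# helpers such as prepare_hparams and the DKN data loader):
#   model = DKN(hparams, iterator_creator)
#   model.fit(train_file, valid_file)
#   model.run_get_embedding("doc_feature.txt", "doc_embedding.txt")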
| 41.630705
| 126
| 0.546397
| 19,834
| 0.988438
| 0
| 0
| 0
| 0
| 0
| 0
| 3,751
| 0.186933
|
83a22a3ba1efc66d7ca002b326c88281fa4ad1f6
| 2,193
|
py
|
Python
|
LeNet-5/LeNet-5.py
|
huangjunxiong11/TF2
|
6de61c28c59ef34be7e53762b3a759da152642f7
|
[
"MIT"
] | null | null | null |
LeNet-5/LeNet-5.py
|
huangjunxiong11/TF2
|
6de61c28c59ef34be7e53762b3a759da152642f7
|
[
"MIT"
] | null | null | null |
LeNet-5/LeNet-5.py
|
huangjunxiong11/TF2
|
6de61c28c59ef34be7e53762b3a759da152642f7
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses
# 1. Dataset preparation
(x, y), (x_val, y_val) = datasets.mnist.load_data()  # load the dataset; returns two tuples: the training set and the test set
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # convert to a tensor and scale to the 0~1 range
y = tf.convert_to_tensor(y, dtype=tf.int32)  # convert the labels to a tensor
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))  # build the dataset object
train_dataset = train_dataset.batch(32).repeat(10)  # batch size of 32; repeat the training set 10 times (10 epochs)
# 2. Build the network
network = Sequential([  # network container
    layers.Conv2D(6, kernel_size=3, strides=1),  # first convolution layer, six 3*3*1 kernels
    layers.MaxPooling2D(pool_size=2, strides=2),  # pooling layer, 2*2 window, stride 2
    layers.ReLU(),  # activation function
    layers.Conv2D(16, kernel_size=3, strides=1),  # second convolution layer, sixteen 3*3*6 kernels
    layers.MaxPooling2D(pool_size=2, strides=2),  # pooling layer
    layers.ReLU(),  # activation function
    layers.Flatten(),  # flatten for the fully connected layers
    layers.Dense(120, activation='relu'),  # fully connected layer, 120 units
    layers.Dense(84, activation='relu'),  # fully connected layer, 84 units
    layers.Dense(10)  # output layer, 10 units
])
network.build(input_shape=(None, 28, 28, 1))  # define the input: 28*28 images with 1 channel
network.summary()  # print the number of trainable parameters per layer
# 3. Model training (compute gradients, iteratively update the network parameters)
optimizer = optimizers.SGD(lr=0.01)  # mini-batch stochastic gradient descent, learning rate = 0.01
acc_meter = metrics.Accuracy()  # create an accuracy meter
for step, (x, y) in enumerate(train_dataset):  # feed one batch of data per training step
    with tf.GradientTape() as tape:  # gradient recording context
        x = tf.reshape(x, (32, 28, 28, 1))  # reshape to [32,28,28,1], adding the channel dimension for the conv layers
        # x = tf.expand_dims(x, axis=3)
        out = network(x)  # forward pass, output shape [b, 10]
        y_onehot = tf.one_hot(y, depth=10)  # one-hot encoding
        loss = tf.square(out - y_onehot)
        loss = tf.reduce_sum(loss) / 32  # mean squared error loss; the 32 here is the batch size
    grads = tape.gradient(loss, network.trainable_variables)  # compute the gradient of each network parameter
    optimizer.apply_gradients(zip(grads, network.trainable_variables))  # update the network parameters
    acc_meter.update_state(tf.argmax(out, axis=1), y)  # compare predictions with labels to accumulate accuracy
    if step % 200 == 0:  # print the results every 200 steps
        print('Step', step, ': Loss is: ', float(loss), ' Accuracy: ', acc_meter.result().numpy())  # read out the meter
        acc_meter.reset_states()  # reset the meter
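# Note (a sketch, not part of the original script): for 10-class classification,
# cross-entropy is usually preferred over the MSE loss above; with the linear
# output layer it could be written as:
#   loss = tf.reduce_mean(losses.categorical_crossentropy(y_onehot, out, from_logits=True))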
| 47.673913
| 106
| 0.674875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,312
| 0.454766
|
83a488662edcd23d97eb9c55b24ca7fbca5d6193
| 7,727
|
py
|
Python
|
gw_full_latest/CloudTTN.py
|
rendikanyut/LowCostLoRaGw
|
a4bce0cf123ffeb48a01e779a62d76b1bf1c4486
|
[
"Linux-OpenIB"
] | 654
|
2016-03-01T08:43:24.000Z
|
2022-03-22T06:55:27.000Z
|
gw_full_latest/CloudTTN.py
|
rendikanyut/LowCostLoRaGw
|
a4bce0cf123ffeb48a01e779a62d76b1bf1c4486
|
[
"Linux-OpenIB"
] | 315
|
2016-02-25T10:36:11.000Z
|
2022-03-31T20:59:21.000Z
|
gw_full_latest/CloudTTN.py
|
rendikanyut/LowCostLoRaGw
|
a4bce0cf123ffeb48a01e779a62d76b1bf1c4486
|
[
"Linux-OpenIB"
] | 388
|
2016-02-16T14:40:36.000Z
|
2022-03-15T04:03:20.000Z
|
#-------------------------------------------------------------------------------
# Part of this Python script is taken from the Pycom NanoGateway
# https://github.com/pycom/pycom-libraries/tree/master/examples/lorawan-nano-gateway
#
# Adapted by Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import binascii
import json
import os
import socket
import time
import datetime
from dateutil import parser
import calendar
import sys
#don't generate .pyc files (no caching of imported modules) so changes in key_* files take effect dynamically
sys.dont_write_bytecode = True
import key_TTN as key_LoRaWAN
netserv='TTN'
try:
key_LoRaWAN.source_list
except AttributeError:
key_LoRaWAN.source_list=[]
try:
key_LoRaWAN.lorawan_server
except AttributeError:
key_LoRaWAN.lorawan_server="router.eu.thethings.network"
try:
key_LoRaWAN.lorawan_port
except AttributeError:
key_LoRaWAN.lorawan_port=1700
PROTOCOL_VERSION = 2
PUSH_DATA = 0
PUSH_ACK = 1
PULL_DATA = 2
PULL_ACK = 4
PULL_RESP = 3
RX_PK = {
'rxpk': [{
'time': '',
'tmst': 0,
'chan': 0,
'rfch': 0,
'freq': 0,
'stat': 1,
'modu': 'LORA',
'datr': '',
'codr': '4/5',
'rssi': 0,
'lsnr': 0,
'size': 0,
'data': ''
}]
}
TX_ACK_PK = {
'txpk_ack': {
'error': ''
}
}
class LoRaWAN:
def __init__(self, id, frequency, bw, sf, server, port):
self.id = id
self.frequency = frequency
self.sf = sf
self.bw = bw
self.server = server
self.port = port
self.server_ip = None
self.sock = None
def start(self):
self._log('Cloud%s: gw id: {}' % netserv, self.id)
# get the server IP and create an UDP socket
try:
self.server_ip = socket.getaddrinfo(self.server, self.port)[0][-1]
self._log('Cloud%s: Opening UDP socket to {} ({}) port {}...' % netserv, self.server, self.server_ip[0], self.server_ip[1])
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(False)
except Exception as ex:
self._log('Cloud%s: Failed to connect to server: {}' % netserv, ex)
def _sf_bw_to_dr(self, sf, bw):
dr = 'SF' + str(sf)
if bw == 125:
return dr + 'BW125'
elif bw == 250:
return dr + 'BW250'
else:
return dr + 'BW500'
def rx_packet(self, ldata, datalen, tdata, tmst, rssi, snr):
RX_PK["rxpk"][0]["time"] = tdata
if tmst=='':
#in seconds, maybe we should put it in microsecond?
RX_PK["rxpk"][0]["tmst"] = calendar.timegm(time.gmtime())
else:
RX_PK["rxpk"][0]["tmst"] = int(tmst)
RX_PK["rxpk"][0]["freq"] = self.frequency
RX_PK["rxpk"][0]["datr"] = self._sf_bw_to_dr(self.sf, self.bw)
RX_PK["rxpk"][0]["rssi"] = rssi
RX_PK["rxpk"][0]["lsnr"] = snr
RX_PK["rxpk"][0]["data"] = ldata
RX_PK["rxpk"][0]["size"] = datalen
#packet = self._make_node_packet(rx_data, tdata, 0, self.sf, self.bw, rssi, snr)
packet=json.dumps(RX_PK)
self._push_data(packet)
self._log('Cloud%s: Received packet: {}' % netserv, packet)
def _push_data(self, data):
token = os.urandom(2)
packet = bytearray([PROTOCOL_VERSION]) + token + bytearray([PUSH_DATA]) + binascii.unhexlify(self.id) + data
#print ''.join('{:02x}'.format(x) for x in packet)
#self._log('Cloud%s: Try to forward packet: {}' % netserv, packet)
try:
self.sock.sendto(packet, self.server_ip)
self.sock.close()
except Exception as ex:
self._log('Cloud%s: Failed to push uplink packet to server: {}' % netserv, ex)
def _log(self, message, *args):
print('{}'.format(str(message).format(*args)))
# Testing with pau_lorawan_testing/Pau_testing_device 0x26011721
#
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "2019-03-25T18:46:00.528+01:00" "0000B827EBD1B236"
# or
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "`date +%FT%T%z`" "0000B827EBD1B236"
#
# get the base64 encrypted data from `Arduino_LoRa_temp` sending "Hello from UPPA"
#
# Hello from UPPA
# plain payload hex
# 48 65 6C 6C 6F 20 66 72 6F 6D 20 55 50 50 41
# Encrypting
# encrypted payload
# 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58
# calculate MIC with NwkSKey
# transmitted LoRaWAN-like packet:
# MHDR[1] | DevAddr[4] | FCtrl[1] | FCnt[2] | FPort[1] | EncryptedPayload | MIC[4]
# 40 21 17 01 26 00 00 00 01 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58 5E 6D 1E FA
# [base64 LoRaWAN HEADER+CIPHER+MIC]:QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==
def main(ldata, pdata, rdata, tdata, gwid):
# this is common code to process packet information provided by the main gateway script (i.e. post_processing_gw.py)
    # this information is provided in case you need it
arr = map(int,pdata.split(','))
dst=arr[0]
ptype=arr[1]
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
#if lora packet is received with an SX1301 concentrator, then the packet-formatter will pass the tmst field after the date information, separated by '*'
#i.e. "2019-03-25T18:46:00.528+01:00*29641444"
tmst=tdata.count('*')
if (tmst != 0):
tdata_tmp=tdata.split('*')[0]
tmst=tdata.split('*')[1]
tdata=tdata_tmp
else:
tmst=''
#from 2019-05-14T14:53:10.241191+02:00 (similar to command date +%FT%T.%6N%z)
#to 2019-05-14T14:53:10.241191Z (similar to command date +%FT%T.%6NZ)
dt = parser.parse(tdata)
#in case you want to remove microsecond
#tdata = dt.replace(microsecond=0,tzinfo=None).isoformat()+"Z"
tdata = dt.replace(tzinfo=None).isoformat()+"Z"
arr = map(int,rdata.split(','))
rbw=arr[0]
rcr=arr[1]
rsf=arr[2]
rfq=arr[3]/1000.0
#LoRaWAN packet
if dst==256:
src_str="0x%0.8X" % src
        #we force BW125 as TTN cannot handle other bandwidths right now, for instance those of LoRa 2.4GHz
        #TODO: change when TTN supports LoRa 2.4GHz
rbw=125
else:
src_str=str(src)
if (src_str in key_LoRaWAN.source_list) or (len(key_LoRaWAN.source_list)==0):
#build the ttn_gwid which is defined to be gwid[4:10]+"FFFF"+gwid[10:]
        #gwid is normally defined as the eth0 MAC address zero-padded in front: 0000B827EBD1B236
ttn_gwid=gwid[4:10]+"FFFF"+gwid[10:]
ttn = LoRaWAN(
id=ttn_gwid,
frequency=rfq,
bw=rbw,
sf=rsf,
server=key_LoRaWAN.lorawan_server,
port=key_LoRaWAN.lorawan_port)
ttn.start()
ttn.rx_packet(ldata, datalen, tdata, tmst, RSSI, SNR)
else:
print "Source is not is source list, not sending to %s" % netserv
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| 31.410569
| 162
| 0.640611
| 2,568
| 0.332341
| 0
| 0
| 0
| 0
| 0
| 0
| 3,897
| 0.504335
|
83a6888316b1c7a494fc6ea76d1fb65b1293789a
| 2,651
|
py
|
Python
|
pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | null | null | null |
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
class DialogueEntry(tk.Toplevel):
"""
DialogueEntry : tkinter.Toplevel
Dialogue box that allow the user to input a text in a field.
kwargs :
title : title of the dialogue box
text : text displayed in the label of the dialogue box
ok_button_callback : callable that is called when the ok button is pressed
textvariable : tkinter.StringVar that is used in the Entry widget
width : 300 by default, width of the window
height : 70 by default, height of the window
xpos, ypos : screen coordinates. By default, these coordinates place the window in the middle of the screen
methods :
get(self) : gets the string in the entry widget
set(self, value) : sets the string in the entry widget
"""
def __init__(self, *args, title="Please Enter a value", text="Enter a value", ok_button_callback=None, textvariable=None, width=300, height=70, xpos=None, ypos=None, **kwargs):
super().__init__(*args, **kwargs)
w, h = width, height
if xpos is None:
ws = self.winfo_screenwidth() # width of the screen
x = (ws // 2) - (w // 2)
else:
x = xpos
if ypos is None:
hs = self.winfo_screenheight() # height of the screen
y = (hs // 2) - (h // 2)
else:
y = ypos
self.title(title)
self.geometry(f"{w}x{h}+{x}+{y}")
self.resizable(False, False)
self.update()
self.textvar = textvariable or tk.StringVar()
self.ok_button_callback = ok_button_callback
self.entry = tk.Entry(self, textvariable=self.textvar, width=w // 6)
self.ok_btn = tk.Button(self, text="Ok", command=self.on_ok_btn)
self.cancel_btn = tk.Button(self, text="Cancel", command=self.on_cancel_btn)
self.label = tk.Label(self, text=text)
self.protocol("WM_DELETE_WINDOW", self.on_cancel_btn)
self.label.grid(row=0, column=0, columnspan=2, sticky="ew")
self.entry.grid(row=1, column=0, columnspan=2, sticky="ew")
self.ok_btn.grid(row=2, column=0, sticky="ew")
self.cancel_btn.grid(row=2, column=1, sticky="ew")
self.mainloop()
def on_ok_btn(self):
if callable(self.ok_button_callback):
self.ok_button_callback()
self.on_cancel_btn()
def on_cancel_btn(self):
self.destroy()
def get(self):
return self.textvar.get()
def set(self, value):
self.textvar.set(value)
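# A minimal usage sketch (hypothetical values; note that the constructor calls
# mainloop(), so it blocks until the Tk event loop stops):
#   root = tk.Tk(); root.withdraw()
#   var = tk.StringVar()
#   DialogueEntry(root, title="Name", text="Enter a value", textvariable=var,
#                 ok_button_callback=lambda: print(var.get()))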
| 36.819444
| 180
| 0.606941
| 2,573
| 0.970577
| 0
| 0
| 0
| 0
| 0
| 0
| 941
| 0.35496
|
83a72aa00c8eb33d1a3ee2b6393e98bf6532dbf4
| 6,467
|
py
|
Python
|
siqbal/hooks.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | null | null | null |
siqbal/hooks.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | null | null | null |
siqbal/hooks.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "SIqbal"
app_title = "SIqbal"
app_publisher = "RC"
app_description = "Customizations for SIqbal"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developer@rccorner.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = "/assets/siqbal/css/siqbal.css"
# app_include_js = "/assets/siqbal/js/siqbal.js"
# include js, css files in header of web template
# web_include_css = "/assets/siqbal/css/siqbal.css"
# web_include_js = "/assets/siqbal/js/siqbal.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
doctype_js = {
"Address": "public/js/address.js",
"Architect and Contractor": "public/js/architect_and_contractor.js",
"Authorization Rule": "public/js/authorization_rule.js",
"Customer": "public/js/customer.js",
"Delivery Note" : "public/js/delivery_note.js",
"Item": "public/js/item.js",
"Journal Entry": "public/js/journal_entry.js",
"Landed Cost Voucher": "public/js/landed_cost_voucher.js",
"Material Request" : "public/js/material_request.js",
"Opportunity": "public/js/opportunity.js",
"Payment Entry": "public/js/payment_entry.js",
"Property Detail": "public/js/property_detail.js",
"Purchase Invoice" : "public/js/purchase_invoice.js",
"Purchase Order" : "public/js/purchase_order.js",
"Purchase Receipt" : "public/js/purchase_receipt.js",
"Quotation" : "public/js/quotation.js",
"Request for Quotation": "public/js/request_for_quotation.js",
"Salary Slip" : "public/js/salary_slip.js",
"Sales Invoice" : "public/js/sales_invoice.js",
"Sales Order" : "public/js/sales_order.js",
"Stock Entry" : "public/js/stock_entry.js",
"Stock Reconciliation" : "public/js/stock_reconciliation.js",
"Supplier Quotation": "public/js/supplier_quotation.js"
}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "siqbal.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "siqbal.install.before_install"
# after_install = "siqbal.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "siqbal.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
doc_events = {
"Sales Order": {
"validate": [
"siqbal.hook_events.sales_order.set_average_valuation_rate",
# "siqbal.utils.validate_date"
],
"before_submit": "siqbal.hook_events.sales_order.unset_needs_approval",
"before_update_after_submit": "siqbal.hook_events.sales_order.validate_items_rate_and_update_boxes"
},
"Sales Invoice": {
"validate": [
"siqbal.hook_events.sales_invoice.validate_discount_while_return",
"siqbal.hook_events.sales_invoice.validate_taxes_and_charges_from_so",
"siqbal.utils.add_location",
"siqbal.hook_events.sales_invoice.validate_sales_invoice"
# "siqbal.utils.validate_date"
],
"before_insert": "siqbal.hook_events.sales_invoice.set_supplier_details",
"on_submit": [
"siqbal.hook_events.sales_invoice.update_reserved_qty",
"siqbal.hook_events.sales_invoice.create_purchase_invoices_against_sales_taxes",
# "siqbal.utils.change_pi_status"
#"siqbal.hook_events.sales_invoice.validate_user_warehouse"
],
"on_cancel": "siqbal.hook_events.sales_invoice.update_reserved_qty"
},
"Payment Entry": {
"validate": [
"siqbal.hook_events.payment_entry.validate_sales_order",
# "siqbal.hook_events.payment_entry.validate_salaryslip_amount",
#"siqbal.utils.validate_date"
],
# "on_submit": "siqbal.hook_events.payment_entry.update_salaryslip_status",
# "on_cancel": "siqbal.hook_events.payment_entry.update_salaryslip_status"
},
"Stock Entry": {
#"validate": "siqbal.utils.validate_date",
#"on_submit": "siqbal.hook_events.stock_entry.validate_user_warehouse"
},
"Opportunity": {
"validate": "siqbal.utils.send_followup_sms"
},
"Purchase Invoice": {
"validate": "siqbal.utils.add_location"
},
"Purchase Order": {
#"validate": "siqbal.utils.validate_date"
},
"Purchase Receipt": {
#"validate": "siqbal.utils.validate_date"
},
"Stock Reconciliation": {
#"validate": "siqbal.utils.validate_date"
},
# "Quotation": {
#"validate": "siqbal.utils.validate_date"
# },
# "Journal Entry": {
# "before_save": "siqbal.hook_events.journal_entry.set_name"
# }
}
jenv = {
"methods" : [
"get_qrcode_image:siqbal.utils.get_qrcode_image"
]
}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "siqbal.tasks.all"
# ],
# "daily": [
# "siqbal.tasks.daily"
# ],
# "hourly": [
# "siqbal.tasks.hourly"
# ],
# "weekly": [
# "siqbal.tasks.weekly"
# ]
# "monthly": [
# "siqbal.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "siqbal.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "siqbal.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "siqbal.task.get_dashboard_data"
# }
override_doctype_class = {
'Sales Invoice': 'siqbal.hook_events.overide_sales_invoice.OverrideSalesInvoice'
}
| 28.117391
| 101
| 0.706355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,639
| 0.871965
|
83a7ad962e9be184926ad2137bbbb0b45b02188c
| 4,781
|
py
|
Python
|
testing/python/tests/test_dcgm_reader.py
|
omertuc/DCGM
|
904e1600e5924ef60ac5256d492d0b7f6a7244bc
|
[
"Apache-2.0"
] | null | null | null |
testing/python/tests/test_dcgm_reader.py
|
omertuc/DCGM
|
904e1600e5924ef60ac5256d492d0b7f6a7244bc
|
[
"Apache-2.0"
] | null | null | null |
testing/python/tests/test_dcgm_reader.py
|
omertuc/DCGM
|
904e1600e5924ef60ac5256d492d0b7f6a7244bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import *
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
from dcgm_structs import dcgmExceptionClass
import logger
import test_utils
import time
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_default(handle):
dr = DcgmReader()
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldNameDict()
for gpuId in latest:
        # latest data might contain fewer entries than the field list, because blank values aren't included
assert len(latest[gpuId]) <= len(defaultFieldIds)
# Make sure we get strings
for key in latest[gpuId]:
            assert isinstance(key, str)
sample = dr.GetLatestGpuValuesAsFieldIdDict()
for gpuId in sample:
assert len(sample[gpuId]) <= len(defaultFieldIds)
# Make sure we get valid integer field ids
for fieldId in sample[gpuId]:
assert isinstance(fieldId, int)
assert dcgm_fields.DcgmFieldGetById(fieldId) != None
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_specific_fields(handle):
specificFields = [dcgm_fields.DCGM_FI_DEV_POWER_USAGE, dcgm_fields.DCGM_FI_DEV_XID_ERRORS]
dr = DcgmReader(fieldIds=specificFields)
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldNameDict()
for gpuId in latest:
assert len(latest[gpuId]) <= len(specificFields)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_reading_specific_data(handle, gpuIds):
"""
Verifies that we can inject specific data and get that same data back
"""
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
specificFieldIds = [ dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
]
fieldValues = [ 1,
1000,
9000,
]
for i in range(0, len(specificFieldIds)):
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = specificFieldIds[i]
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()+10) * 1000000.0) # set the injected data into the future
field.value.i64 = fieldValues[i]
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
assert (ret == dcgm_structs.DCGM_ST_OK)
dr = DcgmReader(fieldIds=specificFieldIds)
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldIdDict()
assert len(latest[gpuIds[0]]) == len(specificFieldIds)
for i in range(0, len(specificFieldIds)):
assert latest[gpuIds[0]][specificFieldIds[i]] == fieldValues[i]
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_with_cuda_app()
def test_reading_pid_fields(handle, gpuIds, cudaApp):
"""
Verifies that we can decode PID structs
"""
fieldTag = dcgm_fields.DCGM_FI_DEV_COMPUTE_PIDS
pids = []
dr = DcgmReader(fieldIds=[ fieldTag ], updateFrequency=100000)
logger.debug("Trying for 2 seconds")
exit_loop = False
for _ in range(10):
if (exit_loop):
break
data = dr.GetLatestGpuValuesAsFieldIdDict()
assert len(data) > 0
for gpuId in data:
gpuData = data[gpuId]
if fieldTag in gpuData:
pids.append(gpuData[fieldTag].pid)
if gpuData[fieldTag].pid == cudaApp.getpid():
# Found our PID. Exit the loop
exit_loop = True
time.sleep(0.2)
logger.debug("PIDs: %s. cudaApp PID: %d" % (str(pids), cudaApp.getpid()))
assert cudaApp.getpid() in pids, "could not find cudaApp PID"
| 35.947368
| 94
| 0.69316
| 0
| 0
| 0
| 0
| 3,933
| 0.822631
| 0
| 0
| 1,031
| 0.215645
|
83a81a83e057b3d3c679bc9510ffe5779a6f5647
| 14,761
|
py
|
Python
|
tests/lava/lib/dl/slayer/neuron/test_alif.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 37
|
2021-09-30T16:47:15.000Z
|
2022-03-07T22:29:21.000Z
|
tests/lava/lib/dl/slayer/neuron/test_alif.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 36
|
2021-11-04T16:54:55.000Z
|
2022-03-31T02:26:29.000Z
|
tests/lava/lib/dl/slayer/neuron/test_alif.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 20
|
2021-10-29T22:55:58.000Z
|
2022-03-22T17:27:16.000Z
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from lava.lib.dl.slayer.neuron import alif
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 590
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
current_decay = np.random.random()
voltage_decay = np.random.random()
threshold_decay = np.random.random()
refractory_decay = np.random.random()
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]), requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape(
[1, 1, spike_input.shape[-1]]
).to(device)
# initialize neuron
neuron = alif.Neuron(
threshold,
threshold_step=0.5 * threshold,
current_decay=current_decay,
voltage_decay=voltage_decay,
threshold_decay=threshold_decay,
refractory_decay=refractory_decay,
persistent_state=True,
).to(device)
quantized_weight = neuron.quantize_8bit(weight)
neuron.debug = True
# get the neuron response for full input
current, voltage, th, ref = neuron.dynamics(quantized_weight * spike_input)
spike = neuron.spike(voltage, th, ref)
class TestALIF(unittest.TestCase):
def test_input_output_range(self):
if verbose:
print(spike_input.sum(), spike_input.flatten())
if verbose:
print(spike.sum(), spike.flatten())
self.assertTrue(
spike_input.sum().item() > 0,
'There was zero input spike. Check the test setting.'
)
self.assertTrue(
spike.sum().item() > 0,
            'There was zero output spike. Check the test setting.'
)
def test_properties(self):
_ = neuron.weight_exponent
_ = neuron.v_th_mant
_ = neuron.cx_current_decay
_ = neuron.cx_voltage_decay
_ = neuron.cx_threshold_decay
_ = neuron.cx_refractory_decay
_ = neuron.scale
_ = neuron.shape
_ = neuron.device
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_batch_consistency(self):
spike_var = torch.norm(torch.var(spike, dim=0)).item()
voltage_var = torch.norm(torch.var(voltage, dim=0)).item()
current_var = torch.norm(torch.var(current, dim=0)).item()
th_var = torch.norm(torch.var(th, dim=0)).item()
ref_var = torch.norm(torch.var(ref, dim=0)).item()
self.assertTrue(
spike_var < 1e-5,
f'Spike variation across batch dimension is inconsistent. '
f'Variance was {spike_var}. Expected 0.'
)
self.assertTrue(
current_var < 1e-5,
f'Current variation across batch dimension is inconsistent. '
f'Variance was {current_var}. Expected 0.'
)
self.assertTrue(
voltage_var < 1e-5,
f'Voltage variation across batch dimension is inconsistent. '
f'Variance was {voltage_var}. Expected 0.'
)
self.assertTrue(
th_var < 1e-5,
f'Threshold variation across batch dimension is inconsistent. '
f'Variance was {th_var}. Expected 0.'
)
self.assertTrue(
ref_var < 1e-5,
f'Refractory variation across batch dimension is inconsistent. '
f'Variance was {ref_var}. Expected 0.'
)
def test_integer_states(self):
# there should be no quantization error when
# states are scaled with s_scale
voltage_error = torch.norm(
torch.floor(voltage * neuron.s_scale)
- voltage * neuron.s_scale
)
current_error = torch.norm(
torch.floor(current * neuron.s_scale)
- current * neuron.s_scale
)
th_error = torch.norm(
torch.floor(th * neuron.s_scale)
- th * neuron.s_scale
)
ref_error = torch.norm(
torch.floor(ref * neuron.s_scale)
- ref * neuron.s_scale
)
self.assertTrue(
voltage_error < 1e-5,
f'Voltage calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {voltage_error}'
)
self.assertTrue(
current_error < 1e-5,
f'Current calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {current_error}'
)
self.assertTrue(
th_error < 1e-5,
f'Threshold calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {th_error}'
)
self.assertTrue(
ref_error < 1e-5,
f'Refractory calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {ref_error}'
)
def test_persistent_state(self):
# clear previous persistent state
neuron.current_state *= 0
neuron.voltage_state *= 0
neuron.threshold_state *= 0
neuron.threshold_state += neuron.threshold # stable at th0
neuron.refractory_state *= 0
# break the calculation into two parts: before ind and after ind
ind = int(np.random.random() * (spike_input.shape[-1] - 1)) + 1
current0, voltage0, th0, ref0 = neuron.dynamics(
quantized_weight[..., :ind] * spike_input[..., :ind]
)
spike0 = neuron.spike(voltage0, th0, ref0)
current1, voltage1, th1, ref1 = neuron.dynamics(
quantized_weight[..., ind:] * spike_input[..., ind:]
)
spike1 = neuron.spike(voltage1, th1, ref1)
spike_error = (
torch.norm(spike[..., :ind] - spike0)
+ torch.norm(spike[..., ind:] - spike1)
).item()
voltage_error = (
torch.norm(voltage[..., :ind] - voltage0)
+ torch.norm(voltage[..., ind:] - voltage1)
).item()
current_error = (
torch.norm(current[..., :ind] - current0)
+ torch.norm(current[..., ind:] - current1)
).item()
th_error = (
torch.norm(th[..., :ind] - th0)
+ torch.norm(th[..., ind:] - th1)
).item()
ref_error = (
torch.norm(ref[..., :ind] - ref0)
+ torch.norm(ref[..., ind:] - ref1)
).item()
if verbose:
print(ind)
if spike_error >= 1e-5:
print('Persistent spike states')
print(
spike[0, 0, ind - 10:ind + 10].cpu().data.numpy().tolist()
)
print(spike0[0, 0, -10:].cpu().data.numpy().tolist())
print(spike1[0, 0, :10].cpu().data.numpy().tolist())
if voltage_error >= 1e-5:
print('Persistent voltage states')
print((
neuron.s_scale * voltage[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if current_error >= 1e-5:
print('Persistent current states')
print((
neuron.s_scale * current[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if th_error >= 1e-5:
print('Persistent threshold states')
print((
neuron.s_scale * th[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if ref_error >= 1e-5:
print('Persistent refractory states')
print((
neuron.s_scale * ref[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if verbose:
if bool(os.environ.get('DISPLAY', None)):
plt.figure()
plt.plot(
time.cpu().data.numpy(),
current[0, 0].cpu().data.numpy(),
label='current'
)
plt.plot(
time[:ind].cpu().data.numpy(),
current0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
current1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
voltage[0, 0].cpu().data.numpy(),
label='voltage'
)
plt.plot(
time[:ind].cpu().data.numpy(),
voltage0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
voltage1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.plot(
time[spike[0, 0] > 0].cpu().data.numpy(),
0 * spike[0, 0][spike[0, 0] > 0].cpu().data.numpy(),
'.', markersize=12, label='spike'
)
plt.plot(
time[:ind][spike0[0, 0] > 0].cpu().data.numpy(),
0 * spike0[0, 0][spike0[0, 0] > 0].cpu().data.numpy(),
'.', label=':ind'
)
plt.plot(
time[ind:][spike1[0, 0] > 0].cpu().data.numpy(),
0 * spike1[0, 0][spike1[0, 0] > 0].cpu().data.numpy(),
'.', label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
th[0, 0].cpu().data.numpy(),
label='threshold'
)
plt.plot(
time[:ind].cpu().data.numpy(),
th0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
th1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
ref[0, 0].cpu().data.numpy(),
label='refractory'
)
plt.plot(
time[:ind].cpu().data.numpy(),
ref0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
ref1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.show()
self.assertTrue(
spike_error < 1e-5,
f'Persistent state has errors in spike calculation. '
f'Error was {spike_error}.'
f'{seed=}'
)
self.assertTrue(
voltage_error < 1e-5,
f'Persistent state has errors in voltage calculation. '
f'Error was {voltage_error}.'
f'{seed=}'
)
self.assertTrue(
current_error < 1e-5,
f'Persistent state has errors in current calculation. '
f'Error was {current_error}.'
f'{seed=}'
)
self.assertTrue(
th_error < 1e-5,
f'Persistent state has errors in threshold calculation. '
f'Error was {th_error}.'
f'{seed=}'
)
self.assertTrue(
ref_error < 1e-5,
f'Persistent state has errors in refractory calculation. '
f'Error was {ref_error}.'
f'{seed=}'
)
def test_backward(self):
spike_target = spike.clone().detach()
current_target = current.clone().detach()
voltage_target = voltage.clone().detach()
spike_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] = 1
current_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= 1
voltage_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= -1
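        # Perturb a few random time steps so the MSE losses are non-zero and
        # backward() actually exercises the custom gradients.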
loss = F.mse_loss(spike, spike_target) \
+ F.mse_loss(current, current_target) \
+ F.mse_loss(voltage, voltage_target)
loss.backward()
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_graded_spikes(self):
# TODO: after further study of network behavior with graded spikes.
pass
| 34.569087
| 78
| 0.492175
| 12,960
| 0.877989
| 0
| 0
| 0
| 0
| 0
| 0
| 2,519
| 0.170652
|
83a83633ab9542d9e22f77076652f1c0ce78f53a
| 526
|
py
|
Python
|
amount_test.py
|
kalafut/go-ledger
|
28a625e31d460e0ac2926c53a30f47f159d2b82f
|
[
"MIT"
] | null | null | null |
amount_test.py
|
kalafut/go-ledger
|
28a625e31d460e0ac2926c53a30f47f159d2b82f
|
[
"MIT"
] | 2
|
2015-11-08T18:50:11.000Z
|
2015-11-08T18:50:42.000Z
|
amount_test.py
|
kalafut/go-ledger
|
28a625e31d460e0ac2926c53a30f47f159d2b82f
|
[
"MIT"
] | null | null | null |
import decimal
import pytest
from amount import Amount as A
def test_basic():
a = A(("0.30", "$"))
assert '$ 0.30' == str(a)
a = A({"$": decimal.Decimal(4)})
    assert '$ 4' == str(a)
def test_add():
a = A(("2.34", "$"))
b = A(("5.97", "$"))
assert "$ 8.31" == str(a+b)
c = A(("9.01", "CAD")) + A(("15.56", "$"))
assert "CAD 9.01" == str(c.get("CAD"))
assert "$ 15.56" == str(c.get("$"))
d = a + c
assert "$ 17.90" == str(d.get("$"))
assert "CAD 9.01" == str(d.get("CAD"))
| 21.04
| 46
| 0.452471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.239544
|
83a84321ea0a0bc9570475d0ce3c63e9712bd0ca
| 4,449
|
py
|
Python
|
DiscoGAN/discogan_train.py
|
sumersumerdjl/kozistr-Awesome-GANs
|
6e20e9cd07d0ec413a187d496159b97d793dab0c
|
[
"MIT"
] | 1
|
2021-08-16T01:40:46.000Z
|
2021-08-16T01:40:46.000Z
|
DiscoGAN/discogan_train.py
|
Psyche-mia/Awesome-GANs
|
6e20e9cd07d0ec413a187d496159b97d793dab0c
|
[
"MIT"
] | null | null | null |
DiscoGAN/discogan_train.py
|
Psyche-mia/Awesome-GANs
|
6e20e9cd07d0ec413a187d496159b97d793dab0c
|
[
"MIT"
] | 1
|
2021-08-16T01:35:21.000Z
|
2021-08-16T01:35:21.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
# import numpy as np
import time
import discogan
import sys
sys.path.insert(0, '../')
import image_utils as iu
from datasets import Pix2PixDataSet as DataSets
results = {
'sample_output': './gen_img/',
'model': './model/DiscoGAN-model.ckpt'
}
paras = {
'epoch': 200,
'batch_size': 64,
'logging_interval': 5
}
def main():
start_time = time.time() # clocking start
# Dataset
dataset = DataSets(height=64,
width=64,
channel=3,
ds_path='D:/DataSets/pix2pix/',
ds_name="vangogh2photo")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# DiscoGAN model
model = discogan.DiscoGAN(s)
# load model & graph & weight
global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %s" % global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
# initializing variables
tf.global_variables_initializer().run()
d_overpowered = False # G loss > D loss * 2
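        # Heuristic to keep the adversarial game balanced: when the
        # discriminator gets too strong (d_loss < g_loss / 2), skip its
        # updates until the generator catches up.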
for epoch in range(paras['epoch']):
for step in range(1000):
offset_a = (step * paras['batch_size']) % (dataset.images_a.shape[0] - paras['batch_size'])
offset_b = (step * paras['batch_size']) % (dataset.images_b.shape[0] - paras['batch_size'])
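                # Offsets wrap around each domain's dataset so every step
                # draws a full batch even when step * batch_size exceeds
                # the dataset size.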
# batch data set
batch_a = dataset.images_a[offset_a:(offset_a + paras['batch_size']), :]
batch_b = dataset.images_b[offset_b:(offset_b + paras['batch_size']), :]
# update D network
if not d_overpowered:
s.run(model.d_op, feed_dict={model.A: batch_a})
# update G network
s.run(model.g_op, feed_dict={model.B: batch_b})
if epoch % paras['logging_interval'] == 0:
d_loss, g_loss, summary = s.run([
model.d_loss,
model.g_loss,
model.merged
], feed_dict={
model.A: batch_a,
model.B: batch_b
})
# print loss
print("[+] Epoch %03d Step %04d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss))
# update overpowered
d_overpowered = d_loss < g_loss / 2.
                    # generate translated samples from both mappings (A->B and B->A)
ab_samples = s.run(model.G_s2b, feed_dict={model.A: batch_a})
ba_samples = s.run(model.G_b2s, feed_dict={model.B: batch_b})
# summary saver
model.writer.add_summary(summary, global_step=global_step)
# export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_ab_dir = results['sample_output'] + 'train_A_{0}_{1}.png'.format(epoch, global_step)
sample_ba_dir = results['sample_output'] + 'train_B_{0}_{1}.png'.format(epoch, global_step)
# Generated image save
iu.save_images(ab_samples, size=[sample_image_height, sample_image_width],
image_path=sample_ab_dir)
iu.save_images(ba_samples, size=[sample_image_height, sample_image_width],
image_path=sample_ba_dir)
# model save
model.saver.save(s, results['model'], global_step=global_step)
end_time = time.time() - start_time
# elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# close tf.Session
s.close()
if __name__ == '__main__':
main()
| 34.757813
| 111
| 0.537199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 917
| 0.206114
|
83a940e2ed8e27f8008df09f81164f38241b0cc0
| 303
|
bzl
|
Python
|
examples/index.bzl
|
SebastianPodgajny/rules_nodejs
|
7d1fbd9b6751225f502eccf2a91d1059371d461d
|
[
"Apache-2.0"
] | null | null | null |
examples/index.bzl
|
SebastianPodgajny/rules_nodejs
|
7d1fbd9b6751225f502eccf2a91d1059371d461d
|
[
"Apache-2.0"
] | null | null | null |
examples/index.bzl
|
SebastianPodgajny/rules_nodejs
|
7d1fbd9b6751225f502eccf2a91d1059371d461d
|
[
"Apache-2.0"
] | null | null | null |
"Used to reference the nested workspaces for examples in /WORKSPACE"
ALL_EXAMPLES = [
"angular",
"app",
"kotlin",
"nestjs",
"parcel",
"protocol_buffers",
"user_managed_deps",
"vendored_node",
"vendored_node_and_yarn",
"web_testing",
"webapp",
"worker",
]
| 18.9375
| 68
| 0.617162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.69637
|
83a97810070d0ec137e7706e16cb2e0d4e501275
| 2,611
|
py
|
Python
|
testproject/testproject/views.py
|
Najiva/django-tables2-column-shifter
|
90695d5890c4ef6d7ba58a189a53d712aa8b9e14
|
[
"BSD-3-Clause"
] | 19
|
2017-01-04T19:17:57.000Z
|
2021-08-05T20:06:32.000Z
|
testproject/testproject/views.py
|
Najiva/django-tables2-column-shifter
|
90695d5890c4ef6d7ba58a189a53d712aa8b9e14
|
[
"BSD-3-Clause"
] | 16
|
2017-02-20T20:24:01.000Z
|
2021-07-23T12:50:35.000Z
|
testproject/testproject/views.py
|
Najiva/django-tables2-column-shifter
|
90695d5890c4ef6d7ba58a189a53d712aa8b9e14
|
[
"BSD-3-Clause"
] | 8
|
2017-01-26T07:15:54.000Z
|
2022-03-22T18:03:24.000Z
|
from django.views.generic import TemplateView
from django_tables2.config import RequestConfig
from django_tables2_column_shifter.tables import (
ColumnShiftTableBootstrap2,
ColumnShiftTableBootstrap3,
ColumnShiftTableBootstrap4,
ColumnShiftTableBootstrap5,
)
from .models import Author, Book
from .tables import get_author_table_class, get_book_table_class
class Index(TemplateView):
template_name = "testproject/index.html"
class Base(object):
container_css = "span10 offset1"
template_name = "testproject/test_bootstrap2.html"
table_class_version = ColumnShiftTableBootstrap2
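    # Subclasses override only the three attributes above;
    # get_context_data() below builds the same three tables for whichever
    # Bootstrap version the view targets.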
def get_context_data(self, **kwargs):
context = super(Base, self).get_context_data(**kwargs)
        # Build tables
author_queryset = Author.objects.all()
author_table1 = get_author_table_class(
self.table_class_version
)(author_queryset)
author_table2 = get_author_table_class(
self.table_class_version
)(author_queryset, prefix="authors2")
book_queryset = Book.objects.all()
book_table = get_book_table_class(
self.table_class_version
)(book_queryset, prefix="books")
# Turn on sorting and pagination
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table1)
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table2)
RequestConfig(self.request, paginate={'per_page': 2}).configure(book_table)
context['container_css'] = self.container_css
context['author_table1'] = author_table1
context['author_table2'] = author_table2
context['book_table'] = book_table
context['book_queryset'] = book_queryset
return context
class Bootstrap2(Base, TemplateView):
pass
class Bootstrap3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap3.html"
table_class_version = ColumnShiftTableBootstrap3
class Bootstrap4(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap4_1_3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.1.3.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap5(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap5.html"
table_class_version = ColumnShiftTableBootstrap5
| 32.234568
| 86
| 0.73152
| 2,215
| 0.848334
| 0
| 0
| 0
| 0
| 0
| 0
| 487
| 0.186519
|
83a9d512e8c7c2d7396ec8d190a1c692e2814f87
| 1,812
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Workset.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Workset.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Workset.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class Workset(WorksetPreview,IDisposable):
""" Represents a workset in the document. """
@staticmethod
def Create(document,name):
"""
Create(document: Document,name: str) -> Workset
Creates a new workset.
document: The document in which the new instance is created.
name: The workset name.
Returns: Returns the newly created workset.
"""
pass
def Dispose(self):
""" Dispose(self: WorksetPreview,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: WorksetPreview,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsEditable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is editable.
Get: IsEditable(self: Workset) -> bool
"""
IsOpen=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is open (rather than closed).
Get: IsOpen(self: Workset) -> bool
"""
IsVisibleByDefault=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is visible by default.
Get: IsVisibleByDefault(self: Workset) -> bool
"""
Kind=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Kind of the workset.
Get: Kind(self: Workset) -> WorksetKind
"""
| 22.37037
| 215
| 0.671634
| 1,806
| 0.996689
| 0
| 0
| 303
| 0.167219
| 0
| 0
| 1,145
| 0.631898
|
83ab1978e9bfcb9289cdc6a850d6619b639f3ad4
| 967
|
py
|
Python
|
experiments/12_wiki_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | null | null | null |
experiments/12_wiki_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | null | null | null |
experiments/12_wiki_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | 1
|
2020-12-02T09:07:06.000Z
|
2020-12-02T09:07:06.000Z
|
import numpy as np
EXPERIMENT_NAME = 'EXP_12'
CORPUS_PATH = '/home/dddhiraj/Documents/stuff/data/wiki_en.txt'
TRAINING_WINDOW = 5
CONTEXT_DIMENSION = 64
LEARNING_RATE = 1
DROPOUT = 0.05
CONTEXT_DECAY = 1 - TRAINING_WINDOW ** -0.5
CONTRASTIVE_WEIGHT = 1  # 0.1
NEGATIVE_SAMPLE_SIZE = TRAINING_WINDOW ** 2
CONTEXT_INERTIA = np.sqrt(TRAINING_WINDOW)
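# CONTEXT_DECAY, NEGATIVE_SAMPLE_SIZE and CONTEXT_INERTIA are all derived
# from TRAINING_WINDOW rather than tuned independently.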
THREADS = 6
CHUNK_SIZE = 5000
DB = 'REDIS'
if DB == 'MONGO':
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.train_1#neighbour_aware_context_initilization_train_window_8
if DB == 'REDIS':
import redis
collection = redis.Redis(db=1) #11
key_collection= redis.Redis(db=2) #12
#import redisai
# collection = redisai.Client(db=14)
# key_collection = redisai.Client(db=15)
'''
Experiment details:
Trained on wiki data with 51 million words.
'''
| 21.021739
| 82
| 0.680455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 342
| 0.353671
|
83ab5e631ea0bec7a174bfa753c93a724a3979a9
| 49,562
|
py
|
Python
|
yasi.py
|
arenadotio/yasi-sexp-indenter
|
f64cd332b3f41d7c2b3458b4279a13ec26df16b8
|
[
"MIT"
] | null | null | null |
yasi.py
|
arenadotio/yasi-sexp-indenter
|
f64cd332b3f41d7c2b3458b4279a13ec26df16b8
|
[
"MIT"
] | 1
|
2020-07-14T16:07:38.000Z
|
2020-07-14T16:07:38.000Z
|
yasi.py
|
arenadotio/yasi-sexp-indenter
|
f64cd332b3f41d7c2b3458b4279a13ec26df16b8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
""" yasi
Date: 20th November 2013
Author: nkmathew <kipkoechmathew@gmail.com>
Dialect aware s-expression indenter
"""
from __future__ import print_function
import argparse
import hashlib
import os
import re
import shutil
import sys
import time
import collections
import json
import difflib
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
# pylint: disable=unused-import
from pprint import pprint # noqa
__version__ = '2.1.2'
@lru_cache(maxsize=None)
def create_args_parser():
""" Return command line parser """
parser = argparse.ArgumentParser(
description='Dialect-aware s-expression indenter', prog='yasi')
parser.add_argument('files', help='List of files to be indented. '
'Will indent from standard input if no files are specified',
nargs='*')
parser.add_argument(
'-nc', '--no-compact', '--nc', dest='compact',
help='Do not compact the code, just indent', action='store_false')
parser.add_argument(
'-nb', '--no-backup', '--nb', dest='backup', action='store_false',
help='Do not create a backup file even if --backup-dir is specified ')
parser.add_argument(
'-nm', '--no-modify', '--nm', dest='modify',
help='Do not modify the file', action='store_false')
parser.add_argument(
'--diff', '-diff', dest='output_diff',
help='Prints unified diff of the initial and final result',
action='store_true')
parser.add_argument(
'-nw', '--no-warning', '--nw', dest='warning',
help='Do not display warnings', action='store_false')
parser.add_argument(
'-nr', '--no-rc', '--nr', dest='read_rc',
help='Ignore any rc files in the current or home folder',
action='store_false')
parser.add_argument(
'--no-output', '-no-output', dest='output',
help='Suppress output of the indented code', action='store_false')
parser.add_argument(
'-c', '--color', '-color', dest='colour_diff',
help='Display diff text in color', action='store_true')
parser.add_argument(
'-ne', '--no-exit', '--ne', dest='exit', action='store_false',
help='Instructs the program not to exit when a warning is raised.')
parser.add_argument(
'-o', dest='output_file',
help='Path/name of output file', type=str, default='')
parser.add_argument(
'--tab', '-tab', dest='tab_size',
help='Indent with tabs using the specified tabwidth. A tab is assumed \
equal to 4 spaces by default when expanding the tabs in the input file',
type=int, default=-1)
parser.add_argument(
'--dialect', '-dialect',
help='Use Scheme keywords', type=str, default='')
parser.add_argument(
'-v', '--version', action='version',
help='Prints script version', version='yasi v%s' % __version__)
parser.add_argument(
'-suffix', '--suffix', dest='backup_suffix', help='Backup file suffix',
type=str, default='.yasi.bak~')
parser.add_argument(
'-bd', '--backup-dir', '--bd', '-backup-dir',
help='The directory where the backup file is to be written',
type=str, default=os.getcwd())
parser.add_argument(
'-is', '--indent-size', '--is',
help='The number of spaces per indent',
type=int, default=2)
parser.add_argument(
'-di', '--default-indent', '--di',
help='The indent level to be used in case a '
"function's argument is in the next line. Vim uses 2, the most common being 1.",
type=int, default=1)
parser.add_argument(
'-ic', '--indent-comments', '--ic',
help='If true, comment lines will be indented possibly '
'messing with any deliberate comment layout', action='store_true')
parser.add_argument(
'-uni', '--uniform', '-uniform', '--uni',
help='Dictates whether the if-clause and else-clause of an if-like'
'block should have the same indent level.',
action='store_true')
parser.add_argument(
'-parallel', '--parallel',
help='Process the given files in parallel',
action='store_true')
return parser
def parse_args(arguments=None):
""" Reads command-line arguments
>>> parse_args('--indent-comments')
"""
if arguments is None:
arguments = sys.argv[1:]
if isinstance(arguments, str):
arguments = arguments.split()
if isinstance(arguments, argparse.Namespace):
return arguments
parser = create_args_parser()
args = parser.parse_args(arguments)
args.dialect = args.dialect.lower()
if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
parser.error("`{0}' is not a recognized dialect".format(args.dialect))
args.backup_dir = os.path.expanduser(args.backup_dir)
if not os.path.exists(args.backup_dir):
parser.error("Directory `{0}' does not exist".format(args.backup_dir))
if len(args.files) > 1 and args.output_file:
parser.error('Cannot use the -o flag when more than one file is specified')
if not args.files:
# Indentation from standard input
if args.modify and not args.output_file:
args.modify = False
args.backup = False
args.warning = False
if args.output_diff:
# If someone requests a diff we assume they don't want the file to be
# modified
args.modify = False
return args
def read_file(fname):
""" read_file(fname : str) -> str
>>> read_file(r'C:\\mine\\test.lisp')
r'(print "No, no, there\'s \\r\\nlife in him!. ")\\r\\n\\r\\n'
The file is read in binary mode in order to preserve original line endings.
Line ending Binary mode Text mode
CRLF CRLF LF
CR CR LF
"""
assert os.path.exists(fname), "\n--%s-- Warning: File `%s' does not exist..." \
% (current_time(), fname)
with open(fname, 'rb') as fp:
return fp.read().decode('utf-8')
def current_time():
""" current_time() -> str
>>> current_time()
14:28:04
Returns the current local time in 24 clock system.
"""
return time.strftime('%X', (time.localtime()))
def backup_source_file(fname, args=None):
""" backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file.
"""
args = parse_args(args)
backup_dir = args.backup_dir
assert os.path.exists(fname), \
("\n--%s-- Warning: File `%s' does not exist..." % (current_time(), fname))
assert os.path.exists(os.path.abspath(backup_dir)), \
("\n--%s-- Warning: Directory `%s' does not exist..." % (current_time(), fname))
backup_name = backup_dir + os.sep + os.path.split(fname)[1] + args.backup_suffix
try:
shutil.copyfile(fname, backup_name)
except IOError:
message = "\n--%s-- Warning: Couldn't backup the file `%s' in `%s', check if you have enough permissions. "
tpl = (current_time(), fname, backup_dir)
sys.stderr.write(message % tpl)
def md5sum(content):
""" md5sum(content : str) -> str
>>> md5sum('Keyboard not found!! Press F1 to continue...')
'ad98cde09016d2e99a726966a4291acf'
Returns a checksum to be used to determine whether the file has changed.
A simple textual comparison can still do the work
"""
    return hashlib.md5(content.encode('utf-8')).hexdigest()
def find_line_ending(string):
""" find_line_ending(string : str) -> str
>>> find_line_ending('Elementary my dear Watson. \\r')
'\\r'
Find the line ending in the file so that we can try to preserve it.
"""
if CRLF in string:
return CRLF
if CR in string:
return CR
return LF
@lru_cache(maxsize=None)
def trim(string):
""" trim(string : str) -> str
    Uses every useful hack to try and reduce extra whitespace without
messing with character literals
"""
# Trailing whitespace
string = re.sub('[ \t]*$', '', string)
# turn '(print(+ 1 1))' to '(print (+ 1 1))'
    string = re.sub(r'''([^\\(\[, {@~`'#^])(\(|\[|{)''', r'\1 \2', string, flags=re.X)
# turn ')(' to ') ('
string = re.sub(r'(\)|\]|})(\[|\(|{)', r'\1 \2', string)
# Remove any space before closing brackets '(print 12 )' ==> '(print 12)'
string = re.sub('[ \t]*(\)|\]|})', r'\1', string)
# remove extra whitespace "(print 'this)" ==> "(print 'this)"
string = re.sub('[ \t]{2,}', ' ', string)
# turn ') ) ) ' into '))) '
string = re.sub(r'(\))[ \t]*(?=(\)))', r'\1', string)
string = re.sub(r'(\])[ \t]*(?=(\]))', r'\1', string)
string = re.sub(r'(})[ \t]*(?=(}))', r'\1', string)
# turn '( ( ( ' into '((( '
string = re.sub(r'(\()[ \t]*(?=(\())', r'\1', string)
string = re.sub(r'(\[)[ \t]*(?=(\[))', r'\1', string)
string = re.sub(r'({)[ \t]*(?=({))', r'\1', string)
# remove leading whitespace ' print' ==> 'print'
string = re.sub('^[ \t]*', '', string)
# Remove space between quote and opening bracket, "' (1 2 3)" ==> "'(1 2 3)"
string = re.sub("('|`)[ \t]+(\(|\[|{)", r'\1\2', string)
return string
def find_trim_limit(string, args=None):
""" find_trim_limit(string : str) -> int
>>> find_trim_limit(r'(list #\; #\")')
14
>>> find_trim_limit(r'(list ; ")')
6
>>> find_trim_limit(r'(list " ;)')
7
    The function attempts to identify up to which point we are supposed to trim
so that we don't mess with strings or any aligned comments.
It does this by comparing the positions of semicolons and double
quotes. It doesn't consider the multiline comment marker. If your
code uses multiline comments(#| ... |#), you'll have to use --no-compact mode
"""
args = parse_args(args)
# Find position of the first unescaped semi colon
comment_start = re.search(r'([^\\];)|(^;)', string)
# Find position of the first unescaped double quote
string_start = re.search(r'([^\\]")|(^")', string)
# Assign -1 if there's no match
limit = string_start.end() if string_start else -1
comment_start = comment_start.end() if comment_start else -1
if comment_start != -1:
# If a semi colon is found, include all the whitespace before it to preserve
# any aligned comments
comment_start = re.search('[ \t]*;', string).start()
if args.dialect == 'newlisp':
# Find out which string type comes first(normal, tag or brace strings)
brace_string_start = re.search('{', string)
tag_string_start = re.search('\[text\]', string)
brace_string_start = brace_string_start.end() if brace_string_start else -1
tag_string_start = tag_string_start.end() if tag_string_start else -1
pos_lst = [limit, brace_string_start, tag_string_start]
pos_lst = [x for x in pos_lst if x != -1]
if pos_lst:
limit = min(pos_lst)
if comment_start != -1 and limit != -1:
if comment_start < limit:
            # If the semicolon comes before the quote, it means the string has been
# commented out
limit = comment_start
elif comment_start != -1 and limit == -1:
# If there's a semicolon but no quote, use the semicolon position as the
# limit
limit = comment_start
elif limit == -1:
# If neither a semicolon nor a double quote has been found, use the length
# of the string as the limit
limit = len(string)
return limit
@lru_cache(maxsize=None)
def is_macro_name(func_name, dialect):
""" is_macro_name(func_name : str, dialect : str) -> bool
>>> is_macro_name('yacc:define-parser')
True
Tests if a word is a macro using the language's/dialect's convention,
e.g macros in Lisp usually start with 'def' and 'with' in Scheme. Saves
the effort of finding all the macros in Lisp/Scheme/Clojure/newLISP and storing
them in a list.
"""
if not func_name:
return False
if dialect == 'lisp':
return re.search('^(macro|def|do|with-)', func_name, re.I)
if dialect == 'scheme':
return re.search('^(call-|def|with-)', func_name)
if dialect == 'clojure':
return re.search('^(def|with)', func_name)
if dialect == 'newlisp':
return re.search('^(macro|def)', func_name)
return False
@lru_cache(maxsize=None)
def split_preserve(string, sep):
""" split_preserve(string : str, sep : str) -> [str]
>>> split_preserve('''
"My dear Holmes, " said I, "this is too much. You would certainly
have been burned, had you lived a few centuries ago.
''', '\\n')
['\\n',
' "My dear Holmes, " said I, "this is too much. You would certainly\\n',
' have been burned, had you lived a few centuries ago.\\n',
' ']
Splits the string and sticks the separator back to every string in the list.
"""
# split the whole string into a list so that you can iterate line by line.
str_list = string.split(sep)
if str_list[-1] == '':
# If you split 'this\nthat\n' you get ['this', 'that', ''] if
# you add newlines to every string in the list you get
# ['this\n', 'that\n', '\n']. You've just added
# another newline at the end of the file.
del str_list[-1]
str_list = [x + sep for x in str_list]
else:
# ['this', 'that'] will become ['this\n', 'that\n'] when
# mapped. A newline has been added to the file. We don't want
# this, so we strip it below.
str_list = [x + sep for x in str_list]
str_list[-1] = str_list[-1].rstrip(sep)
return str_list
@lru_cache(maxsize=None)
def all_whitespace(string):
""" all_whitespace(string : str) -> bool
>>> all_whitespace(' ')
True
Returns True if a string has only whitespace.
"""
return re.search('^[ \t]*(\r|\n|$)', string)
def detabify(text, args):
""" tabify(text : str, args : argparse.Namespace|str) -> str
Expands tabs
"""
args = parse_args(args)
if args.tab_size < 1:
return text.expandtabs(4)
return text.expandtabs(args.tab_size)
def tabify(text, args):
""" tabify(text : str, args : argparse.Namespace|str) -> str
>>> tabify(' (println "hello world")', '--tab=3')
'\t\t (println "hello world")'
Replace spaces with tabs
"""
args = parse_args(args)
if args.tab_size < 1:
return text
tab_equiv = ' ' * args.tab_size
return text.replace(tab_equiv, '\t')
def pad_leading_whitespace(string, zero_level, blist, args=None):
""" pad_leading_whitespace(string : str, current_level : int,
zero_level : int) -> str
>>> pad_leading_whitespace("(print 'Yello)")
" (print 'Yello)"
Takes a string and indents it using the current indentation level
and the zero level.
"""
args = parse_args(args)
if args.compact:
# if compact mode is on, split the string into two, trim the first
# position and merge the two portions.
trim_limit = find_trim_limit(string, args)
comment_line = re.search('^[ \t]*;', string, re.M)
if comment_line and args.indent_comments:
trim_limit = comment_line.end()
substr1 = string[0:trim_limit]
substr2 = string[trim_limit:]
substr1 = trim(substr1)
string = substr1 + substr2
else:
# If in nocompact mode, remove leading spaces only
string = re.sub('^[ \t]+', '', string, count=0)
indent_level = zero_level
if blist:
indent_level = blist[-1]['indent_level']
padding = ' ' * indent_level
padding = tabify(padding, args)
return padding + string, indent_level
def indent_line(zerolevel, bracket_list, line, in_comment, in_symbol_region,
args=None):
""" indent_line(zerolevel : int, bracket_list : list, line : str, in_comment : bool,
in_symbol_region : bool, args : string|list)
Most important function in the indentation process. It uses the bracket
locations stored in the list to indent the line.
"""
args = parse_args(args)
comment_line = re.search('^[ \t]*;', line, re.M)
if args.indent_comments:
# We are allowed to indent comment lines
comment_line = False
if not args.compact and bracket_list == [] and not in_comment:
# If nocompact mode is on and there are no unclosed blocks, try to
# find the zero level by simply counting spaces before a line that
# is not empty or has a comment
_line = detabify(line, args)
leading_spaces = re.search('^[ \t]+[^; )\n\r]', _line)
if leading_spaces:
# NOTE: If you don't subtract one here, the zero level will increase
# every time you indent the file because the character at the end of
# the regex is part of the capture.
zerolevel = leading_spaces.end() - 1
else:
zerolevel = 0
if in_symbol_region:
# No processing done in strings and comments
return zerolevel, line, 0
if not comment_line and not all_whitespace(line):
# If this is not a comment line indent the line.
# If the list is empty, then the current_level defaults
# to zero
curr_line, current_level = pad_leading_whitespace(line, zerolevel,
bracket_list, args)
return zerolevel, curr_line, current_level
return zerolevel, line, 0
# ---------------------------------------------------------------------------------
# GLOBAL CONSTANTS::
CR = '\r'
LF = '\n'
CRLF = CR + LF
KEYWORD0 = 0 # Non-keyword
KEYWORD1 = 1 # Indents uniformly by 1 unit
KEYWORD2 = 2 # Distinguishes subforms
KEYWORD3 = 3 # Indents uniformly by 2 units
KEYWORD4 = 4 # A 1-keyword used mostly for defining local functions e.g flets
# Keywords that indent by two spaces
SCHEME_KEYWORDS = \
['define', 'local-odd?', 'when', 'begin', 'case',
'local-even?', 'do', 'call-with-bytevector-output-port',
'call-with-input-file', 'call-with-port',
'call-with-current-continuation', 'open-file-input-port',
'call-with-port', 'call-with-values', 'call-with-output-file',
'call-with-string-output-port', 'define-syntax', 'if', 'let', 'let*',
'library', 'unless', 'lambda', 'syntax-rules', 'syntax-case',
'let-syntax', 'letrec*', 'letrec', 'let-values', 'let*-values',
'with-exception-handler', 'with-input-from-file',
'with-interrupts-disabled', 'with-input-from-string',
'with-output-to-file', 'with-input-from-port',
'with-output-to-string', 'with-source-path', 'with-syntax',
'with-implicit',
'with-error-handler', 'module', 'parameterize']
CLOJURE_KEYWORDS = \
['defn', 'fn', 'dorun', 'doseq', 'loop', 'when',
'let', 'defmacro', 'binding', 'doto', 'ns', ':import', 'defstruct',
'condp', 'comment', 'when', 'when-let', '->', '->>',
'extend-type', 'reify', 'binding', 'when-not', 'proxy', 'dotimes',
'try', 'finally', 'for', 'letfn', 'catch', 'iterate', 'while',
'with-local-vars', 'locking', 'defmulti', 'defmethod', 'extend'
]
LISP_KEYWORDS = \
[':implementation', ':method', 'case', 'defclass',
'defconstant', 'defgeneric', 'defimplementation',
'define-condition', 'define-implementation-package',
'definterface', 'defmacro', 'defmethod', 'defpackage',
'defproject', 'deftype', 'defun', 'defvar', 'do-external-symbols',
'dolist', 'dotimes', 'ecase', 'etypecase', 'flet', 'handler-bind',
'if', 'lambda', 'let', 'let*', 'print-unreadable-object',
'macrolet', 'defparameter', 'with-slots', 'typecase', 'loop', 'when', 'prog1',
'unless', 'with-open-file', 'with-output-to-string', 'with-input-from-string',
'block', 'handler-case', 'defstruct', 'eval-when', 'tagbody', 'ignore-errors',
'labels', 'multiple-value-bind', 'progn', 'unwind-protect', 'collect'
]
NEWLISP_KEYWORDS = \
['while', 'if', 'case', 'dotimes', 'define', 'dolist', 'catch',
'throw', 'lambda', 'lambda-macro', 'when', 'unless', 'letex', 'begin',
'dostring', 'let', 'letn', 'doargs', 'define-macro', 'until', 'do-until',
'do-while', 'for-all', 'find-all', 'for'
]
# The 'if' and 'else' part of an if block should have different indent levels so
# that they can stand out since there's no else Keyword in Lisp/Scheme to make
# this explicit. list IF_LIKE helps us track these keywords.
IF_LIKE = ['if']
@lru_cache(maxsize=None)
def parse_rc_json():
""" Reads the json configuration file(.yasirc.json), parses it and returns the
dictionary
"""
fname = '.yasirc.json'
path = os.path.expanduser('~/' + fname)
if os.path.exists(fname):
path = os.path.abspath(fname)
elif not os.path.exists(path):
path = ''
content = ''
if path:
with open(path) as f:
content = f.read()
ret = {}
if content:
ret = json.loads(content)
return collections.defaultdict(dict, ret)
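# Assumed .yasirc.json shape (inferred from how add_keywords() consumes the
# result): top-level keys are dialect names, each mapping keyword -> indent
# number, e.g. {"lisp": {"my-macro": 1}}.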
def assign_indent_numbers(lst, inum, dic):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic
def add_keywords(args):
""" add_keywords(dialect : str) -> [str, str]
Takes a lisp dialect name and returns a list of keywords that increase
indentation by two spaces and those that can be one-armed like 'if'
"""
dialect = args.dialect
keywords = collections.defaultdict(int)
two_spacers = []
two_armed = IF_LIKE
local_binders = []
if dialect == 'lisp': # Lisp
two_spacers = LISP_KEYWORDS
two_armed += ['multiple-value-bind', 'destructuring-bind', 'do', 'do*']
local_binders += ['flet', 'macrolet', 'labels']
elif dialect == 'scheme': # Scheme
two_spacers = SCHEME_KEYWORDS
two_armed += ['with-slots', 'do', 'do*']
local_binders += []
elif dialect == 'clojure': # Clojure
two_spacers = CLOJURE_KEYWORDS
two_armed += []
local_binders += ['letfn']
elif dialect == 'newlisp': # newLISP
two_spacers = NEWLISP_KEYWORDS
two_armed += []
local_binders += []
elif dialect == 'all':
two_spacers = LISP_KEYWORDS + SCHEME_KEYWORDS + CLOJURE_KEYWORDS + \
NEWLISP_KEYWORDS
keywords = assign_indent_numbers(two_spacers, KEYWORD1, keywords)
keywords = assign_indent_numbers(two_armed, KEYWORD2, keywords)
keywords = assign_indent_numbers(local_binders, KEYWORD4, keywords)
if args.read_rc:
rc_keywords = parse_rc_json()
keywords.update(rc_keywords[dialect])
return keywords
# ---------------------------------------------------------------------------------
def find_first_arg_pos(bracket_offset, curr_line, args=None):
""" find_first_arg_pos(bracket_offset : int, curr_line : str) -> [int, int]
Arguments:
bracket_offset - The position of the bracket in the current line e.g
" ( list 'timey 'wimey )" --> 4
" ( list 'timey 'wimey )" --> 1
"( list 'timey 'wimey )" --> 0
>>> find_first_arg_pos(0, "( list 'one-sheep 'two-sheep )")
[11, 5]
Returns the position of the first argument to the function relative to the
position of the opening bracket and the number of spaces between the opening
bracket and the function name.
    The two values will be used to align the other arguments in the subsequent line
"""
args = parse_args(args)
spaces_before_func = 0
subline = curr_line[bracket_offset + 1:]
if re.search('^[ \t]*($|\r)', subline):
# whitespace extending to the end of the line means there's no
# function in this line. The indentation level defaults to one.
arg_pos = 1
else:
if bracket_offset != len(curr_line) - 1 and curr_line[bracket_offset + 1] == ' ':
# control reaches here if we are not at the end of the line
# and whitespace follows. We must first find the position of the
# function and then the arguments position
match = re.search(' +[^)\]]| \)', subline) # Find the first non whitespace/bracket character
if match:
spaces_before_func = match.end() - match.start() - 1
end = match.end()
else:
end = 0
# Then use the end of the whitespace group as the first argument
arg_pos = re.search(' +([^)])|( *(\(|\[))', subline[end:])
if arg_pos:
arg_pos = arg_pos.end() + spaces_before_func + 1
else:
arg_pos = spaces_before_func + 1
if re.match('^[ \t]*(#\||;|$|\r)',
subline[(end - 1 + subline[end - 1:].find(' ')):]):
                # But, if a comment is found after the function name, the
# indent level becomes one
arg_pos = spaces_before_func + args.default_indent
else:
# If there's no space after the bracket, simply find the end of the
# whitespace group
match = re.search(' +([^)}\n\r])|( *(\(|\[|{))', subline)
if match: # found the argument
arg_pos = match.end()
else: # Either empty list or argument is in the next line
arg_pos = 1
if re.match('^[\t ]*(;|$|\r)', subline[subline.find(' '):]):
# Again if a comment is found after the function name, the
# indent level defaults to 1
arg_pos = spaces_before_func + args.default_indent
return [arg_pos, spaces_before_func]
def _pop_from_list(bracket, lst, line, real_pos, offset, msg_stack):
""" _pop_from_list(char : str, lst : [str], line : str,
real_pos : int, offset : int)
The function is called when a closing bracket is encountered. The function
simply pops the last pushed item and issues a warning if an error is
encountered.
"""
# Try to spot a case when a square bracket is used to close a round bracket
# block
if bracket == ']':
correct_closer = '['
elif bracket == ')':
correct_closer = '('
else:
correct_closer = '{'
if lst != []:
popped = lst.pop()
popped_char = popped['character']
popped_pos = popped['line_number']
popped_offset = popped['bracket_pos']
        if popped_char != correct_closer:
message = "Bracket `%s' does not match `%s' at (%d, %d)"
message = message % (bracket, popped_char, popped_pos, popped_offset)
warning_info = {
'msg': message,
'line': line,
'column': real_pos
}
msg_stack.append(warning_info)
else:
# If the list is empty and a closing bracket is found, it means we have
# excess brackets. That warning is issued here. The coordinates used
# will be slightly or largely off target depending on how much your
# code was modified when used with compact mode
message = "Unmatched closing bracket `%s'" % bracket
warning_info = {
'msg': message,
'line': line,
'column': offset + 1
}
msg_stack.append(warning_info)
return lst
def _push_to_list(lst, func_name, char, line, offset,
first_arg_pos, first_item, in_list_literal,
lead_spaces, args=None):
""" _push_to_list(lst : [str], func_name : str, char : str, line : int, offset : int,
first_arg_pos :int , first_item : int, in_list_literal : bool,
lead_spaces : int, args : str)
Called when an opening bracket is encountered. A hash containing the
necessary data to pin point errors and the indentation level is stored in
the list and the list returned.
"""
args = parse_args(args)
keywords = add_keywords(args)
pos_hash = {'character': char,
'line_number': line,
'bracket_pos': offset,
'indent_level': offset + first_arg_pos, # the default value, e.g in normal function
'func_name': func_name,
'spaces': 0}
is_macro = is_macro_name(func_name, args.dialect)
two_spacer = is_macro or keywords[func_name] in [KEYWORD1, KEYWORD4]
if in_list_literal or char == '{' or (char == '[' and args.dialect == 'clojure'):
# found quoted list or clojure hashmap/vector
pos_hash['indent_level'] = first_item
elif keywords[func_name] == KEYWORD2:
# We only make the if-clause stand out if not in uniform mode
pos_hash['indent_level'] = lead_spaces + ((offset + args.indent_size * 2)
if not args.uniform
else (offset + args.indent_size))
elif func_name != '':
if two_spacer:
pos_hash['indent_level'] = lead_spaces + offset + args.indent_size
elif keywords[func_name] == KEYWORD3:
pos_hash['indent_level'] = lead_spaces + offset + (2 * args.indent_size)
lst.append(pos_hash)
try:
# A hack to make flets and labels in Lisp not indent like
# functions. The 'labels' indentation may not be exactly
# perfect.
parent_func = lst[-3]['func_name']
# Make 'special' indentation occur only in a Clojure binding block([]) for
# letfns
non_bind_block = args.dialect == 'clojure' and lst[-2]['character'] != '['
if keywords[parent_func] == KEYWORD4 and not non_bind_block:
lst[-1]['indent_level'] = offset + args.indent_size
except IndexError:
pass
return lst
def indent_code(original_code, args=None):
""" indented_code(string : str, fname : str) -> [...]
Arguments:
fpath: Simply used in formatting the warning messages
>>> indent_code("(print\n'Hello)")
{'bracket_locations': [],
'comment_locations': [],
'in_comment': 0,
'in_newlisp_tag_string': False,
'in_string': False,
'in_symbol_with_space': False,
'indented_code': ['(print\n', " 'Hello)"],
'last_quote_location': (),
'last_symbol_location': (),
'message_stack': [],
'newlisp_brace_locations': [],
'original_code': ['(print\n', "'Hello)"],
'first_tag_string': ()}
The last entry in the list is the indented string.
"""
args = parse_args(args)
keywords = add_keywords(args)
# Safeguards against processing brackets inside strings
in_string = False
# newLISP use curly brackets as a syntax for multiline strings
# this variable here tries to keep track of that
in_newlisp_string = 0
in_newlisp_tag_string = False
newlisp_brace_locations = []
first_tag_string = ()
# zero_level helps us get the same results as Sitaram's indenter when in
# --no-compact mode.
zero_level = 0
# The two variables prevent formatting comment regions or symbols with whitespace
in_comment = 0
in_symbol_with_space = False
comment_locations = []
last_symbol_location = ()
# A in_symbol_region is the region between pipes(| |) or in strings. This
# includes the comment region. This region is not to be messed with.
in_symbol_region = in_string or in_comment or in_symbol_with_space or \
in_newlisp_string or in_newlisp_tag_string
# we need to know the line number in order to issue almost accurate messages about
# unclosed brackets and string
line_number = 1
# Stores the last position a quote was encountered so that in case there are
# any unclosed strings, we can pinpoint them
last_quote_location = ()
line_ending = find_line_ending(original_code)
code_lines = split_preserve(original_code, line_ending)
indented_code = []
bracket_locations = []
# List of warnings from errors in the code
message_stack = []
for line in code_lines:
escaped = False
curr_line = line
# Get the indent level and the indented line
zero_level, curr_line, indent_level = indent_line(zero_level,
bracket_locations,
line, in_comment,
in_symbol_region, args)
# Build up the indented string.
indented_code.append(curr_line)
regex = '^[ \t]*'
lead_spaces = re.findall(regex, curr_line)
if lead_spaces:
curr_line = re.sub(regex, detabify(lead_spaces[0], args), curr_line)
offset = 0
for curr_char in curr_line:
next_char = curr_line[offset + 1:offset + 2]
prev_char = curr_line[offset - 1:offset]
substr = curr_line[offset + 1:] # slice to the end
if escaped:
# Move to the next character if the current one has been escaped
escaped = False
offset += 1
continue
if curr_char == '\\' and not in_newlisp_string and not in_newlisp_tag_string:
# the next character has been escaped
escaped = True
if (curr_char == ';' or (curr_char == '#' and args.dialect == 'newlisp'))\
and not in_symbol_region and not \
(prev_char == '#' and args.dialect == 'scheme'):
# a comment has been found, go to the next line
# A sharp sign(#) before a semi-colon in Scheme is used to
# comment out sections of code. We don't treat it as a comment
break
# ----------------------------------------------------------
# Comments are dealt with here. Clojure and newLISP don't have Lisp
# style multiline comments so don't include them.
if args.dialect not in ['clojure', 'newlisp'] and curr_char == '|' \
and not in_string:
if prev_char == '#' and not in_symbol_with_space:
comment_locations.append((line_number, offset))
in_comment += 1
elif in_comment and next_char == '#':
in_comment -= 1
comment_locations.pop()
elif not in_comment:
if in_symbol_with_space:
last_symbol_location = ()
in_symbol_with_space = False
else:
last_symbol_location = (line_number, offset)
in_symbol_with_space = True
# ----------------------------------------------------------
# Strings are dealt with here only if we are not in a comment
if not (in_symbol_with_space or in_comment or in_newlisp_tag_string):
if curr_char == '"':
last_quote_location = (line_number, offset)
in_string = not bool(in_string)
if args.dialect == 'newlisp' and not in_string:
# We handle newLISP's multiline(brace) string here. Brace
# strings can nest
if curr_char == '{':
newlisp_brace_locations.append((line_number, offset))
in_newlisp_string += 1
elif curr_char == '}':
if newlisp_brace_locations:
newlisp_brace_locations.pop()
else:
message = "Attempt to close a non-existent newLISP string"
warning_info = {
'msg': message,
'line': line_number,
'column': offset
}
message_stack.append(warning_info)
in_newlisp_string -= 1
if curr_char == '[' and args.dialect == 'newlisp' and not \
(in_newlisp_string or in_string):
# We have to handle tag strings in newLISP here.
if re.match('\[text\]', curr_line[offset:offset + 7]):
in_newlisp_tag_string = True
if first_tag_string == ():
first_tag_string = (line_number, offset)
elif re.match('\[/text\]', curr_line[offset:offset + 7]):
in_newlisp_tag_string = False
first_tag_string = ()
in_symbol_region = in_string or in_comment or in_symbol_with_space \
or in_newlisp_string or in_newlisp_tag_string
if in_symbol_region:
# move on if we are in a string, a symbol with a space or a comment
# altogether known as the symbol region
offset += 1
continue
# Finds the real position of a bracket to be used in pinpointing where
# the unclosed bracket is. The real position is different from the offset
# because current offset is the position of the bracket in the
# trimmed string not the original.
real_position = (offset - zero_level) + \
len(re.findall('^[ \t]*', line)[0]) - indent_level
if curr_char in ['(', '[', '{']:
if curr_char in ['[', '{'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets should not contribute to
                    # the indentation in CL and newLISP
offset += 1
continue
first_arg_pos, spaces_before_func = \
find_first_arg_pos(offset, curr_line, args)
func_name = substr[0:first_arg_pos - 1].strip(')]\t\n\r ').lower()
in_list_literal = False
if re.search("[^#]('|`|#)([ \t]*\(|\[)($|\r)", curr_line[0:offset + 1]):
in_list_literal = True
if re.search('^[^ \t]+[ \t]*($|\r)', substr):
# The function is the last symbol/form in the line
func_name = substr.strip(')]\t\n\r ').lower()
if in_list_literal:
                    # Clear the function name: a keyword appearing inside a
                    # quoted list literal should not affect the indentation
func_name = ''
if func_name in ['define-macro', 'defmacro']:
# Macro names are part of two space indenters.
# This part tries to find the name so that it is not indented
# like a function the next time it's used.
end_of_space = re.search('^[ \t]*', substr).end()
substr = substr[end_of_space:]
substr = substr[re.search('[ \t]*', substr).start():].strip()
                    macro_name = substr[:substr.find(' ')]  # macro name is delimited by whitespace
if macro_name != '':
keywords[macro_name] = KEYWORD1
# first_item stores the position of the first item in the literal list
# it's necessary so that we don't assume that the first item is always
# after the opening bracket.
first_item = re.search('[ \t]*', curr_line[offset + 1:]).end() + offset + 1
bracket_locations = _push_to_list(bracket_locations[:], func_name,
curr_char, line_number, offset,
first_arg_pos, first_item,
in_list_literal,
spaces_before_func, args)
elif curr_char in [']', ')', '}']:
if curr_char in [']', '}'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets should not contribute to
                    # the indentation in CL and newLISP
offset += 1
continue
bracket_locations = _pop_from_list(curr_char, bracket_locations[:],
line_number, real_position,
offset, message_stack)
if bracket_locations and curr_char in [' ', '\t'] and \
keywords[bracket_locations[-1]['func_name']] == KEYWORD2:
# This part changes the indentation level of a then clause so that
# we can achieve something like:
# (if (= this that)
# 'then-form
# 'else-form)
# This is done by keeping track of the number of spaces found. If
# you find two spaces it means that, for example that we have just
# passed the then-form and hence should decrease the indentation
# level by 2.(I shamelessly copied this algorithm from Dorai's
# indenter)
if prev_char not in [' ', '\t', ''] or not \
re.search('^[ \t]*(;|#\||$|\r)', curr_line):
# The level shouldn't be decreased if the line is a comment
# line. The regex above takes care of that.
bracket_locations[-1]['spaces'] += 1
if bracket_locations[-1]['spaces'] == 2:
bracket_locations[-1]['indent_level'] -= \
0 if args.uniform else args.indent_size
# some dummy value to prevent control from reaching here again
bracket_locations[-1]['spaces'] = 999
offset += 1
line_number += 1
res = {
'message_stack': message_stack,
'first_tag_string': first_tag_string,
'in_newlisp_tag_string': in_newlisp_tag_string,
'last_symbol_location': last_symbol_location,
'comment_locations': comment_locations,
'newlisp_brace_locations': newlisp_brace_locations,
'in_string': in_string,
'in_comment': in_comment,
'in_symbol_with_space': in_symbol_with_space,
'bracket_locations': bracket_locations,
'last_quote_location': last_quote_location,
'original_code': code_lines,
'indented_code': indented_code
}
return res
def colour_diff(diff_lines):
""" colour_diff(diff_lines : lst)
Print diff text to terminal in color
"""
try:
import colorama
except ImportError:
# colorama is not available, print plain diff
print(''.join(list(diff_lines)))
return
colorama.init()
def p_green(text):
""" Print added line in green """
print(colorama.Fore.GREEN + text + colorama.Fore.WHITE, end='')
def p_yellow(text):
""" Print diff section header in yellow """
print(colorama.Fore.YELLOW + text + colorama.Fore.WHITE, end='')
def p_red(text):
""" Print removed line in red """
print(colorama.Fore.RED + text + colorama.Fore.WHITE, end='')
    section = re.compile(r'@@\s+-\d+,\d+\s+\+\d+,\d+\s+@@')
for line in diff_lines:
if line.startswith('-'):
p_red(line)
elif line.startswith('+'):
p_green(line)
elif section.search(line):
p_yellow(line)
else:
print(line, end='')
def _post_indentation(res, args=None, fpath=''):
""" _post_indentation(res : dict):
Called after the string has been indented appropriately.
It takes care of writing the file and checking for unclosed strings
or comments.
"""
fname = os.path.basename(fpath)
args = parse_args(args)
for msg in res['message_stack']:
if args.warning:
if args.files:
msg['fname'] = fname
sys.stderr.write('\n{fname}:{line}:{column}: {msg}'.format(**msg))
else:
# Input was passed through stdin
sys.stderr.write('\n:{line}:{column}: {msg}'.format(**msg))
if res['bracket_locations']:
# If the bracket_locations list is not empty it means that there are some
# brackets(opening) that haven't been closed.
for bracket in res['bracket_locations']:
line = bracket['line_number']
column = bracket['bracket_pos']
character = bracket['character']
# The bracket_locations are not very accurate. The warning might be
# misleading because it considers round and square brackets to be
# the same.
message = "\n%s:%d:%d: Unmatched `%s'"
if args.warning:
sys.stderr.write(message % (fname, line, column, character))
if res['newlisp_brace_locations']:
for brace in res['newlisp_brace_locations']:
message = "\n%s:%d:%d: Unclosed newLISP brace string"
if args.warning:
sys.stderr.write(message % (fname, brace[0], brace[1]))
if res['comment_locations']:
for comment in res['comment_locations']:
message = "\n%s:%d:%d: Unclosed multiline comment"
tpl = (fname,) + comment
if args.warning:
sys.stderr.write(message % tpl)
if res['last_symbol_location']:
message = "\n%s:%d:%d: Unclosed symbol"
tpl = (fname,) + res['last_symbol_location']
if args.warning:
sys.stderr.write(message % tpl)
if res['in_string']:
message = "\n%s:%d:%d: String extends to end-of-file"
tpl = (fname,) + res['last_quote_location']
if args.warning:
sys.stderr.write(message % tpl)
if res['in_newlisp_tag_string']:
message = "\n%s:%d:%d: Tag string extends to end-of-file"
tpl = (fname,) + res['first_tag_string']
if args.warning:
sys.stderr.write(message % tpl)
output_file = args.output_file
if not output_file:
output_file = fpath
indented_code = res['indented_code']
indent_result = ''.join(indented_code)
if indented_code == res['original_code'] and args.files:
message = "File '%s' has already been formatted. Leaving it unchanged...\n"
sys.stderr.write(message % fname)
if output_file != fpath:
with open(output_file, 'wb') as indented_file:
indented_file.write(indent_result.encode('utf8'))
else:
if args.output_diff:
diff = difflib.unified_diff(res['original_code'], indented_code, n=5)
if args.colour_diff:
colour_diff(diff)
else:
print(''.join(list(diff)))
elif args.output:
print(indent_result, end='')
if args.modify:
# write in binary mode to preserve the original line ending
with open(output_file, 'wb') as indented_file:
indented_file.write(indent_result.encode('utf8'))
def indent_files(arguments):
""" indent_files(arguments)
Note: if the parallel option is provided, the files will be read and processed
in parallel
"""
args = parse_args(arguments)
if not args.files:
# Indent from stdin
code = sys.stdin.read()
indent_result = indent_code(code, args)
_post_indentation(indent_result)
if args.parallel:
import multiprocessing
pool = multiprocessing.Pool(multiprocessing.cpu_count())
pool.starmap(indent_file, [(fname, args) for fname in args.files])
else:
for fname in args.files:
indent_file(fname, args)
def indent_file(fname, args):
"""
indent_file(fname: string, args)
1. Create a backup of the source file(backup_source_file())
2. Read the file contents(read_file())
3. Indent the code(indent_code())
4. Write to the file or print the indented code(_post_indentation())
"""
args = parse_args(args)
fname = os.path.expanduser(fname)
code = read_file(fname)
if not args.dialect:
# Guess dialect from the file extensions if none is specified in the command
# line
if fname.endswith('.lisp'):
args.dialect = 'lisp'
elif fname.endswith('.lsp'):
args.dialect = 'newlisp'
elif re.search(".clj[sc]{0,1}$", fname):
args.dialect = 'clojure'
elif fname.endswith('.ss') or fname.endswith('.scm'):
args.dialect = 'scheme'
else:
args.dialect = 'all'
indent_result = indent_code(code, args)
if args.backup:
# Create a backup file in the specified directory
backup_source_file(fname, args)
_post_indentation(indent_result, fpath=fname)
def main():
""" Entry point """
indent_files(sys.argv[1:])
if __name__ == '__main__':
main()
| 38.994493
| 115
| 0.57877
| 0
| 0
| 0
| 0
| 8,118
| 0.163795
| 0
| 0
| 22,255
| 0.449034
|
83aba0fd5dc8afef0605fcfd0ba5992a35684f2e
| 12,989
|
py
|
Python
|
Speedo/plugins/animoji.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/plugins/animoji.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/plugins/animoji.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | 3
|
2021-10-12T08:17:01.000Z
|
2021-12-21T01:17:54.000Z
|
import asyncio
from collections import deque
from . import *
@speedo.on(Speedo_cmd(pattern="think$", outgoing=True))
@speedo.on(sudo_cmd(pattern="think$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "think")
deq = deque(list("🤔🧐🤔🧐🤔🧐"))
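    # Re-editing the message after each one-step rotation of the deque
    # shifts every emoji along by one slot, producing the animation.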
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern="ccry$", outgoing=True))
@speedo.on(sudo_cmd(pattern="ccry$", allow_sudo=True))
async def cry(e):
if e.fwd_from:
return
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("(;´༎ຶД༎ຶ)")
@speedo.on(Speedo_cmd(pattern="fap$", outgoing=True))
@speedo.on(sudo_cmd(pattern="fap$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "fapping(°_°)")
deq = deque(list("🍆✊🏻💦"))
for _ in range(32):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern=r"lmao$"))
@speedo.on(sudo_cmd(pattern=r"lmao$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "lmao")
deq = deque(list("😂🤣😂🤣😂🤣"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern=r"nothappy$"))
@speedo.on(sudo_cmd(pattern="nothappy$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "nathappy")
deq = deque(list("😁☹️😁☹️😁☹️😁"))
for _ in range(48):
await asyncio.sleep(0.4)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(outgoing=True, pattern="clock$"))
@speedo.on(sudo_cmd(pattern="clock$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "clock")
deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern=r"muah$"))
@speedo.on(sudo_cmd(pattern="muah$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "muah")
deq = deque(list("😗😙😚😚😘"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern="heart$"))
@speedo.on(sudo_cmd(pattern="heart$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "heart")
deq = deque(list("❤️🧡💛💚💙💜🖤"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern="gym$", outgoing=True))
@speedo.on(sudo_cmd(pattern="gym$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "gym")
deq = deque(list("🏃🏋🤸🏃🏋🤸🏃🏋🤸"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern=f"earth$", outgoing=True))
@speedo.on(sudo_cmd(pattern="earth$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "earth")
deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(outgoing=True, pattern="moon$"))
@speedo.on(sudo_cmd(pattern="moon$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "moon")
deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
for _ in range(48):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
@speedo.on(Speedo_cmd(pattern=f"lovestory$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"lovestory$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 103)
await eor(event, "Let me tel you")
animation_chars = [
"1 ❤️ love story",
" 😐 😕 \n/👕\ <👗\ \n 👖 /|",
" 😉 😳 \n/👕\ /👗\ \n 👖 /|",
" 😚 😒 \n/👕\ <👗> \n 👖 /|",
" 😍 ☺️ \n/👕\ /👗\ \n 👖 /|",
" 😍 😍 \n/👕\ /👗\ \n 👖 /|",
" 😘 😊 \n /👕\/👗\ \n 👖 /|",
" 😳 😁 \n /|\ /👙\ \n / / |",
"😈 /😰\ \n<|\ 👙 \n /🍆 / |",
"😅 \n/(),✊😮 \n /\ _/\\/|",
"😎 \n/\\_,__😫 \n // // \\",
"😖 \n/\\_,💦_😋 \n // // \\",
" 😭 ☺️ \n /|\ /(👶)\ \n /!\ / \ ",
"Abee aur kitna dekhoge be besharmi ki bhi hadd hoti hai..,The End 😂...",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])  # wrap on the 14 frames instead of indexing past the list
@speedo.on(Speedo_cmd(pattern=f"smoon$", outgoing=True))
@speedo.on(sudo_cmd(pattern="smoon$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "smoon")
animation_interval = 0.1
animation_ttl = range(101)
await event.edit("smoon..")
animation_chars = [
"🌗🌗🌗🌗🌗\n🌓🌓🌓🌓🌓\n🌗🌗🌗🌗🌗\n🌓🌓🌓🌓🌓\n🌗🌗🌗🌗🌗",
"🌘🌘🌘🌘🌘\n🌔🌔🌔🌔🌔\n🌘🌘🌘🌘🌘\n🌔🌔🌔🌔🌔\n🌘🌘🌘🌘🌘",
"🌑🌑🌑🌑🌑\n🌕🌕🌕🌕🌕\n🌑🌑🌑🌑🌑\n🌕🌕🌕🌕🌕\n🌑🌑🌑🌑🌑",
"🌒🌒🌒🌒🌒\n🌖🌖🌖🌖🌖\n🌒🌒🌒🌒🌒\n🌖🌖🌖🌖🌖\n🌒🌒🌒🌒🌒",
"🌓🌓🌓🌓🌓\n🌗🌗🌗🌗🌗\n🌓🌓🌓🌓🌓\n🌗🌗🌗🌗🌗\n🌓🌓🌓🌓🌓",
"🌔🌔🌔🌔🌔\n🌘🌘🌘🌘🌘\n🌔🌔🌔🌔🌔\n🌘🌘🌘🌘🌘\n🌔🌔🌔🌔🌔",
"🌕🌕🌕🌕🌕\n🌑🌑🌑🌑🌑\n🌕🌕🌕🌕🌕\n🌑🌑🌑🌑🌑\n🌕🌕🌕🌕🌕",
"🌖🌖🌖🌖🌖\n🌒🌒🌒🌒🌒\n🌖🌖🌖🌖🌖\n🌒🌒🌒🌒🌒\n🌖🌖🌖🌖🌖",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 8])
@speedo.on(Speedo_cmd(pattern=f"tmoon$", outgoing=True))
@speedo.on(sudo_cmd(pattern="tmoon$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
event = await eor(event, "tmoon")
animation_interval = 0.1
animation_ttl = range(117)
await event.edit("tmoon")
animation_chars = [
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
"🌗",
"🌘",
"🌑",
"🌒",
"🌓",
"🌔",
"🌕",
"🌖",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 32])
@speedo.on(Speedo_cmd(pattern=f"hart$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"hart$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(20)
event = await eor(event, "❤️")
animation_chars = ["🖤", "❤️", "🖤", "❤️", ""]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@speedo.on(Speedo_cmd(pattern=f"anim$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"anim$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(20)
event = await eor(event, "😢")
animation_chars = [
"😁",
"😧",
"😡",
"😢",
"😁",
"😧",
"😡",
"😢",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % 8])  # the list has 8 frames, not 10
@speedo.on(Speedo_cmd(pattern=f"fuck$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"fuck$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 101)
await eor(event, "fuk")
animation_chars = ["👉 ✊️", "👉 ✊️", "👉 ✊️", "👉✊️💦"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@speedo.on(Speedo_cmd(pattern=f"sux$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"sux$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 101)
await eor(event, "sux")
animation_chars = ["🤵 👰", "🤵 👰", "🤵 👰", "🤵👼👰"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@speedo.on(Speedo_cmd(pattern=f"kiss$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"kiss$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 101)
await eor(event, "kiss")
animation_chars = ["🤵 👰", "🤵 👰", "🤵 👰", "🤵💋👰"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@speedo.on(Speedo_cmd(pattern=f"fnl$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"fnl$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(6)
event = await eor(event, "Hey There....")
animation_chars = ["😁🏿", "😁🏾", "😁🏽", "😁🏼", "😁"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % 5])  # the list has 5 frames, not 6
@speedo.on(Speedo_cmd(pattern=f"monkey$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"monkey$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(12)
event = await eor(event, "Hey There....")
animation_chars = ["🐵", "🙉", "🙈", "🙊", "🖕🐵🖕"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % 5])  # the list has 5 frames, not 6
@speedo.on(Speedo_cmd(pattern=f"hand$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"hand$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(13)
event = await eor(event, "🖐️")
animation_chars = [
"👈",
"👉",
"☝️",
"👆",
"🖕",
"👇",
"✌️",
"🤞",
"🖖",
"🤘",
"🤙",
"🖐️",
"👌",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 13])
@speedo.on(Speedo_cmd(pattern=f"gsg$", outgoing=True))
@speedo.on(sudo_cmd(pattern=f"gsg$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(12)
event = await eor(event, "ContDown....")
animation_chars = [
"🔟",
"9️⃣",
"8️⃣",
"7️⃣",
"6️⃣",
"5️⃣",
"4️⃣",
"3️⃣",
"2️⃣",
"1️⃣",
"0️⃣",
"🆘",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@speedo.on(Speedo_cmd(pattern=r"theart$", outgoing=True))
@speedo.on(sudo_cmd(pattern=r"theart$", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(54)
event = await eor(event, "🖤")
animation_chars = [
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
CmdHelp("animoji").add_command(
'think', None, 'Use and see'
).add_command(
'ccry', None, 'Use and see'
).add_command(
'fap', None, 'Use and see'
).add_command(
'lmao', None, 'Use and see'
).add_command(
'nothappy', None, 'Use and see'
).add_command(
'clock', None, 'Use and see'
).add_command(
'muah', None, 'Use and see'
).add_command(
'heart', None, 'Use and see'
).add_command(
'gym', None, 'Use and see'
).add_command(
'earth', None, 'Use and see'
).add_command(
'moon', None, 'Use and see'
).add_command(
'lovestory', None, 'Turu Lob'
).add_command(
'smoon', None, 'Use and see'
).add_command(
'tmoon', None, 'Use and see'
).add_command(
'hart', None, 'Use and see'
).add_command(
'anim', None, 'Use and see'
).add_command(
'fuck', None, 'Use and see'
).add_command(
'sux', None, 'Use and see'
).add_command(
    'kiss', None, 'What are you staring at, slum-dweller.'
).add_command(
'fnl', None, 'Use and See.'
).add_command(
'monkey', None, 'Use and see.'
).add_command(
'hand', None, 'Use and See.'
).add_command(
'gsg', None, 'Use and See.'
).add_command(
'theart', None, 'Hearts Animation.'
).add()
| 26.781443
| 81
| 0.528755
| 0
| 0
| 0
| 0
| 13,151
| 0.911997
| 10,514
| 0.729126
| 3,982
| 0.276144
|
83abb74e341537b5ab6f003c11360924411e10b7
| 4,014
|
py
|
Python
|
Chapter09/01-optimize-mlp-layers.py
|
KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python
|
ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686
|
[
"MIT"
] | null | null | null |
Chapter09/01-optimize-mlp-layers.py
|
KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python
|
ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686
|
[
"MIT"
] | null | null | null |
Chapter09/01-optimize-mlp-layers.py
|
KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python
|
ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686
|
[
"MIT"
] | null | null | null |
from deap import base
from deap import creator
from deap import tools
import random
import numpy
import mlp_layers_test
import elitism
# boundaries for layer size parameters:
# [hidden_layer_1_size, hidden_layer_2_size, hidden_layer_3_size, hidden_layer_4_size]
BOUNDS_LOW = [ 5, -5, -10, -20]
BOUNDS_HIGH = [15, 10, 10, 10]
NUM_OF_PARAMS = len(BOUNDS_HIGH)
# Genetic Algorithm constants:
POPULATION_SIZE = 20
P_CROSSOVER = 0.9 # probability for crossover
P_MUTATION = 0.5 # probability for mutating an individual
MAX_GENERATIONS = 10
HALL_OF_FAME_SIZE = 3
CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
# set the random seed:
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
# create the classifier accuracy test class:
test = mlp_layers_test.MlpLayersTest(None)
toolbox = base.Toolbox()
# define a single objective, maximizing fitness strategy:
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# create the Individual class based on list:
creator.create("Individual", list, fitness=creator.FitnessMax)
# define the layer_size_attributes individually:
for i in range(NUM_OF_PARAMS):
# "layer_size_attribute_0", "layer_size_attribute_1", ...
toolbox.register("layer_size_attribute_" + str(i),
random.uniform,
BOUNDS_LOW[i],
BOUNDS_HIGH[i])
# create a tuple containing an layer_size_attribute generator for each hidden layer:
layer_size_attributes = ()
for i in range(NUM_OF_PARAMS):
layer_size_attributes = layer_size_attributes + \
(toolbox.__getattribute__("layer_size_attribute_" + str(i)),)
# create the individual operator to fill up an Individual instance:
toolbox.register("individualCreator",
tools.initCycle,
creator.Individual,
layer_size_attributes,
n=1)
# create the population operator to generate a list of individuals:
toolbox.register("populationCreator",
tools.initRepeat,
list,
toolbox.individualCreator)
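# Illustrative sketch (an assumption, meaningful only after the registrations
# above): sampling one individual yields four floats, one per hidden layer,
# drawn uniformly from the bounds - e.g. something like
#   toolbox.individualCreator()  ->  [9.3, 4.2, -4.4, -11.6]
# Negative values are presumably interpreted downstream by MlpLayersTest as
# "omit this hidden layer", which would explain why BOUNDS_LOW dips below zero.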
# fitness calculation
def classificationAccuracy(individual):
return test.getAccuracy(individual),
toolbox.register("evaluate", classificationAccuracy)
# genetic operators:
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mate",
tools.cxSimulatedBinaryBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR)
toolbox.register("mutate",
tools.mutPolynomialBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR,
indpb=1.0/NUM_OF_PARAMS)
# Genetic Algorithm flow:
def main():
# create initial population (generation 0):
population = toolbox.populationCreator(n=POPULATION_SIZE)
# prepare the statistics object:
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("max", numpy.max)
stats.register("avg", numpy.mean)
# define the hall-of-fame object:
hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
# perform the Genetic Algorithm flow with hof feature added:
population, logbook = elitism.eaSimpleWithElitism(population,
toolbox,
cxpb=P_CROSSOVER,
mutpb=P_MUTATION,
ngen=MAX_GENERATIONS,
stats=stats,
halloffame=hof,
verbose=True)
# print best solution found:
print("- Best solution is: ",
test.formatParams(hof.items[0]),
", accuracy = ",
hof.items[0].fitness.values[0])
if __name__ == "__main__":
main()
| 31.359375
| 89
| 0.623069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,244
| 0.309915
|
83abd40caac456af21954f1d6b702333668a6968
| 14,410
|
py
|
Python
|
tokenizer/state_table.py
|
xxronvel/foobar
|
0e4ca414a532eaa69803888a65ac1a2e0114183e
|
[
"MIT"
] | 1
|
2016-04-28T02:20:59.000Z
|
2016-04-28T02:20:59.000Z
|
tokenizer/state_table.py
|
xxronvel/foobar
|
0e4ca414a532eaa69803888a65ac1a2e0114183e
|
[
"MIT"
] | null | null | null |
tokenizer/state_table.py
|
xxronvel/foobar
|
0e4ca414a532eaa69803888a65ac1a2e0114183e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Aarón Abraham Velasco Alvarez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
input = {
"alpha" : 0,
"numeric" : 1,
"<" : 2,
"%" : 3,
"?" : 4,
"/" : 5,
"=" : 6,
">" : 7,
"#" : 8,
"*" : 9,
"+" : 10,
"-" : 11,
"." : 12,
"'" : 13,
"\"" : 14,
"\\" : 15,
"$" : 16,
"_" : 17,
"!" : 18,
"`" : 19 ,
"&" : 20,
"|" : 21,
":" : 22,
"\n" : 23,
" " : 24
}
errors = {
1 : "Parse error, unexpected input",
2 : "Parse error, unexpected '</'",
3 : "Parse error, expecting '>'",
4 : "Unterminated comment",
5 : "Parse error, expecting \"'\"",
6 : "Parse error, expecting '`'",
7 : "Parse error, expecting variable (T_VARIABLE) or '{' or '$'",
8 : "Unterminated tag",
9 : "Parse error, expecting '.'"
}
# TODO: embedded code
table = {
0 : ((60, 28, 1 , 3 , 2 , 21, 42, 44, 20, 41, 26, 27, 29, 34, 35, 0 , 58, 60, 43, 45, 46, 47, 63, 0 , 0 , 0 ), False, 0 , 1),
1 : ((-3, 0 , 8 , 5 , 6 , 4 , 10, 9 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
2 : ((0 , 0 , 0 , 0 , 11, 0 , 0 , 12, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 11, 0 , 0 , 0 ), True , 105,1),
3 : ((0 , 0 , 0 , 0 , 0 , 0 , 13, 14, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
4 : ((15, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 2),
5 : ((0 , 0 , 0 , 0 , 0 , 0 , 16, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 5 , 1),
6 : ((18, 0 , 0 , 0 , 0 , 0 , 17, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 2 , 1),
7 : ((7 , 7 , 0 , 7 , 7 , 7 , 7 , 72, 7 , 7 , 7 , 7 , 7 , 65, 66, 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 0 , 7 , 7 ), False, 0 , 8),
8 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
9 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
10 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 11, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
11 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
12 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 7 , 1),
13 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
14 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 8 , 1),
15 : ((15, 15, 0 , 0 , 0 , 0 , 0 , 19, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 3),
16 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 6 , 1),
17 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 3 , 1),
18 : ((18, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 1 , 1),
19 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 9 , 1),
20 : ((20, 20, 20, -1, -1, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0 , 20, 20), True , 26, 1),
21 : ((0 , 0 , 0 , 0 , 0 , 20, 23, 0 , 0 , 22, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
22 : ((22, 22, 22, 22, 22, 22, 22, 22, 22, 24, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22), False, 0 , 4),
23 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
24 : ((22, 22, 22, 22, 22, 25, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22), False, 0 , 4),
25 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 26, 1),
26 : ((0 , 28, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 31, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 29, 1),
27 : ((0 , 28, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 0 , 31, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 25, 1),
28 : ((0 , 28, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 32, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
29 : ((0 , 33, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 0 , 0 , 61, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 28, 1),
30 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
31 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 22, 1),
32 : ((0 , 33, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
33 : ((0 , 33, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
34 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 40, 34, 36, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
35 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 40, 37, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
36 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 38, 34, 38, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
37 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 39, 39, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
38 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 40, 34, 36, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
39 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 40, 37, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
40 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 12, 1),
41 : ((0 , 0 , 0 , 0 , 0 , 0 , 49, 0 , 0 , 48, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
42 : ((0 , 0 , 0 , 0 , 0 , 0 , 50, 49, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 106, 1),
43 : ((0 , 0 , 0 , 0 , 0 , 0 , 50, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 24, 1),
44 : ((0 , 0 , 0 , 0 , 0 , 0 , 51, 52, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
45 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 53, 45, 45, 45, 57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
46 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
47 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 ), True , 18, 1),
48 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
49 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
50 : ((0 , 0 , 0 , 0 , 0 , 0 , 55, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
51 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
52 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
53 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 56, 45, 45, 45, 56, 45, 45, 45, 45, 45, 45), False, 0 , 6),
54 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 23, 1),
55 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
56 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 53, 45, 45, 45, 57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
57 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 21, 1),
58 : ((59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 58, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 7),
59 : ((59, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 14, 1),
60 : ((60, 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 15, 1),
61 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 62, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 9),
62 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 103, 1),
63 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 64, 0 , 0 , 0 ), True , 107, 1),
64 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 104, 1),
65 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
66 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
67 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 70, 65, 70, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
69 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 71, 71, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
70 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
71 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
72 : ((-11, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 4 , 1),
-1 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , -2, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 100, 1),
-2 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0, 1),
-3 : ((-3, -3, 0 , -3, -3, -3, -3, -10, -3, -3, -3, -3, -3, -4, -5, -3, -3, -3, -3, -3, -3, -3, -3, 0, -3, -3), True , 0 , 1),
-4 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -3, -4, -6, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-5 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -3, -7, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-6 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -8, -4, -8, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-7 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -9, -9, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-8 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -3, -4, -6, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-9 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -3, -7, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-10 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 101, 1),
-11 : ((-11,-11,-12,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11), True , 0 , 1),
-12 : ((-14, 0 , 0 , 0 , 0 ,-13, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-13 : ((-21, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-14 : ((-14,-14, 0 ,-14,-14,-14,-14,-22,-14,-14,-14,-14,-14,-15,-16,-14,-14,-14,-14,-14,-14,-14,-14, 0 ,-14,-14), True , 0 , 1),
-15 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-14,-15,-17,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-16 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-14,-18,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-17 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-19,-15,-19,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-18 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-20,-20,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-19 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-14,-15,-17,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-20 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-14,-18,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-21 : ((-21,-21, 0 , 0 , 0 , 0 , 0 ,-22, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-22 : (( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 102, 1),
100: ((0 , 0 , 0 , 20, 20, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 26, 1),
101: ((7 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
102: ((103, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 1),
103: ((103, 103, 0, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103), True , 126, 1)
}
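# A reading of the layout above (inferred from the data, not documented in the
# original): each row appears to be
#   state: (next_state_per_input_class, is_accepting, token_id, error_or_group_id),
# where a character's column index comes from the `input` map and the fourth
# field of non-accepting states indexes the `errors` dict. Stepping the
# automaton one character might look like:
#   col = input[ch]                    # classify the character
#   next_state = table[state][0][col]  # follow the transition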
| 83.294798
| 151
| 0.373352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,503
| 0.104288
|
83ade6c082d1004a672714e19137f8f4cc8ec685
| 748
|
py
|
Python
|
notifications_utils/__init__.py
|
cds-snc/notifier-utils
|
c3a205ac4381312fe1884a39ffafa7ffb862736f
|
[
"MIT"
] | 3
|
2020-04-29T17:13:43.000Z
|
2020-12-04T21:08:33.000Z
|
notifications_utils/__init__.py
|
cds-snc/notifier-utils
|
c3a205ac4381312fe1884a39ffafa7ffb862736f
|
[
"MIT"
] | 21
|
2020-04-16T12:29:46.000Z
|
2022-02-28T17:17:15.000Z
|
notifications_utils/__init__.py
|
cds-snc/notifier-utils
|
c3a205ac4381312fe1884a39ffafa7ffb862736f
|
[
"MIT"
] | 4
|
2020-02-21T20:20:00.000Z
|
2021-02-11T19:00:59.000Z
|
import re
SMS_CHAR_COUNT_LIMIT = 612 # 153 * 4
# regexes for use in recipients.validate_email_address.
# Valid characters taken from https://en.wikipedia.org/wiki/Email_address#Local-part
# Note: Normal apostrophe eg `Firstname-o'surname@domain.com` is allowed.
hostname_part = re.compile(r"^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$", re.IGNORECASE)
tld_part = re.compile(r"^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE)
VALID_LOCAL_CHARS = r"a-zA-ZÀ-ÿ0-9.!#$%&'*+/=?^_`{|}~\-"
EMAIL_REGEX_PATTERN = r"^[{}]+@([^.@][^@\s]+)$".format(VALID_LOCAL_CHARS)
email_with_smart_quotes_regex = re.compile(
# matches wider than an email - everything between an at sign and the nearest whitespace
r"(^|\s)\S+@\S+(\s|$)",
flags=re.MULTILINE,
)
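# Illustrative checks (not part of the original module):
#   import re
#   re.match(EMAIL_REGEX_PATTERN, "firstname-o'surname@example.com")  # matches
#   re.match(EMAIL_REGEX_PATTERN, "no-at-sign.example.com")           # None
#   hostname_part.match("xn--bcher-kva")                              # matches a punycode label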
| 44
| 92
| 0.667112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.629333
|
83af8ba0d0f4e817ed4ef6eadece62ddc41fd7db
| 1,230
|
py
|
Python
|
respondd/Cache.py
|
FreiFunkMuenster/py-respondd
|
4b59b0fa2418ed021abe2dca5906b8290e4600d0
|
[
"MIT"
] | null | null | null |
respondd/Cache.py
|
FreiFunkMuenster/py-respondd
|
4b59b0fa2418ed021abe2dca5906b8290e4600d0
|
[
"MIT"
] | null | null | null |
respondd/Cache.py
|
FreiFunkMuenster/py-respondd
|
4b59b0fa2418ed021abe2dca5906b8290e4600d0
|
[
"MIT"
] | null | null | null |
import time
class Cache(object):
globalCache = {}
    localCache = {}
timeout = 0
now = time.time()
@staticmethod
def setTimeout(timeout):
Cache.timeout = timeout
@staticmethod
def updateTime():
Cache.now = time.time()
@staticmethod
def _isValid(timestamp):
        return Cache.now - Cache.timeout <= timestamp
@staticmethod
def getGlobal(kw, fx, args = ()):
if kw not in Cache.globalCache:
Cache.globalCache[kw] = {
'timestamp': Cache.now,
'value' : fx(args),
'args' : args
}
elif not Cache._isValid(Cache.globalCache[kw]['timestamp']):
Cache.globalCache[kw]['value'] = fx(args)
return Cache.globalCache[kw]['value']
@staticmethod
def getGlobalB(kw, fx, args = ()):
return fx()
@staticmethod
def getLocal(kw, dom, fx, args = ()):
        if dom not in Cache.localCache:
            Cache.localCache[dom] = {}
        if kw not in Cache.localCache[dom]:
            Cache.localCache[dom][kw] = {
                'timestamp': Cache.now,
                'value'    : fx(args),
                'args'     : args
            }
        elif not Cache._isValid(Cache.localCache[dom][kw]['timestamp']):
            Cache.localCache[dom][kw]['value'] = fx(args)
        return Cache.localCache[dom][kw]['value']
@staticmethod
def getLocalB(kw, dom, fx, args = ()):
return fx()
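# Illustrative usage sketch (an assumed caller pattern, not part of the original;
# fetch_nodes and url are hypothetical):
#   Cache.setTimeout(30)   # entries stay valid for 30 seconds
#   Cache.updateTime()     # refresh the shared clock before a batch of lookups
#   nodes = Cache.getGlobal('nodes', fetch_nodes, args=(url,))
# Note that `fx` receives the args tuple as a single positional argument
# (fx(args)), while the *B variants bypass the cache and call fx() with none.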
| 22.363636
| 66
| 0.652846
| 1,217
| 0.989431
| 0
| 0
| 1,110
| 0.902439
| 0
| 0
| 98
| 0.079675
|
83b09d2ad07562da3b8e8e789cc7815800d89928
| 1,338
|
py
|
Python
|
jsonclasses_cli/package/swift/main_program_content.py
|
forever9717/jsonclasses-cli
|
b20d10cdf2d6c970a879e2a64f31555d4d808829
|
[
"MIT"
] | null | null | null |
jsonclasses_cli/package/swift/main_program_content.py
|
forever9717/jsonclasses-cli
|
b20d10cdf2d6c970a879e2a64f31555d4d808829
|
[
"MIT"
] | null | null | null |
jsonclasses_cli/package/swift/main_program_content.py
|
forever9717/jsonclasses-cli
|
b20d10cdf2d6c970a879e2a64f31555d4d808829
|
[
"MIT"
] | null | null | null |
from jsonclasses.cgraph import CGraph
from .import_lines import import_lines
from .string_query import string_query
from .int_query import int_query
from .float_query import float_query
from .bool_query import bool_query
from .sort_order import sort_order
from .data_enum import data_enum
from .data_class import data_class
from .session_items import session_items
from .session import session
from .response import response_struct
from .user_default import user_default
from .session_manager import session_manager
from .sign_out import sign_out
from .request_manager import request_manager
from ...utils.join_lines import join_lines
def main_program_content(cgraph: CGraph) -> str:
session_classes = session_items(cgraph)
use_session = len(session_classes) > 0
return join_lines([
import_lines(),
string_query(),
int_query(),
float_query(),
bool_query(),
# other type queries
sort_order(),
*map(lambda e: data_enum(e), cgraph._enum_map.values()),
*map(lambda c: data_class(c), cgraph._map.values()),
session(session_classes) if use_session else '',
response_struct(),
user_default(),
session_manager() if use_session else '',
sign_out(),
request_manager('http://127.0.0.1:5000', use_session),
], 2)
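# Note (an inference from the imports, not documented here): the generated Swift
# source is assembled in dependency order - shared query helpers first, then
# enums and data classes derived from the class graph, then session and request
# plumbing - with join_lines(..., 2) presumably separating sections by blank lines.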
| 34.307692
| 64
| 0.724963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.035127
|
83b0ae650bd55397213c23d819dd2927624d8665
| 121
|
py
|
Python
|
common/utils/__init__.py
|
jl1990/alpha-zero-general
|
6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4
|
[
"MIT"
] | null | null | null |
common/utils/__init__.py
|
jl1990/alpha-zero-general
|
6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4
|
[
"MIT"
] | null | null | null |
common/utils/__init__.py
|
jl1990/alpha-zero-general
|
6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4
|
[
"MIT"
] | null | null | null |
"""Useful utils
"""
from .eval import *
from .misc import *
# progress bar
from .progress.progress.bar import Bar as Bar
| 17.285714
| 45
| 0.719008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.272727
|
83b152d0790dab9900fa13fb39789949a2ecb7fe
| 6,664
|
py
|
Python
|
examples/move_presets.py
|
crazy-djactor/OnVifControlCam
|
36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d
|
[
"MIT"
] | null | null | null |
examples/move_presets.py
|
crazy-djactor/OnVifControlCam
|
36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d
|
[
"MIT"
] | null | null | null |
examples/move_presets.py
|
crazy-djactor/OnVifControlCam
|
36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d
|
[
"MIT"
] | null | null | null |
import zeep
import asyncio, sys
from onvif import ONVIFCamera
import cv2
import numpy as np
import urllib
from urllib.request import urlopen
IP="192.168.2.22" # Camera IP address
PORT=80 # Port
USER="admin" # Username
PASS="C0nc3ll0M4r1n" # Password
XMAX = 1
XMIN = -1
YMAX = 1
YMIN = -1
moverequest = None
ptz = None
active = False
def zeep_pythonvalue(self, xmlvalue):
return xmlvalue
zeep.xsd.simple.AnySimpleType.pythonvalue = zeep_pythonvalue
def setup_move():
mycam = ONVIFCamera(IP, PORT, USER, PASS)
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
global ptz
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
presets = ptz.GetPresets(gp)
for preset in presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
# GetStatus
print("GetStatus")
status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
status.Position.Zoom.x,
status.MoveStatus.PanTilt))
# abMove = ptz.create_type('AbsoluteMove')
# abMove.ProfileToken = profileToken
# print('status {} {} {} {}'.format(status.Position.PanTilt.x, status.Position.PanTilt.y,
# status.Velocity.PanTilt.x, status.Velocity.PanTilt.y))
    # NOTE: this early return makes the continuous-move setup below unreachable;
    # it is kept from the original for reference only.
    return
# Get PTZ configuration options for getting continuous move range
request = ptz.create_type('GetConfigurationOptions')
request.ConfigurationToken = media_profile.PTZConfiguration.token
ptz_configuration_options = ptz.GetConfigurationOptions(request)
global moverequest
moverequest = ptz.create_type('ContinuousMove')
moverequest.ProfileToken = media_profile.token
if moverequest.Velocity is None:
moverequest.Velocity = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
# Get range of pan and tilt
# NOTE: X and Y are velocity vector
# global XMAX, XMIN, YMAX, YMIN
# XMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max
# XMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min
# YMAX = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max
# YMIN = ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min
def url_to_image(url):
# password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# password_mgr.add_password(None, url, USER, PASS)
# handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# opener = urllib.request.build_opener(handler)
# urllib.request.install_opener(opener)
# resp = urlopen(url)
import requests
from requests.auth import HTTPDigestAuth
resp = requests.get(url, auth=HTTPDigestAuth(USER, PASS))
    if resp.status_code == 200:
        image = np.asarray(bytearray(resp.content), dtype="uint8")
        image2 = cv2.imdecode(image, cv2.IMREAD_COLOR)  # decode the JPEG bytes into a BGR image
        cv2.imshow('image', image2)
        return image2  # return the decoded image so callers can pass it to cv2.imwrite
    else:
        return None
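# Note (an inference from the commented-out urllib block above): the camera's
# snapshot endpoint apparently rejects HTTP Basic auth, which is why the working
# path uses requests with HTTPDigestAuth instead.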
class CameraController:
presets = []
status = None
def get_current_preset(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# GetStatus
print("GetStatus")
self.status = ptz.GetStatus({'ProfileToken': profileToken})
print('status {} {} {} ? => {}'.format(self.status.Position.PanTilt.x, self.status.Position.PanTilt.y,
self.status.Position.Zoom.x,
self.status.MoveStatus.PanTilt))
min_dist = 100
current_prest = None
for preset in self.presets:
position = preset['PTZPosition']
dist = pow((self.status.Position.PanTilt.x - position.PanTilt.x), 2) + pow((self.status.Position.PanTilt.y - position.PanTilt.y), 2)
if dist < min_dist:
min_dist = dist
current_prest = preset
snapshot = media.GetSnapshotUri({'ProfileToken': profileToken})
print('snapshot uri {}'.format(snapshot))
# image = io.imread(snapshot)
# n_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# cv2.imwrite('./image1.jpg', n_image)
image = url_to_image(snapshot.Uri)
cv2.imwrite('./image2.jpg', image)
return current_prest, self.status.MoveStatus.PanTilt, snapshot
def get_presets(self):
mycam = ONVIFCamera(IP, PORT, USER, PASS, '../wsdl/')
# Create media service object
media = mycam.create_media_service()
print("setup_move {} {}", mycam, media)
# Create ptz service object
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
profileToken = media_profile.token
# Get presets
print("Get Presets...")
gp = ptz.create_type('GetPresets')
gp.ProfileToken = profileToken
self.presets = ptz.GetPresets(gp)
for preset in self.presets:
if (hasattr(preset, "Name")):
name = preset.Name
else:
name = ""
position = preset['PTZPosition']
print("preset {} => ({}, {}, {})".format(name, position.PanTilt.x,
position.PanTilt.y,
position.Zoom.x))
return self.presets
if __name__ == '__main__':
# url_to_image('http://192.168.1.108/onvifsnapshot/media_service/snapshot?channel=1&subtype=0')
# setup_move()
camera = CameraController()
camera.get_presets()
camera.get_current_preset()
| 36.415301
| 144
| 0.620048
| 2,722
| 0.408463
| 0
| 0
| 0
| 0
| 0
| 0
| 2,092
| 0.313926
|
83b20a373bfc0ad0b76d049c2ba241c013b10033
| 737
|
py
|
Python
|
utils.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | null | null | null |
utils.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | 2
|
2022-01-11T15:50:04.000Z
|
2022-01-13T01:53:53.000Z
|
utils.py
|
OttrOne/suivi
|
9e53a39b0f50054b89cb960eb9055fd0a28a5ebf
|
[
"MIT"
] | null | null | null |
from string import ascii_lowercase, digits
from random import choice
from re import compile
def id_generator(length=8, chars=ascii_lowercase + digits):
return ''.join(choice(chars) for _ in range(length))
def hrsize(num: int) -> str:
    for unit in ['', 'KiB', 'MiB', 'GiB', 'TiB']:
        if num < 1024.0:
            return f"{num:3.1f}{unit}"
        num /= 1024.0
    return f"{num:3.1f}PiB"  # avoid falling off the end and returning None for huge values
def handle_variables(config: str, context: dict = None) -> str:
    context = context or {}  # avoid the shared-mutable-default pitfall
    pattern = compile(r'.*?\$\{(\w+)\}.*?')  # raw string avoids invalid-escape warnings
match = pattern.findall(config)
if match:
for var in match:
if var in context:
# leave var in config if not in context
config = config.replace(f"${{{var}}}", context.get(var))
return config
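# Illustrative results (not part of the original module):
#   hrsize(1536)                                     -> '1.5KiB'
#   handle_variables('host=${HOST}', {'HOST': 'a'})  -> 'host=a'
#   handle_variables('host=${HOST}')                 -> 'host=${HOST}'  (unknown vars stay in place)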
| 28.346154
| 72
| 0.58616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.151967
|
83b36d8e12e62551ee0f6bc0e1772654ff4d0f33
| 232
|
py
|
Python
|
comment/admin.py
|
Samurai-XHe/myblog
|
c9e182b84c3cb06b3207e7359f0a4d352c28d043
|
[
"MIT"
] | 1
|
2018-09-25T09:11:17.000Z
|
2018-09-25T09:11:17.000Z
|
comment/admin.py
|
Samurai-XHe/myblog
|
c9e182b84c3cb06b3207e7359f0a4d352c28d043
|
[
"MIT"
] | null | null | null |
comment/admin.py
|
Samurai-XHe/myblog
|
c9e182b84c3cb06b3207e7359f0a4d352c28d043
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('id', 'content_object', 'text', 'comment_time', 'user', 'root', 'parent', 'reply_to')
| 29
| 105
| 0.728448
| 143
| 0.616379
| 0
| 0
| 168
| 0.724138
| 0
| 0
| 70
| 0.301724
|
83b3965c73ce131b836c28e365aa820a33396d8f
| 2,602
|
py
|
Python
|
DiSPy/core/path.py
|
munrojm/DiSPy
|
c1ae9e213d16bfd098b362e7d54d997cd95f8919
|
[
"MIT"
] | 19
|
2018-10-05T01:49:36.000Z
|
2021-11-23T13:35:22.000Z
|
DiSPy/core/path.py
|
munrojm/DiSPy
|
c1ae9e213d16bfd098b362e7d54d997cd95f8919
|
[
"MIT"
] | 1
|
2019-03-27T20:13:08.000Z
|
2019-03-28T23:22:22.000Z
|
DiSPy/core/path.py
|
munrojm/DiSPy
|
c1ae9e213d16bfd098b362e7d54d997cd95f8919
|
[
"MIT"
] | 6
|
2019-06-05T21:41:16.000Z
|
2021-04-07T09:23:42.000Z
|
import numpy as np
from typing import Dict, List
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.symmetry.groups import SymmOp
from DiSPy.core.dg import DistortionGroup
from DiSPy.core.vecutils import closewrapped
# -- Path object and its attributes
class Path(MSONable):
def __init__(
self, images: List[Structure], distortion_group: DistortionGroup = None, img_sym_data: List[Dict] = None
):
self._images = images
        self._distortion_group = distortion_group  # keep the constructor arguments instead of discarding them
        self._img_sym_data = img_sym_data
def __len__(self):
return len(self._images)
@property
def images(self):
return self._images
@property
def distortion_group(self):
return self._distortion_group
@property
def img_sym_data(self):
return self._img_sym_data
@distortion_group.setter # type: ignore
def distortion_group(self, dg):
if not isinstance(dg, DistortionGroup):
raise ValueError("Symmetry operations in group data must be instances of SymmOp.")
else:
self._distortion_group = dg
@img_sym_data.setter # type: ignore
def img_sym_data(self, img_sym_data_list):
if len(img_sym_data_list) != len(self._images):
raise ValueError("Symmetry data list has wrong length.")
else:
self._img_sym_data = img_sym_data_list
def gen_atom_map(self, basis, vectol):
images = self._images
DG = self._distortion_group.matrices
numIm = len(images)
numAtoms = len(images[0].frac_coords)
num_unstar = self._distortion_group.num_unstar
a_map = np.zeros((len(DG), numIm, numAtoms, numAtoms))
for i in range(len(DG)):
for j in range(1, numIm - 1):
atoms1 = images[j].frac_coords
num1 = images[j].species
for k in range(0, numAtoms):
t_coord = np.dot(DG[i].rotation_matrix, atoms1[k])
t_coord = (t_coord + DG[i].translation_vector) % 1.0
if i < num_unstar:
atoms2 = images[j].frac_coords
num2 = images[j].species
else:
atoms2 = images[numIm - 1 - j].frac_coords
num2 = images[numIm - 1 - j].species
for l in range(0, numAtoms):
if closewrapped(t_coord, atoms2[l], vectol) and num1[k] == num2[l] and basis[k] == 1:
a_map[i, j, k, l] = 1
return a_map
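# A reading of the result above (inferred from the loops, not stated in the
# original): a_map[i, j, k, l] == 1 means symmetry operation DG[i] carries atom
# k of image j onto atom l (within `vectol`) of the partner image - image j
# itself for the first num_unstar (unstarred) operations, and the reversed
# image numIm - 1 - j for the starred ones.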
| 30.97619
| 112
| 0.593774
| 2,302
| 0.884704
| 0
| 0
| 755
| 0.290161
| 0
| 0
| 165
| 0.063413
|
83b4ad592dcb04cd2de490a6b0d70c2d9d26c009
| 2,815
|
py
|
Python
|
photos/models.py
|
kimutaimeshack/Instagram_clone
|
bdb035087f85fe055da29634cf7bc5dcb843897f
|
[
"MIT"
] | null | null | null |
photos/models.py
|
kimutaimeshack/Instagram_clone
|
bdb035087f85fe055da29634cf7bc5dcb843897f
|
[
"MIT"
] | null | null | null |
photos/models.py
|
kimutaimeshack/Instagram_clone
|
bdb035087f85fe055da29634cf7bc5dcb843897f
|
[
"MIT"
] | null | null | null |
from django.db import models
import datetime as dt
from django.urls import reverse
from cloudinary.models import CloudinaryField
# Create your models here.
class Editor(models.Model):
first_name = models.CharField(max_length =30 ,null=True)
last_name = models.CharField(max_length =30 ,null=True)
email = models.EmailField()
phone_number = models.CharField(max_length = 10,blank =True)
def __str__(self):
return self.first_name
    class Meta:
        ordering = ['first_name']  # Editor has no 'name' field; order by first_name
def save_editor(self):
self.save()
class tags(models.Model):
name = models.CharField(max_length =30, null=True)
def __str__(self):
return self.name
class Postall(models.Model):
    # Cloudinary image field
newimage = CloudinaryField('image',null=True)
def __str__(self):
        return str(self.newimage)  # __str__ must return a str, not a CloudinaryField value
class Category(models.Model):
    # category name field
name = models.CharField(max_length=100, null=True)
def __str__(self):
return self.name
class Location(models.Model):
    # location name field
name = models.CharField(max_length=100, null=True)
def __str__(self):
return self.name
class photos(models.Model):
title = models.CharField(max_length=100)
details = models.TextField(null=True)
category = models.ForeignKey(Category, on_delete=models.CASCADE, null=True)
location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True)
image = CloudinaryField('image',null=True)
pub_date = models.DateTimeField(auto_now_add=True ,null=True)
@classmethod
def search_by_category(cls,search_term):
news = cls.objects.filter(category__name__icontains=search_term)
return news
class Article(models.Model):
title = models.CharField(max_length =60, null=True)
post = models.TextField(null=True)
editor = models.ForeignKey(Editor, on_delete=models.CASCADE, null=True)
tags = models.ManyToManyField(tags)
pub_date = models.DateTimeField(auto_now_add=True, null=True)
photo_imagen = models.ImageField(upload_to = 'articles/', null=True)
def get_absolute_url(self): # new
return reverse('postdetail', args=[str(self.id)])
def __str__(self):
return self.title
@classmethod
def todays_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls,date):
news = cls.objects.filter(pub_date__date = date)
return news
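# Illustrative query sketches (assume Django is configured and migrations applied):
#   photos.search_by_category('nature')   # case-insensitive match on Category.name
#   Article.todays_news()                 # articles published today
#   Article.days_news(dt.date(2020, 1, 1))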
#..search
#...................
# class Post(models.Model):
# title = models.CharField(max_length=200)
# author = models.ForeignKey('auth.User',
# on_delete=models.CASCADE,)
# body = models.TextField()
# def __str__(self):
# return self.title
| 29.946809
| 79
| 0.671403
| 2,377
| 0.844405
| 0
| 0
| 416
| 0.14778
| 0
| 0
| 398
| 0.141385
|
83b730a44041eeddf60233b1f8b68fb907f48e86
| 2,386
|
py
|
Python
|
tests/unit/test_product.py
|
jeantardelli/architecture-patterns-with-python
|
d48c7d6d4a44073b815c7e6770e44cf2e231e35b
|
[
"MIT"
] | 1
|
2021-04-07T18:04:56.000Z
|
2021-04-07T18:04:56.000Z
|
tests/unit/test_product.py
|
jeantardelli/architecture-patterns-with-python
|
d48c7d6d4a44073b815c7e6770e44cf2e231e35b
|
[
"MIT"
] | null | null | null |
tests/unit/test_product.py
|
jeantardelli/architecture-patterns-with-python
|
d48c7d6d4a44073b815c7e6770e44cf2e231e35b
|
[
"MIT"
] | null | null | null |
from datetime import date, timedelta
from allocation.domain import events
from allocation.domain.model import Product, OrderLine, Batch
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
def test_prefers_warehouse_batches_to_shipments():
in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None)
shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow)
product = Product(sku="RETRO-CLOCK", batches=[in_stock_batch, shipment_batch])
line = OrderLine("ofer", "RETRO-CLOCK", 10)
product.allocate(line)
assert in_stock_batch.available_quantity == 90
assert shipment_batch.available_quantity == 100
def test_prefers_earlier_batches():
earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, eta=today)
medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow)
latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later)
product = Product(sku="MINIMALIST-SPOON", batches=[medium, earliest, latest])
line = OrderLine("order1", "MINIMALIST-SPOON", 10)
product.allocate(line)
assert earliest.available_quantity == 90
assert medium.available_quantity == 100
assert latest.available_quantity == 100
def test_returns_allocated_batch_ref():
in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None)
shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow)
line = OrderLine("oref", "HIGHBROW-POSTER", 10)
product = Product(sku="HIGHBROW-POSTER", batches=[in_stock_batch, shipment_batch])
allocation = product.allocate(line)
assert allocation == in_stock_batch.reference
def test_records_out_of_stock_event_if_cannot_allocate():
batch = Batch("batch1", "SMALL-FORK", 10, eta=today)
product = Product(sku="SMALL-FORK", batches=[batch])
product.allocate(OrderLine("order1", "SMALL-FORK", 10))
allocation = product.allocate(OrderLine("order2", "SMALL-FORK", 1))
assert product.events[-1] == events.OutOfStock(sku="SMALL-FORK")
assert allocation is None
def test_increments_version_number():
line = OrderLine("oref", "SCANDI-PEN", 10)
product = Product(
sku="SCANDI-PEN", batches=[Batch("b1", "SCANDI-PEN", 100, eta=None)])
product.version_number = 7
product.allocate(line)
assert product.version_number == 8
| 39.114754
| 86
| 0.720034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.197821
|
83b9a7791d97770f25fe0980d7aaeedc83bafde6
| 6,129
|
py
|
Python
|
tests/unit/states/test_grafana.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
tests/unit/states/test_grafana.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/states/test_grafana.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
# Import Salt Libs
import salt.utils.json
import salt.states.grafana as grafana
from salt.exceptions import SaltInvocationError
class GrafanaTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.grafana
'''
def setup_loader_modules(self):
return {grafana: {}}
# 'dashboard_present' function tests: 1
def test_dashboard_present(self):
'''
Test to ensure the grafana dashboard exists and is managed.
'''
name = 'myservice'
rows = ['systemhealth', 'requests', 'title']
row = [{'panels': [{'id': 'a'}], 'title': 'systemhealth'}]
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
comt1 = ('Dashboard myservice is set to be updated. The following rows '
'set to be updated: {0}'.format(['systemhealth']))
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
profile=False)
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
True, True)
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[False, False, True, True, True, True])
mock_t = MagicMock(return_value='')
mock_i = MagicMock(return_value=False)
source = {'dashboard': '["rows", {"rows":["baz", null, 1.0, 2]}]'}
mock_dict = MagicMock(return_value={'_source': source})
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f,
'pillar.get': mock_t,
'elasticsearch.get': mock_dict,
'elasticsearch.index': mock_i}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
with patch.dict(grafana.__opts__, {'test': True}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
comt = ('Dashboard {0} is set to be created.'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows':
[{'panels': 'b',
'title': 'systemhealth'}]})
with patch.object(salt.utils.json, 'loads', mock):
ret.update({'comment': comt1, 'result': None})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
with patch.object(salt.utils.json, 'loads',
MagicMock(return_value={'rows': {}})):
self.assertRaises(SaltInvocationError,
grafana.dashboard_present, name,
rows_from_pillar=rows)
comt = ('Dashboard myservice is up to date')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows': [{'panels': 'b',
'title': 'systemhealth'}]})
with patch.dict(grafana.__opts__, {'test': False}):
with patch.object(salt.utils.json, 'loads', mock):
comt = ('Failed to update dashboard myservice.')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
# 'dashboard_absent' function tests: 1
def test_dashboard_absent(self):
'''
Test to ensure the named grafana dashboard is deleted.
'''
name = 'myservice'
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[True, False])
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f}):
self.assertRaises(SaltInvocationError, grafana.dashboard_absent,
name)
with patch.dict(grafana.__opts__, {'test': True}):
comt = ('Dashboard myservice is set to be removed.')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
comt = ('Dashboard myservice does not exist.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
| 44.093525
| 81
| 0.498287
| 5,633
| 0.919073
| 0
| 0
| 0
| 0
| 0
| 0
| 1,431
| 0.23348
|
83b9dae35ff849de97a8ab5c1b5b328eee4bf2a8
| 517
|
py
|
Python
|
08.Graph/Kruskal.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 1
|
2021-11-21T06:03:06.000Z
|
2021-11-21T06:03:06.000Z
|
08.Graph/Kruskal.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | 2
|
2021-10-13T07:21:09.000Z
|
2021-11-14T13:53:08.000Z
|
08.Graph/Kruskal.py
|
SP2021-2/Algorithm
|
2e629eb5234212fad8bbc11491aad068e5783780
|
[
"MIT"
] | null | null | null |
def pprint(arr):
for line in arr:
print(line)
# 5 7
# 0 1 1
# 0 2 3
# 1 2 3
# 1 3 6
# 2 3 4
# 2 4 2
# 3 4 5
import sys
import heapq as hq
N, M = map(int, sys.stdin.readline().split(" "))
W = [[float('inf')] * N for _ in range(N)]
h = []
for _ in range(M):
i, j, w = map(int, sys.stdin.readline().split(" "))
hq.heappush(h, (w, i, j))
print(h)
def Kruskal(heap, source):
    answer = []
    parent = list(range(N))  # union-find parent table, one root per vertex
    def find(x):
        while parent[x] != x:  # walk to the root, halving the path as we go
            parent[x] = parent[parent[x]]; x = parent[x]
        return x
    while heap:
        w, i, j = hq.heappop(heap)
        if find(i) != find(j):  # the edge joins two components, so keep it
            parent[find(i)] = find(j); answer.append((i, j, w))
    return answer
print(Kruskal(h, 0))
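# For the sample graph in the comments above, the completed sketch selects the
# edges (0, 1, 1), (2, 4, 2), (0, 2, 3) and (2, 3, 4): total weight 10.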
| 16.15625
| 55
| 0.537718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.125725
|
83babd001889716c3b9b2382b50058698f6c9529
| 4,701
|
py
|
Python
|
resources.py
|
kordimsan/FreeWorker-Api
|
f4566d2d500328725c88d5fc5df7a4763cb4c185
|
[
"MIT"
] | null | null | null |
resources.py
|
kordimsan/FreeWorker-Api
|
f4566d2d500328725c88d5fc5df7a4763cb4c185
|
[
"MIT"
] | null | null | null |
resources.py
|
kordimsan/FreeWorker-Api
|
f4566d2d500328725c88d5fc5df7a4763cb4c185
|
[
"MIT"
] | null | null | null |
#from flask_restful import Resource, reqparse
from flask_restplus import Resource, reqparse,fields
from models import UserModel, RevokedTokenModel
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from run import api
parser = reqparse.RequestParser()
parser.add_argument('username', help = 'This field cannot be blank', required = True)
parser.add_argument('password', help = 'This field cannot be blank', required = True)
param = api.model('User registration', {'username' : fields.String('username'), 'password' : fields.String('password')})
class UserRegistration(Resource):
@api.expect(param)
def post(self):
data = parser.parse_args()
if UserModel.find_by_username(data['username']):
return {'message': 'User {} already exists'.format(data['username'])}
new_user = UserModel(
username = data['username'],
password = UserModel.generate_hash(data['password'])
)
try:
new_user.save_to_db()
access_token = create_access_token(identity = data['username'])
            refresh_token = create_refresh_token(identity = data['username'])
return {
'message': 'User {} was created'.format(data['username']),
'access_token': access_token,
'refresh_token': refresh_token
}
except:
return {'message': 'Something went wrong'}, 500
param = api.model('User login', {'username' : fields.String('username'), 'password' : fields.String('password')})
class UserLogin(Resource):
@api.expect(param)
def post(self):
data = parser.parse_args()
current_user = UserModel.find_by_username(data['username'])
if not current_user:
return {'message': 'User {} doesn\'t exist'.format(data['username'])}
if UserModel.verify_hash(data['password'], current_user.password):
access_token = create_access_token(identity = data['username'])
refresh_token = create_refresh_token(identity = data['username'])
return {
'message': 'Logged in as {}'.format(current_user.username),
'access_token': access_token,
'refresh_token': refresh_token
}
else:
return {'message': 'Wrong credentials'}
class UserLogoutAccess(Resource):
@api.doc(security='apikey')
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
try:
revoked_token = RevokedTokenModel(jti = jti)
revoked_token.add()
return {'message': 'Access token has been revoked'}
except:
return {'message': 'Something went wrong'}, 500
class UserLogoutRefresh(Resource):
@jwt_refresh_token_required
def post(self):
jti = get_raw_jwt()['jti']
try:
revoked_token = RevokedTokenModel(jti = jti)
revoked_token.add()
return {'message': 'Refresh token has been revoked'}
except:
return {'message': 'Something went wrong'}, 500
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(identity = current_user)
return {'access_token': access_token}
class AllUsers(Resource):
@api.doc(security='apikey')
@jwt_required
def get(self):
return UserModel.return_all()
@api.doc(security='apikey')
@jwt_required
def delete(self):
return UserModel.delete_all()
class UsersByName(Resource):
@api.doc(security='apikey')
@jwt_required
def get(self,username):
x = UserModel.find_by_username(username)
if not x:
return {'message': 'User {} doesn\'t exist'.format(username)}
return {
'user_id': x.user_id,
'username': x.username,
'password': x.password,
'email': x.email,
'admin': x.admin,
'first_name': x.first_name,
'last_name': x.last_name,
'phone_number': x.phone_number,
'latitude': x.latitude,
'longitude': x.longitude,
'area': x.area,
}
@api.doc(security='apikey')
@jwt_required
def delete(self,username):
return UserModel.del_by_username(username)
class SecretResource(Resource):
@api.doc(security='apikey')
@jwt_required
def get(self):
return {
'answer': 42
}
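# Wiring sketch (hypothetical -- the real route registration lives in run.py,
# which is not shown here). flask_restplus resources are usually attached via
# api.add_resource, e.g.:
#   api.add_resource(UserRegistration, '/registration')
#   api.add_resource(UserLogin, '/login')
#   api.add_resource(TokenRefresh, '/token/refresh')
#   api.add_resource(SecretResource, '/secret')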
| 34.313869
| 147
| 0.605829
| 3,914
| 0.832589
| 0
| 0
| 3,623
| 0.770687
| 0
| 0
| 954
| 0.202936
|
83bb637db13a5d4678648b8d28c8559126ac4919
| 3,176
|
py
|
Python
|
archivist/parser.py
|
Serhiy1/archivist-python
|
70c7acf29eecd303bb1517d3636663d83f36cc2c
|
[
"MIT"
] | 2
|
2021-05-04T15:12:37.000Z
|
2021-09-08T10:04:41.000Z
|
archivist/parser.py
|
Serhiy1/archivist-python
|
70c7acf29eecd303bb1517d3636663d83f36cc2c
|
[
"MIT"
] | 35
|
2021-05-04T12:39:26.000Z
|
2022-03-28T09:20:19.000Z
|
archivist/parser.py
|
Serhiy1/archivist-python
|
70c7acf29eecd303bb1517d3636663d83f36cc2c
|
[
"MIT"
] | 6
|
2021-04-28T14:49:48.000Z
|
2022-01-07T15:29:05.000Z
|
"""common parser argument
"""
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
import argparse
from enum import Enum
import logging
from sys import exit as sys_exit
from . import archivist
from .logger import set_logger
from .proof_mechanism import ProofMechanism
LOGGER = logging.getLogger(__name__)
# from https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
class EnumAction(argparse.Action):
"""
Argparse action for handling Enums
"""
def __init__(self, **kwargs):
# Pop off the type value
enum_type = kwargs.pop("type", None)
# Ensure an Enum subclass is provided
if enum_type is None:
raise ValueError("type must be assigned an Enum when using EnumAction")
if not issubclass(enum_type, Enum):
raise TypeError("type must be an Enum when using EnumAction")
# Generate choices from the Enum
kwargs.setdefault("choices", tuple(e.name for e in enum_type))
super().__init__(**kwargs)
self._enum = enum_type
def __call__(self, parser, namespace, values, option_string=None):
# Convert value back into an Enum
value = self._enum[values]
setattr(namespace, self.dest, value)
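# Usage sketch: EnumAction converts the chosen name back into the enum member.
# With the ProofMechanism enum imported above, something like:
#   p = argparse.ArgumentParser()
#   p.add_argument("--proof-mechanism", type=ProofMechanism, action=EnumAction)
#   args = p.parse_args(["--proof-mechanism", "SIMPLE_HASH"])
#   assert args.proof_mechanism is ProofMechanism.SIMPLE_HASH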
def common_parser(description):
"""Construct parser with security option for token/auth authentication"""
parser = argparse.ArgumentParser(
description=description,
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="print verbose debugging",
)
parser.add_argument(
"-u",
"--url",
type=str,
dest="url",
action="store",
default="https://rkvst.poc.jitsuin.io",
help="location of Archivist service",
)
parser.add_argument(
"-p",
"--proof-mechanism",
type=ProofMechanism,
action=EnumAction,
dest="proof_mechanism",
default=ProofMechanism.SIMPLE_HASH,
help="mechanism for proving the evidence for events on the Asset",
)
security = parser.add_mutually_exclusive_group(required=True)
security.add_argument(
"-t",
"--auth-token",
type=str,
dest="auth_token_file",
action="store",
default=".auth_token",
help="FILE containing API authentication token",
)
return parser, security
def endpoint(args):
if args.verbose:
set_logger("DEBUG")
else:
set_logger("INFO")
arch = None
LOGGER.info("Initialising connection to Jitsuin Archivist...")
fixtures = {
"assets": {
"proof_mechanism": args.proof_mechanism.name,
},
}
if args.auth_token_file:
with open(args.auth_token_file, mode="r", encoding="utf-8") as tokenfile:
authtoken = tokenfile.read().strip()
arch = archivist.Archivist(args.url, authtoken, verify=False, fixtures=fixtures)
if arch is None:
LOGGER.error("Critical error. Aborting.")
sys_exit(1)
return arch
| 25.821138
| 90
| 0.630668
| 857
| 0.269836
| 0
| 0
| 0
| 0
| 0
| 0
| 1,024
| 0.322418
|
83bc85a7d09d10f1f239ce0341b95393b82459b8
| 6,635
|
py
|
Python
|
skytap/models/UserData.py
|
mapledyne/skytap
|
c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45
|
[
"MIT"
] | 3
|
2019-04-17T13:07:30.000Z
|
2021-09-09T22:01:14.000Z
|
skytap/models/UserData.py
|
FulcrumIT/skytap
|
c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45
|
[
"MIT"
] | 10
|
2016-11-02T20:48:38.000Z
|
2021-09-15T15:29:34.000Z
|
skytap/models/UserData.py
|
FulcrumIT/skytap
|
c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45
|
[
"MIT"
] | 3
|
2016-03-03T07:25:13.000Z
|
2016-08-30T15:33:03.000Z
|
"""Support for the UserData resource in Skytap.
Specifically, this is for custom ('user data') that's applied to an environment
or VM. This data can be text or, in the context of using it with this Skytap
script, it can also be JSON or YAML and will then be re-parsed.
This allows users to put data into a VM user data block and it'll filter down
and be accessible to this script. We use this to expose variables to the user
like shutdown time and other automation pieces.
"""
from skytap.framework.ApiClient import ApiClient
import skytap.framework.Utils as Utils
from skytap.models.SkytapResource import SkytapResource
class UserData(SkytapResource):
"""UserData object to handle custom user data for a Skytap object.
This typically would be for a VM or Environment.
"""
def __init__(self, contents, env_url):
"""Create the UserData object."""
super(UserData, self).__init__(contents)
self.url = env_url + '/user_data.json'
def __str__(self):
"""Express the userdata as a string."""
return self.contents
def add(self, key, value):
"""Add value to environment's userdata.
Args:
key (str): The name of the value's key.
value (str): The value to add.
Returns:
str: The response from Skytap, or "{}".
"""
add_key = True
lines = self.contents.split("\n")
for i in lines:
if i != "":
j = i.split()
if len(j) > 0 and j[0] == (key + ":"):
add_key = False
if add_key:
Utils.info('Adding key \"' + key + '\" with value \"'
'' + value + '\"')
api = ApiClient()
new_content = "" + key + ": " + value + "\n" + self.contents
data = {"contents": new_content}
response = api.rest(self.url, data, 'POST')
self.data[key] = value
self.refresh()
return response
else:
            Utils.info('Key \"' + key + '\" with value \"' + value + '\" '
                       'already exists.')
return "{}"
def delete(self, key):
"""Delete key/value from environment's userdata.
Args:
            key (str): The name of the key to delete, along with its value.
Returns:
str: The response from Skytap, or "{}".
"""
new_content = ""
del_key = False
lines = self.contents.split("\n")
for i in lines:
if i != "":
j = i.split()
if len(j) > 0 and j[0] == (key + ":"):
del_key = True
else:
new_content += (i.strip() + "\n")
if del_key:
Utils.info('Deleting key \"' + key + '\".')
api = ApiClient()
data = {"contents": "" + new_content}
response = api.rest(self.url, data, 'POST')
self.refresh()
return response
else:
            Utils.info('Key \"' + key + '\" does not exist.')
return "{}"
def add_line(self, text, line=-1):
"""Add line to environment's userdata.
Args:
text (str): line of text to be added. (Required)
line (int): line number to add to. If too large, default to last.
Returns:
str: The response from Skytap.
"""
try:
line = int(line)
except ValueError:
return "{}" # Not an integer
lines = self.contents.split("\n")
new_content = ""
line_found = False
count = 0
for i in lines:
if i != "":
if line == count:
new_content += (text.strip() + "\n")
new_content += (i.strip() + "\n")
line_found = True
else:
new_content += (i.strip() + "\n")
count += 1
if not line_found:
new_content += (text.strip() + "\n")
Utils.info('Adding line: \"' + text + '\"')
api = ApiClient()
data = {"contents": new_content}
response = api.rest(self.url, data, 'POST')
self.refresh()
return response
def delete_line(self, line):
"""Delete line from environment's userdata.
Args:
line (int): line number to delete.
Returns:
str: The response from Skytap.
"""
line = str(line)
lines = self.contents.split("\n")
new_content = ""
for i in lines:
if i != "":
if i.strip() != line.strip():
new_content += (i.strip() + "\n")
Utils.info('Removing line: \"' + str(line) + '\"')
api = ApiClient()
data = {"contents": new_content.lstrip()}
response = api.rest(self.url, data, 'POST')
self.refresh()
return response
def get_line(self, line):
"""Return content of line from environment's userdata.
Args:
line (int): line number to get.
Returns:
str: The content of the line, or "".
"""
try:
line = int(line)
except ValueError:
raise ValueError("Line must be an integer.")
lines = self.contents.split("\n")
return lines[line]
def _get_values(self, contents):
"""Check userdata and set variables based on keys/values within."""
lines = contents.split("\n")
values = {}
for i in lines:
tokens = i.split()
if len(tokens) < 2:
continue
# Check for valid YAML formatting in first and second tokens in
# each line, then add those values to dict.
if (tokens[0].endswith(":") and "#" not in tokens[0] and
len(tokens) > 1 and "#" not in tokens[1]):
# If variable is a number, make it integer
try:
values[tokens[0][:-1]] = int(tokens[1])
except ValueError:
values[tokens[0][:-1]] = tokens[1]
self.data[tokens[0][:-1]] = values[tokens[0][:-1]]
return values
def _calculate_custom_data(self):
"""Add custom data.
Check contents and if there's something there, try to parse it, then
add all those key/value pairs to the data block. See _get_values()
"""
if self.contents:
self._get_values(self.contents)
else:
self.data["contents"] = ""
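# Worked example (a sketch): with contents "shutdown: 22\nowner: alice\n",
# _get_values() returns {'shutdown': 22, 'owner': 'alice'} -- numeric values
# are coerced to int -- and each pair is also mirrored into self.data.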
| 29.753363
| 79
| 0.505803
| 6,009
| 0.905652
| 0
| 0
| 0
| 0
| 0
| 0
| 2,571
| 0.387491
|
83bdd2c382d1213ade5ce2c23b12fd693e9a4c32
| 700
|
py
|
Python
|
example.py
|
six-two/py_derive_cmd
|
ae821f16a057a809166b287ab9f203a8cf4b34b3
|
[
"MIT"
] | null | null | null |
example.py
|
six-two/py_derive_cmd
|
ae821f16a057a809166b287ab9f203a8cf4b34b3
|
[
"MIT"
] | null | null | null |
example.py
|
six-two/py_derive_cmd
|
ae821f16a057a809166b287ab9f203a8cf4b34b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=unused-wildcard-import
from py_derive_cmd import *
import cmd
class MyCmd(cmd.Cmd):
pass
s = Settings(MyCmd, print_warnings=False)
@make_command(s, 'Test for the decorator', aliases=['d'])
def test_decorator(shell: MyCmd, req_arg: str, opt_arg: str = None):
print('Decorator works')
print(req_arg)
print(opt_arg)
def test_register(shell: MyCmd, raw_arg: str):
print('Register works')
print(raw_arg)
@make_command(s, 'Exit the shell by pressing Ctrl-D')
def EOF(shell: MyCmd) -> bool:
return True
CommandInfo(s, test_register, ['registered', 'r'], 'Test for register', raw_arg=True).register()
shell = MyCmd()
shell.cmdloop()
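# Expected interaction (a sketch; assumes make_command registers each function
# under its own name on MyCmd): at the cmd prompt, "test_decorator foo bar"
# should print "Decorator works", "foo", "bar"; the alias "d foo" should too
# (with opt_arg left as None), and Ctrl-D exits via the EOF command.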
| 25
| 96
| 0.708571
| 30
| 0.042857
| 0
| 0
| 293
| 0.418571
| 0
| 0
| 191
| 0.272857
|
83be1a3a3ee334b7ce8506d005b0c751dcb0f57c
| 2,474
|
py
|
Python
|
tf_quant_finance/datetime/constants.py
|
slowy07/tf-quant-finance
|
0976f720fb58a2d7bfd863640c12a2425cd2f94f
|
[
"Apache-2.0"
] | 3,138
|
2019-07-24T21:43:17.000Z
|
2022-03-30T12:11:09.000Z
|
tf_quant_finance/datetime/constants.py
|
Aarif1430/tf-quant-finance
|
9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6
|
[
"Apache-2.0"
] | 63
|
2019-09-07T19:16:03.000Z
|
2022-03-29T19:29:40.000Z
|
tf_quant_finance/datetime/constants.py
|
Aarif1430/tf-quant-finance
|
9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6
|
[
"Apache-2.0"
] | 423
|
2019-07-26T21:28:05.000Z
|
2022-03-26T13:07:44.000Z
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Date-related constants and enums."""
import enum
class Month(enum.Enum):
"""Months. Values are one-based."""
JANUARY = 1
  FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
class WeekDay(enum.Enum):
"""Named days of the week. Values are zero-based with Monday = 0."""
# We follow Python datetime convention of starting from 0.
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
class PeriodType(enum.Enum):
"""Periods that can be added or subtracted from DateTensors."""
DAY = 0
WEEK = 1
MONTH = 2
YEAR = 3
class BusinessDayConvention(enum.Enum):
"""Conventions that determine how to roll dates that fall on holidays.
* `NONE`: No adjustment
* `FOLLOWING`: Choose the first business day after the given holiday.
* `MODIFIED_FOLLOWING`: Choose the first business day after the given holiday
unless that day falls in the next calendar month, in which case choose the
first business day before the holiday.
* `PRECEDING`: Choose the first business day before the given holiday.
* `MODIFIED_PRECEDING`: Choose the first business day before the given holiday
unless that day falls in the previous calendar month, in which case choose the
first business day after the holiday.
"""
NONE = 0
FOLLOWING = 1
MODIFIED_FOLLOWING = 2
PRECEDING = 3
MODIFIED_PRECEDING = 4
# TODO(b/148011715): add NEAREST convention.
class WeekendMask(object):
"""Provides weekend masks for some of the common weekend patterns."""
# E.g. US/UK/Europe etc.
SATURDAY_SUNDAY = (0, 0, 0, 0, 0, 1, 1)
# E.g. Most countries in the Middle East.
FRIDAY_SATURDAY = (0, 0, 0, 0, 1, 1, 0)
# E.g. India, Nepal.
SUNDAY_ONLY = (0, 0, 0, 0, 0, 0, 1)
# Default value.
NONE = (0, 0, 0, 0, 0, 0, 0)
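# Worked example (a sketch, using the SATURDAY_SUNDAY weekend mask): if a date
# falls on a holiday on Friday 31 Jan 2020, FOLLOWING rolls it to Monday
# 3 Feb, while MODIFIED_FOLLOWING rolls it back to Thursday 30 Jan, because
# the first business day after the holiday lands in the next calendar month.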
| 26.891304
| 80
| 0.696847
| 1,766
| 0.713824
| 0
| 0
| 0
| 0
| 0
| 0
| 1,705
| 0.689167
|
83bf94a78ac2eb29dfd1c2b50e991146823fcf6e
| 2,345
|
py
|
Python
|
generate_trajectories.py
|
keuntaeklee/pytorch-PPUU
|
0ba8c953df9cdb1e9937e301ed3384ac6b66ea73
|
[
"MIT"
] | 159
|
2019-01-23T07:17:36.000Z
|
2022-03-29T14:33:31.000Z
|
generate_trajectories.py
|
keuntaeklee/pytorch-PPUU
|
0ba8c953df9cdb1e9937e301ed3384ac6b66ea73
|
[
"MIT"
] | 44
|
2019-04-29T15:11:44.000Z
|
2022-02-21T18:28:46.000Z
|
generate_trajectories.py
|
keuntaeklee/pytorch-PPUU
|
0ba8c953df9cdb1e9937e301ed3384ac6b66ea73
|
[
"MIT"
] | 61
|
2019-01-23T12:31:54.000Z
|
2022-03-07T09:25:20.000Z
|
import argparse, pdb
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
kwargs = dict(
display=opt.display,
state_image=opt.state_image,
store=opt.store,
fps=opt.fps,
nb_lanes=opt.lanes,
traffic_rate=opt.traffic_rate,
data_dir=opt.data_dir,
delta_t=opt.delta_t,
)
register(
id='Traffic-v0',
entry_point='traffic_gym:Simulator',
kwargs=kwargs
)
register(
id='I-80-v0',
entry_point='map_i80:I80',
kwargs=kwargs
)
gym.envs.registration.register(
id='US-101-v0',
entry_point='map_us101:US101',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Lankershim-v0',
entry_point='map_lanker:Lankershim',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Peachtree-v0',
entry_point='map_peach:Peachtree',
kwargs=kwargs,
)
env_names = {
'ai': 'Traffic-v0',
'i80': 'I-80-v0',
'us101': 'US-101-v0',
'lanker': 'Lankershim-v0',
'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
done = False
while not done:
observation, reward, done, info = env.step()
env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
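# Note (a sketch of the mechanics): gym.make() resolves the registered id and
# forwards the kwargs dict above to the entry_point class, so
# gym.make('I-80-v0') effectively constructs map_i80.I80(display=..., fps=...).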
| 24.946809
| 103
| 0.704478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 571
| 0.243497
|
83c0b7884ac12f94ceaeb582cc3c5f5cebb5a227
| 999
|
py
|
Python
|
main.py
|
mvazifeh/gridart
|
78c01d6e660ca9c61f1220e102975ca632a2af6b
|
[
"MIT"
] | null | null | null |
main.py
|
mvazifeh/gridart
|
78c01d6e660ca9c61f1220e102975ca632a2af6b
|
[
"MIT"
] | null | null | null |
main.py
|
mvazifeh/gridart
|
78c01d6e660ca9c61f1220e102975ca632a2af6b
|
[
"MIT"
] | null | null | null |
import matplotlib.pylab as plt
import numpy as np
import random
from scipy.ndimage import gaussian_filter
mu = 9
N = 50
k = 10
eta = 10
sigma = 2
p0 = 0.5
inverse_random = False
L = range(N*N)
Q = np.zeros((N*mu,N*mu))
for o in range(mu*mu):
print(o)
F = 1000*k
a = np.ones((N,N))
for k_ in range(1000):
linear_idx = random.choices(L, weights=a.ravel()/float(a.sum()), k = k)
x, y = np.unravel_index(linear_idx, a.shape)
x += np.random.randint(-eta,eta,k)
y += np.random.randint(-eta,eta,k)
cond = (x<0) | (x>=N) | (y<0) | (y>=N)
x_ = np.delete(x, np.where(cond))
y_ = np.delete(y, np.where(cond))
a[x_,y_]+=F
a = gaussian_filter(a,sigma =sigma)
if np.random.random()>p0 and inverse_random:
a = a.max()-a
Mx,My = np.unravel_index(o,(mu,mu))
Q[Mx*N:(Mx+1)*N,My*N:(My+1)*N] = a
fig,ax = plt.subplots(1,1,figsize = (20,20))
plt.imshow(Q, interpolation='nearest')
plt.axis('off')
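# Sketch of what the loop above does: each of the mu*mu tiles grows a density
# field by sampling k cells in proportion to current intensity
# (random.choices with weights), jittering them by up to eta pixels, depositing
# F, and Gaussian-blurring; the tiles are then composited into the Q mosaic.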
| 24.975
| 79
| 0.573574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.014014
|
83c0d18d58ec56ff811ed70776d16216d48d95ed
| 9,841
|
py
|
Python
|
fixture/contact.py
|
ruslankl9/python_training
|
7bcaf2606a80935a4a0c458af4e6a078f241fb38
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
ruslankl9/python_training
|
7bcaf2606a80935a4a0c458af4e6a078f241fb38
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
ruslankl9/python_training
|
7bcaf2606a80935a4a0c458af4e6a078f241fb38
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import re
class ContactHelper(object):
def __init__(self, app):
self.app = app
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def change_select_value(self, select_name, value):
wd = self.app.wd
if value is not None:
xpath_str = "//div[@id='content']/form/select[@name='{select_name}']//option[@value='{value}']".format(
select_name=select_name, value=value
)
if not wd.find_element_by_xpath(xpath_str).is_selected():
wd.find_element_by_xpath(xpath_str).click()
def create(self, contact):
wd = self.app.wd
self.open_contacts_page()
self.fill_contact_form(contact)
# submit contact form
wd.find_element_by_xpath("//div[@id='content']/form/input[@type='submit']").click()
self.return_to_home_page()
self.contact_cache = None
def open_contact_edit_page_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
# click first group edit link
wd.find_element_by_xpath("//table//tr[{0}]//td[8]//a".format(index + 2)).click()
def open_contact_edit_page_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_xpath("//input[@value='{0}']//..//..//td[8]//a".format(id)).click()
def open_contact_view_page_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
# click first group edit link
wd.find_element_by_xpath("//table//tr[{0}]//td[7]//a".format(index + 2)).click()
def modify_first_contact(self, new_contact_data):
self.modify_contact_by_index(0, new_contact_data)
def modify_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.open_contact_edit_page_by_index(index)
self.fill_contact_form(new_contact_data)
# submit contact form
wd.find_element_by_xpath("//div[@id='content']/form/input[@type='submit']").click()
self.return_to_home_page()
self.contact_cache = None
def modify_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
self.open_contact_edit_page_by_id(id)
self.fill_contact_form(new_contact_data)
# submit contact form
wd.find_element_by_xpath("//div[@id='content']/form/input[@type='submit']").click()
self.return_to_home_page()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
# fill contact form
self.change_field_value("firstname", contact.first_name)
self.change_field_value("middlename", contact.middle_name)
self.change_field_value("lastname", contact.last_name)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home_phone_number)
self.change_field_value("mobile", contact.mobile_phone_number)
self.change_field_value("work", contact.work_phone_number)
self.change_field_value("fax", contact.fax_number)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage_url)
# fill birthday
self.change_select_value("bday", contact.bday)
self.change_select_value("bmonth", contact.bmonth)
self.change_field_value("byear", contact.byear)
# fill anniversary
self.change_select_value("aday", contact.aday)
self.change_select_value("amonth", contact.amonth)
self.change_field_value("ayear", contact.ayear)
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.phone_number2)
self.change_field_value("notes", contact.notes)
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
# select contact with given index
wd.find_elements_by_name("selected[]")[index].click()
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.return_to_home_page()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(id)
# submit deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.return_to_home_page()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def open_contacts_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/edit.php"):
wd.find_element_by_link_text("add new").click()
def return_to_home_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/addressbook/") or wd.current_url.endswith("/index.php")):
wd.find_element_by_link_text("home").click()
def count(self):
wd = self.app.wd
self.app.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
"""
Получение списка контактов с домашней страницы
:return:
"""
if self.contact_cache is None:
wd = self.app.wd
self.app.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_css_selector('tr[name="entry"]'):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
last_name = cells[1].text
first_name = cells[2].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(id=id, first_name=first_name, last_name=last_name, address=address,
all_emails_from_home_page=all_emails,
all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_edit_page_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
first_name = wd.find_element_by_name("firstname").get_attribute("value")
last_name = wd.find_element_by_name("lastname").get_attribute("value")
address = wd.find_element_by_name("address").text
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
home_phone_number = wd.find_element_by_name("home").get_attribute("value")
work_phone_number = wd.find_element_by_name("work").get_attribute("value")
mobile_phone_number = wd.find_element_by_name("mobile").get_attribute("value")
phone_number2 = wd.find_element_by_name("phone2").get_attribute("value")
return Contact(id=id, first_name=first_name, last_name=last_name, address=address,
email=email, email2=email2, email3=email3,
home_phone_number=home_phone_number, mobile_phone_number=mobile_phone_number,
work_phone_number=work_phone_number, phone_number2=phone_number2)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_page_by_index(index)
text = wd.find_element_by_id("content").text
home_phone_number = re.search("H: (.*)", text).group(1)
work_phone_number = re.search("W: (.*)", text).group(1)
mobile_phone_number = re.search("M: (.*)", text).group(1)
phone_number2 = re.search("P: (.*)", text).group(1)
return Contact(home_phone_number=home_phone_number, mobile_phone_number=mobile_phone_number,
work_phone_number=work_phone_number, phone_number2=phone_number2)
def show_contacts_of_group(self, group):
wd = self.app.wd
self.app.open_home_page()
el = wd.find_element_by_xpath("//select[@name='group']//option[@value='{0}']".format(group.id))
if not el.is_selected():
el.click()
def add_to_group(self, contact, group):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(contact.id)
# select group to add into
el = wd.find_element_by_xpath("//select[@name='to_group']//option[@value='{0}']".format(group.id))
if not el.is_selected():
el.click()
# press add to group button
wd.find_element_by_css_selector("input[name='add']").click()
self.return_to_home_page()
def del_from_group(self, contact, group):
wd = self.app.wd
self.show_contacts_of_group(group)
self.select_contact_by_id(contact.id)
wd.find_element_by_css_selector("input[name='remove']").click()
self.return_to_home_page()
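# Usage sketch (hypothetical `app` fixture wrapping a webdriver session):
#   helper = ContactHelper(app)
#   helper.create(Contact(first_name="Ann", last_name="Lee"))
#   assert helper.count() == 1
#   helper.delete_contact_by_index(0)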
| 43.737778
| 117
| 0.648206
| 9,836
| 0.995345
| 0
| 0
| 0
| 0
| 0
| 0
| 1,463
| 0.148047
|
83c1d004633b6b337c6d2bc2c9a3fefc61d57d42
| 789
|
py
|
Python
|
setup.py
|
nakagami/pure-pyawabi
|
5ffafcaa381727af7f84013cf036a4e8f7dd51da
|
[
"MIT"
] | 1
|
2021-12-13T11:29:04.000Z
|
2021-12-13T11:29:04.000Z
|
setup.py
|
nakagami/pure-pyawabi
|
5ffafcaa381727af7f84013cf036a4e8f7dd51da
|
[
"MIT"
] | null | null | null |
setup.py
|
nakagami/pure-pyawabi
|
5ffafcaa381727af7f84013cf036a4e8f7dd51da
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name="pure-pyawabi",
version="0.2.4",
description='A morphological analyzer awabi clone',
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type="text/markdown",
url='https://github.com/nakagami/pure-pyawabi/',
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Operating System :: POSIX",
],
keywords=['MeCab'],
license="MIT",
author='Hajime Nakagami',
author_email='nakagami@gmail.com',
test_suite="tests",
packages=['pyawabi'],
scripts=['bin/pyawabi'],
)
| 30.346154
| 64
| 0.628644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 416
| 0.52725
|
83c2085a8eb1b76f57b29dca121b213d911376c1
| 3,137
|
py
|
Python
|
vacancies_and_studentships/models.py
|
okyame/Arkestra
|
4aa22816b33d8f2d7a6bc8f7a498957134b557dd
|
[
"BSD-2-Clause"
] | 1
|
2020-01-15T15:17:06.000Z
|
2020-01-15T15:17:06.000Z
|
vacancies_and_studentships/models.py
|
okyame/Arkestra
|
4aa22816b33d8f2d7a6bc8f7a498957134b557dd
|
[
"BSD-2-Clause"
] | null | null | null |
vacancies_and_studentships/models.py
|
okyame/Arkestra
|
4aa22816b33d8f2d7a6bc8f7a498957134b557dd
|
[
"BSD-2-Clause"
] | null | null | null |
from django.db import models
# from cms.models.fields import PlaceholderField
from cms.models import CMSPlugin
# from filer.fields.image import FilerImageField
from arkestra_utilities.output_libraries.dates import nice_date
# from arkestra_utilities.models import ArkestraGenericModel
from arkestra_utilities.generic_models import ArkestraGenericPluginOptions, ArkestraGenericModel
from arkestra_utilities.mixins import URLModelMixin
from arkestra_utilities.settings import PLUGIN_HEADING_LEVELS, PLUGIN_HEADING_LEVEL_DEFAULT
from contacts_and_people.models import Entity, Person #, default_entity_id
# from links.models import ExternalLink
from managers import VacancyManager, StudentshipManager
class CommonVacancyAndStudentshipInformation(ArkestraGenericModel, URLModelMixin):
class Meta:
abstract = True
ordering = ['-closing_date']
closing_date = models.DateField()
description = models.TextField(null=True, blank=True,
help_text="No longer used")
def link_to_more(self):
return self.get_hosted_by.get_related_info_page_url("vacancies-and-studentships")
@property
def get_when(self):
"""
get_when provides a human-readable attribute under which items can be grouped.
        Usually, this is an easily-readable rendering of the date (e.g. "April 2010") but it can also be "Top news", for items to be given special prominence.
"""
try:
# The render function of CMSNewsAndEventsPlugin can set a temporary sticky attribute for Top news items
if self.sticky:
return "Top news"
except AttributeError:
pass
date_format = "F Y"
get_when = nice_date(self.closing_date, date_format)
return get_when
@property
def date(self):
return self.closing_date
class Vacancy(CommonVacancyAndStudentshipInformation):
url_path = "vacancy"
job_number = models.CharField(max_length=9)
salary = models.CharField(blank=True, max_length=255, null=True,
help_text=u"Please include currency symbol")
objects = VacancyManager()
class Meta:
verbose_name_plural = "Vacancies"
class Studentship(CommonVacancyAndStudentshipInformation):
url_path = "studentship"
supervisors = models.ManyToManyField(Person, null=True, blank=True,
related_name="%(class)s_people")
objects = StudentshipManager()
class VacanciesPlugin(CMSPlugin, ArkestraGenericPluginOptions):
DISPLAY = (
(u"vacancies & studentships", u"Vacancies and studentships"),
(u"vacancies", u"Vacancies only"),
(u"studentships", u"Studentships only"),
)
display = models.CharField(max_length=25,choices=DISPLAY, default="vacancies & studentships")
# entity = models.ForeignKey(Entity, null=True, blank=True,
# help_text="Leave blank for autoselect", related_name="%(class)s_plugin")
vacancies_heading_text = models.CharField(max_length=25, default="Vacancies")
studentships_heading_text = models.CharField(max_length=25, default="Studentships")
| 36.057471
| 157
| 0.723621
| 2,416
| 0.770163
| 0
| 0
| 725
| 0.231113
| 0
| 0
| 1,045
| 0.333121
|
83c27bbb12a53e327e73c5820df10eeefe5bccca
| 239
|
py
|
Python
|
examples/django/015_deploy_app/project/example/views.py
|
HalfBottleOfMind/website
|
9bc83f11127ebb4f65124f794a7138373c95ab81
|
[
"Apache-2.0"
] | 12
|
2020-09-08T03:33:43.000Z
|
2021-09-03T12:31:38.000Z
|
examples/django/015_deploy_app/project/example/views.py
|
HalfBottleOfMind/website
|
9bc83f11127ebb4f65124f794a7138373c95ab81
|
[
"Apache-2.0"
] | 39
|
2021-01-29T16:11:45.000Z
|
2021-12-08T08:34:27.000Z
|
examples/django/015_deploy_app/project/example/views.py
|
HalfBottleOfMind/website
|
9bc83f11127ebb4f65124f794a7138373c95ab81
|
[
"Apache-2.0"
] | 23
|
2020-09-07T14:42:39.000Z
|
2021-12-07T20:41:29.000Z
|
from rest_framework import viewsets
from .models import Label
from .serizalizers import LabelSerializer
class LabelViewSet(viewsets.ModelViewSet):
queryset = Label.objects.all().order_by('id')
serializer_class = LabelSerializer
| 23.9
| 49
| 0.799163
| 131
| 0.548117
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.016736
|
83c2e6b596e3c848fe9f97b575c98a5ef638509f
| 2,660
|
py
|
Python
|
src/olympia/github/tests/test_views.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/github/tests/test_views.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/github/tests/test_views.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.utils.http import urlencode
import mock
import requests
from olympia.amo.tests import AMOPaths, TestCase
from olympia.amo.urlresolvers import reverse
from olympia.files.models import FileUpload
from olympia.github.tests.test_github import (
GithubBaseTestCase, example_pull_request)
class TestGithubView(AMOPaths, GithubBaseTestCase, TestCase):
def setUp(self):
super(TestGithubView, self).setUp()
self.url = reverse('github.validate')
def post(self, data, header=None, data_type=None):
data_type = data_type or 'application/json'
if (data_type == 'application/json'):
data = json.dumps(data)
elif (data_type == 'application/x-www-form-urlencoded'):
data = urlencode({'payload': json.dumps(data)})
return self.client.post(
self.url, data=data,
content_type=data_type,
HTTP_X_GITHUB_EVENT=header or 'pull_request'
)
def complete(self):
pending, success = self.requests.post.call_args_list
self.check_status(
'pending',
call=pending,
url='https://api.github.com/repos/org/repo/statuses/abc'
)
self.check_status(
'success',
call=success,
url='https://api.github.com/repos/org/repo/statuses/abc',
target_url=mock.ANY
)
assert FileUpload.objects.get()
def test_not_pull_request(self):
assert self.post({}, header='meh').status_code == 200
def test_bad_pull_request(self):
assert self.post({'pull_request': {}}).status_code == 422
def setup_xpi(self):
self.response = mock.Mock()
self.response.content = open(self.xpi_path('github-repo')).read()
self.requests.get.return_value = self.response
def test_pending_fails(self):
self.setup_xpi()
post = mock.Mock()
# GitHub returns a 404 when the addons-robot account does not
# have write access.
post.status_code = 404
post.raise_for_status.side_effect = requests.HTTPError(response=post)
self.requests.post.return_value = post
res = self.post(example_pull_request)
assert 'write access' in json.loads(res.content)['details']
def test_good_not_json(self):
self.setup_xpi()
assert self.post(
example_pull_request,
data_type='application/x-www-form-urlencoded').status_code == 201
self.complete()
def test_good(self):
self.setup_xpi()
assert self.post(example_pull_request).status_code == 201
self.complete()
| 31.666667
| 77
| 0.642105
| 2,343
| 0.880827
| 0
| 0
| 0
| 0
| 0
| 0
| 404
| 0.15188
|
83c369bbe9d3c23a66d5fe993029ea43352f5559
| 676
|
py
|
Python
|
exopy_qm/tasks/tasks/GetIOValuesTask.py
|
rassouly/exopy_qm
|
82eb7e4b4fc7364df3462bb7faa7a0880d699afc
|
[
"BSD-3-Clause"
] | null | null | null |
exopy_qm/tasks/tasks/GetIOValuesTask.py
|
rassouly/exopy_qm
|
82eb7e4b4fc7364df3462bb7faa7a0880d699afc
|
[
"BSD-3-Clause"
] | null | null | null |
exopy_qm/tasks/tasks/GetIOValuesTask.py
|
rassouly/exopy_qm
|
82eb7e4b4fc7364df3462bb7faa7a0880d699afc
|
[
"BSD-3-Clause"
] | null | null | null |
from exopy.tasks.api import (InstrumentTask)
from atom.api import Unicode, Bool, set_default
import sys
from exopy_qm.utils.dynamic_importer import *
class GetIOValuesTask(InstrumentTask):
""" Gets the IO values
"""
get_io_1 = Bool(True).tag(pref=True)
get_io_2 = Bool(True).tag(pref=True)
database_entries = set_default({'IO1': {}, 'IO2': {}})
def __init__(self, **kwargs):
super().__init__(**kwargs)
def perform(self):
io_values = self.driver.get_io_values()
if self.get_io_1:
self.write_in_database('IO1', io_values[0])
if self.get_io_2:
            self.write_in_database('IO2', io_values[1])  # second element is IO2
| 26
| 58
| 0.653846
| 522
| 0.772189
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.073964
|
83c3b7af49b5b0a425d6a463dbe982452346eedf
| 4,381
|
py
|
Python
|
src/ns_web_api/web/ptx/thsr.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | null | null | null |
src/ns_web_api/web/ptx/thsr.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | 3
|
2020-06-22T15:38:18.000Z
|
2021-11-24T02:01:51.000Z
|
src/ns_web_api/web/ptx/thsr.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | 1
|
2018-08-04T08:15:05.000Z
|
2018-08-04T08:15:05.000Z
|
import requests
import logging
from .auth import Auth
domain = "https://ptx.transportdata.tw/MOTC/v2/Rail/THSR/"
default_limit_count = 20
logger = logging.getLogger('flask.app')
auth = Auth()
def get_station():
"""GET /v2/Rail/THSR/Station 取得車站基本資料
Returns:
[dict] -- 車站基本資料
"""
action = "Station"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_station_id(station_names):
"""取得高鐵車站對應id
Arguments:
station_names {[list]} -- 想查詢的車站名稱
Returns:
[dictionary] -- key: station name, value: station id
"""
all_stations = get_station()
matchs = {}
for station_name in station_names:
match = None
try:
match = next(filter(lambda x:
station_name.strip() in x['StationName']['Zh_tw'].strip(), all_stations))
except StopIteration:
pass
if match:
matchs[station_name.strip()] = match['StationID']
return matchs
def get_fare(departure, destination):
"""GET /v2/Rail/THSR/ODFare/{OriginStationID}/to/{DestinationStationID}
取得指定[起訖站間]之票價資料
Arguments:
departure {str} -- 出發車站id
destination {str} -- 到達車站id
"""
if not departure:
return {}
if not destination:
return {}
action = "ODFare/{}/to/{}".format(departure, destination)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_timetable(no=''):
"""GET /v2/Rail/THSR/GeneralTimetable
取得所有車次的定期時刻表資料
Arguments:
no {str} -- 指定車次
"""
action = "GeneralTimetable"
if no:
action += "/TrainNo/{}".format(no)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_seat(id):
"""GET /v2/Rail/THSR/AvailableSeatStatusList/{StationID}
取得動態指定[車站]的對號座剩餘座位資訊看板資料
"""
if not id:
return {}
action = "AvailableSeatStatusList/{}".format(id)
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
else:
logger.info(r)
return {}
def get_news():
"""GET /v2/Rail/THSR/News
取得高鐵最新消息資料
"""
action = "News"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def get_alert():
"""GET /v2/Rail/THSR/AlertInfo
取得即時通阻事件資料
"""
action = "AlertInfo"
url = domain + action + '?a=' + __get_odata_parameter()
headers = {}
headers.update(auth.get_auth_header())
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
return {}
def __get_odata_parameter(top=0, skip=0, format="", orderby="", filter=""):
"""統一整理odata的固定參數指定回傳
Keyword Arguments:
top {int} -- 回傳幾筆 (default: {0})
skip {int} -- 跳過前面幾筆 (default: {0})
format {str} -- 回傳格式 json or xml (default: {""})
orderby {str} -- 排列順序, 傳入response欄位名稱 (default: {""})
filter {str} -- 篩選條件 (default: {""})
Returns:
[type] -- odata parameter的querystring
"""
param = {'top': top, 'skip': skip, 'orderby': orderby,
'format': format, 'filter': filter}
result = ""
if top > 0:
result += "&$top={top}"
if skip > 0:
result += "&$skip={skip}"
if orderby:
result += "&$orderby={orderby}"
if format:
result += "&$format={format}"
if filter:
result += "&$filter={filter}"
return result.format(**param)
if __name__ == '__main__':
pass
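# Worked example (a sketch): __get_odata_parameter(top=20, format="JSON")
# returns "&$top=20&$format=JSON"; the dummy "?a=" in each URL above
# presumably exists so these "&$..." fragments can be appended unconditionally.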
| 20.471963
| 105
| 0.582059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,762
| 0.375933
|