text stringlengths 8 6.05M |
|---|
# -^- coding:utf-8 -^-
# --------------------------------------------------------------
# Author: TanChao
# Date : 2015.09.23
# Intro : 用装饰器实现参数类型判断
# --------------------------------------------------------------
import inspect
import re
class ValidateException(Exception):
    """Raised when an argument of a decorated function fails validation."""
    pass
def validParam(*varargs, **keywords):
    '''Decorator that validates a function's arguments against conditions.

    Positional/keyword arguments to the decorator are the validation
    conditions (classes, (cls, rule) pairs, or validator functions) for the
    corresponding parameters of the wrapped function; they are normalized
    to validator functions by _toStardardCondition. On any failure a
    ValidateException is raised with the offending parameter, value and
    validator result.

    NOTE: Python 2 only (``except Exception, e`` / ``print`` statements /
    ``inspect.getargspec``).
    '''
    varargs = map(_toStardardCondition, varargs)
    keywords = dict((k, _toStardardCondition(keywords[k])) for k in keywords)

    def generator(func):
        # Names of the positional params, the *args name and the **kwargs
        # name of the wrapped function.
        args, varargname, kwname = inspect.getargspec(func)[:3]
        # dctValidator: parameter name -> validator function (reuses the
        # same name-binding logic used for the call-time values).
        dctValidator = _getCallArgs(args, varargname, kwname, varargs, keywords)

        def wrapper(*callvarargs, **callkeywords):
            # dctCallArgs: parameter name -> value actually passed
            dctCallArgs = _getCallArgs(args, varargname, kwname, callvarargs, callkeywords)
            # print 'dctValidator:', dctValidator
            # print 'dctCallArgs:', dctCallArgs
            # Pre-bind so the except block can report the failing item even
            # if the loop fails on its very first iteration.
            k, item, result = None, None, None
            try:
                # Check every condition in turn.
                for k in dctValidator:
                    if k == varargname:
                        # Extra positional arguments: every item must pass.
                        for item in dctCallArgs[k]:
                            result = dctValidator[k](item)
                            assert True == result
                    elif k == kwname:
                        # Extra keyword arguments: every value must pass.
                        for item in dctCallArgs[k].values():
                            result = dctValidator[k](item)
                            assert True == result
                    else:
                        # Ordinary named parameter.
                        item = dctCallArgs[k]
                        result = dctValidator[k](item)
                        assert True == result
            except Exception, e:
                print e
                print inspect.getsource(dctValidator[k])
                error_info = ('%s() parameter validation fails, param: %s, value: %s(%s), validator: %s'
                              % (func.func_name, k, item, item.__class__.__name__, result))
                raise ValidateException, error_info
            return func(*callvarargs, **callkeywords)
        # Copy metadata so the wrapper impersonates the wrapped function.
        wrapper = _wrapps(wrapper, func)
        return wrapper
    return generator
def nullOk(cls, condition=None):
    '''Build a validator like (cls, condition) that additionally accepts None.'''
    def validate(x):
        if x is None:
            return True
        return _toStardardCondition((cls, condition))(x)
    return validate
def multiType(*conditions):
    '''Build a validator that passes when at least one condition passes.

    Each condition may be any format accepted by _toStardardCondition.

    BUG FIX: standard validators return True on success but an *info
    string* on failure — both are truthy, so the original ``if v(x)``
    accepted every value that merely reached the first validator. We now
    require the exact value True. Also materialize the validators into a
    list so the closure can be called more than once (a bare ``map``
    iterator would be exhausted after the first call on Python 3).
    '''
    lstValidator = [_toStardardCondition(c) for c in conditions]

    def validate(x):
        for v in lstValidator:
            if v(x) is True:
                return True
        # No condition matched: return an info string (falsy for the
        # ``assert True == result`` check in validParam's wrapper).
        return 'must satisfy one of: %s' % str(conditions)
    return validate
def _toStardardCondition(condition):
    '''Convert the supported condition formats into a validator function.

    Supported forms:
      - a class: value must be an instance of it
      - (cls, '/regex/'): value must be a cls instance matching the regex
        (only when cls is str/unicode and the rule is slash-delimited)
      - (cls, 'expr'): value must be a cls instance and the Python
        expression 'expr' (which may reference the value as ``x``) is true
      - (cls, None): same as passing the class alone
      - anything else: assumed to already be a validator function

    Validators return True on success or an explanatory info string on
    failure. NOTE: Python 2 only (references ``unicode``).
    '''
    # class condition
    if inspect.isclass(condition):
        info = "must be %s type" % condition.__name__
        return lambda x: isinstance(x, condition) or info
    if isinstance(condition, (tuple, list)):
        cls, condition = condition[:2]
        if condition is None:
            return _toStardardCondition(cls)
        # regular condition, written '/pattern/'
        if cls in (str, unicode) and condition[0] == condition[-1] == '/':
            info = 'must match regular expression: %s' % condition
            return lambda x: (isinstance(x, cls) and re.match(condition[1:-1], x) is not None) or info
        # pure str condition, evaluated as a Python expression.
        # SECURITY NOTE: eval() executes arbitrary code — condition strings
        # must come from trusted source code, never from user input.
        info = 'must satisfy rule: %s' % condition
        return lambda x: (isinstance(x, cls) and eval(condition)) or info
    # fcuntion condition: passed through unchanged
    return condition
def _getCallArgs(args, varargname, kwname, varargs, keywords):
'''获取调用时的各参数名-值验证函数的字典 or 各参数名-值的字典
args: 参数名称
varargname: 非关键字可变长参数名称
kwname: 关键字参数名称
varargs: 非关键字可变长参数(tuple) (参数类型 or 参数值)
keywords: 关键字参数(dict) (参数类型 or 参数值)
'''
dictArgs = {}
varargs = tuple(varargs)
keywords = dict(keywords)
argcount = len(args)
varcount = len(varargs)
callvarargs = None
if argcount <= varcount:
# 参数少,因此先遍历参数,依此位每个参数赋值
for n, argname in enumerate(args):
dictArgs[argname] = varargs[n]
callvarargs = varargs[-(varcount-argcount):]
else:
#值少,因此先遍历值,依此位每个参数赋值
for n, var in enumerate(varargs):
dictArgs[args[n]] = var
# 处理剩余的参数
for argname in args[-(argcount-varcount):]:
if argname in keywords:
dictArgs[argname] = keywords.pop(argname)
callvarargs = ()
if varargname is not None:
dictArgs[varargname] = callvarargs
if kwname is not None:
dictArgs[kwname] = keywords
dictArgs.update(keywords)
return dictArgs
def _wrapps(wrapper, wrapped):
'''复制元数据'''
for attr in ('__module__', '__name__', '__doc__'):
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in ('__dict__',):
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
return wrapper
|
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
import json
import datetime
import Messenger as messenger
# TCP port the HTTP server listens on.
PORT_NUMBER = 8080
#This class will handles any incoming request from
#the browser
class myHandler(BaseHTTPRequestHandler):
    """HTTP request handler: serves static files on GET and a JSON
    ``/sendMsg`` endpoint on POST (Python 2 / BaseHTTPServer)."""

    def serve_file(self):
        """Serve the static file named by self.path if its extension is one
        of the whitelisted types; 404 if the file cannot be opened.

        NOTE(review): requests with a non-whitelisted extension fall through
        without any response being sent. Image files are opened in text
        mode — fine on POSIX under Python 2, but would corrupt binary data
        on Windows; confirm target platform.
        """
        try:
            # Check the file extension required and
            # set the right mime type
            sendReply = False
            if self.path.endswith(".html"):
                mimetype='text/html'
                sendReply = True
            if self.path.endswith(".jpg"):
                mimetype='image/jpg'
                sendReply = True
            if self.path.endswith(".gif"):
                mimetype='image/gif'
                sendReply = True
            if self.path.endswith(".js"):
                mimetype='application/javascript'
                sendReply = True
            if self.path.endswith(".css"):
                mimetype='text/css'
                sendReply = True
            if sendReply == True:
                # Open the static file requested and send it
                f = open(curdir + sep + self.path)
                self.send_response(200)
                self.send_header('Content-type',mimetype)
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
            return
        except IOError:
            self.send_error(404,'File Not Found: %s' % self.path)

    def sendMsg(self):
        """Forward the JSON body's 'msg' field to the messenger backend and
        reply with {"success": true}."""
        message = self.extractJson()
        print("Message: " + str(message) + " @ " + str(datetime.datetime.now()))
        messenger.message(message['msg'])
        data = {}
        data['success'] = True
        response = json.dumps(data)
        self.send_response(200)
        self.send_header('Content-type',"application/json")
        self.end_headers()
        self.wfile.write(response)

    def extractJson(self):
        # Read exactly Content-Length bytes of the body and parse as JSON.
        request = self.rfile.read(int(self.headers['Content-Length']))
        return json.loads(request)

    # Handler for the GET requests
    def do_GET(self):
        # Root path is rewritten to the static index page.
        if self.path=="/":
            self.path="/static/index.html"
            self.serve_file()
        else:
            # NOTE(review): any non-root GET (including static assets the
            # index references) is only logged, never served — confirm.
            print("No GET handler found for: " + self.path)

    # Handler for the POST requests
    def do_POST(self):
        if self.path == "/sendMsg":
            self.sendMsg()
        else:
            print("No POST handler found for: " + self.path)
try:
    # Initialise the messenger backend before accepting any requests.
    messenger.init()
    # Create a web server and define the handler to manage the
    # incoming request
    server = HTTPServer(('', PORT_NUMBER), myHandler)
    print 'Started httpserver on port ' , PORT_NUMBER
    # Block forever handling incoming HTTP requests.
    server.serve_forever()
except KeyboardInterrupt:
    # Ctrl-C: close the listening socket before exiting.
    print '^C received, shutting down the web server'
    server.socket.close()
|
import logging
import fmcapi
def test__monitoredinterfaces(fmc):
    """GET a monitored HA interface by logical name, enable failure
    monitoring, set its IPv4 addresses and PUT it back via fmcapi."""
    logging.info("Test MonitoredInterfaces. get, put MonitoredInterfaces Objects")

    legacy = fmcapi.DeviceHAMonitoredInterfaces(fmc=fmc, ha_name="HaName")
    monitored = fmcapi.MonitoredInterfaces(fmc=fmc, ha_name="HaName")

    # Look up the interface by its logical name (ifname).
    monitored.get(name="OUTSIDE1")
    monitored.monitorForFailures = True
    monitored.ipv4(ipv4addr="10.254.0.4", ipv4mask=29, ipv4standbyaddr="10.254.0.3")

    logging.info("MonitoredInterfaces PUT-->")
    logging.info(monitored.format_data())
    logging.info(monitored.put())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def copy_status_data(apps, schema_editor):
    """Forward migration: give every election a ModerationHistory record
    whose status matches the election's suggested_status label."""
    ModerationHistory = apps.get_model("elections", "ModerationHistory")
    Election = apps.get_model("elections", "Election")
    ModerationStatus = apps.get_model("elections", "ModerationStatus")

    for election in Election.private_objects.all():
        # Match the seeded status row case-insensitively by its short label.
        status = ModerationStatus.objects.all().get(
            short_label__iexact=election.suggested_status
        )
        ModerationHistory(election=election, status=status).save()
def delete_status_data(apps, schema_editor):
    """Reverse migration: drop every ModerationHistory record."""
    history_model = apps.get_model("elections", "ModerationHistory")
    history_model.objects.all().delete()
class Migration(migrations.Migration):
    """Data migration: populate ModerationHistory from each election's
    suggested status (reversible by deleting the copied rows)."""

    # Requires the migration that seeded the ModerationStatus rows.
    dependencies = [("elections", "0048_seed_status")]

    operations = [migrations.RunPython(copy_status_data, delete_status_data)]
|
from .models import Coupon
from rest_framework_mongoengine import serializers
class CouponSerializers(serializers.DocumentSerializer):
    """DRF-mongoengine serializer exposing every field of the Coupon document."""
    class Meta:
        model = Coupon
        fields = "__all__"
# Exercise (20 pts): decide whether a string is symmetric (a palindrome),
# implemented as a function.
def hanshu(str):
    """Print whether *str* reads the same forwards and backwards."""
    reversed_form = str[::-1]
    if str == reversed_form:
        print("是对称字符串")
    else:
        print("不是对称字符串")


hanshu("abgba")
# Dumpling price calculator: 1000 won each, 15% off from 10, 25% off from 100.
dumpling = int(input("만두 개수 입력 : "))
price = 1000
if dumpling <= 0:
    # BUG FIX: the original fell through and printed a bogus 1000-won price
    # even after rejecting the count; now the error message is the only output.
    print("잘못된 개수입니다.")
else:
    if dumpling < 10:
        price *= dumpling  # no discount below 10
    elif dumpling < 100:
        price *= 0.85 * dumpling  # 15% discount
    else:
        price *= 0.75 * dumpling  # 25% discount
    # Cash payment gets a further 10% off.
    print("가격 : %d원, 현금결제가 : %s원"%(price,price*0.9))
|
'''
Created on Oct 12, 2017
@author: John Nguyen
Test 7
'''
# Converts pipe-delimited test cases stored as PDFs into one appended
# pipe-delimited text file.
import os
import PyPDF2
# Hard-coded local paths — adjust per machine.
input_directory = "/Users/NguyenJ.MININT-3LV3JTL/InputTestCaseInPDFFormat"
output_text_file = "/Users/NguyenJ.MININT-3LV3JTL/TestCaseFromPDFToTextOutput/OutputTextFile"
os.chdir(input_directory)
set_of_all_PDF_input_files = os.listdir()
number_of_Files_to_be_Processed = len(set_of_all_PDF_input_files)
file_counter = 0
# NOTE: the output file name is rebound to the open file handle below.
with open(output_text_file, 'a') as output_text_file:
    output_text_file.write("step number | Name | Step | Result | Testdata | ExternalId | Description \n")
    for each_PDF_input_file in set_of_all_PDF_input_files:
        step_counter = 0
        file_counter += 1
        print("**** File " + str(file_counter) + ' is being processed ... out of ' + \
              str(number_of_Files_to_be_Processed) + ' files **************************\n' )
        with open(each_PDF_input_file, 'rb') as PDF_input_file:
            pdfreader = PyPDF2.PdfFileReader(PDF_input_file)
            for each_page in range(pdfreader.getNumPages()):
                # NOTE(review): extractText() returns a single *string*, so
                # set_of_test_steps[0] is its first character and the
                # for-loop below iterates characters, not rows — the
                # six-way split/unpack would then fail. Presumably the
                # intent was to split the page text into lines first;
                # confirm against a real input PDF.
                set_of_test_steps = pdfreader.getPage(each_page).extractText()
                """ Get the test case ID and the external ID """
                if(each_page == 0):
                    test_case_name, test_step_description, input_data, expected_result, external_ID, description = set_of_test_steps[0].split('|')
                    test_case_ID_holder = test_case_name.lstrip().rstrip()
                    #print("Test Case Name = " + test_case_ID_holder)
                    external_ID_holder = external_ID.lstrip().rstrip()
                    #print("eXTERNAL di = " + external_ID_holder)
                    description = test_case_ID_holder
                """ Format the test steps and write them to the output file """
                for each_test_step in set_of_test_steps:
                    test_case_name, \
                    test_step_description,\
                    expected_result, \
                    input_data, \
                    external_ID, \
                    description_temp = each_test_step.split('|')
                    step_counter += 1
                    # test_step = f'{step_counter} | {test_case_ID_holder} | {test_step_description} | {input_data} | {expected_result} | {external_ID_holder}'
                    test_step = str(step_counter) + "|" + \
                                test_case_ID_holder + "|" + \
                                test_step_description.lstrip().rstrip() + "|" + \
                                expected_result.lstrip().rstrip() + "|" + \
                                input_data + "|" + \
                                external_ID_holder + "|" + \
                                description
                    print(test_step)
                    print("========================================================================")
                    output_text_file.write(test_step + "\n")
from impedence_functions import paper_results
def main():
    # Entry point: reproduce the impedance results from the paper.
    paper_results()


if __name__ == "__main__":
    main()
|
import argparse
import json
import os
from glob import glob
from tempfile import TemporaryDirectory
from time import time
from functools import partial, reduce
from collections import namedtuple
import h5py
import numpy as np
import pandas as pd
from joblib import Parallel, delayed, dump, load
from sklearn import metrics as met
from sklearn.base import clone
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import (AdaBoostClassifier, AdaBoostRegressor,
ExtraTreesClassifier, ExtraTreesRegressor,
GradientBoostingClassifier,
GradientBoostingRegressor,
RandomForestClassifier, RandomForestRegressor)
from sklearn.feature_selection import SelectFromModel
from sklearn.gaussian_process import (GaussianProcessClassifier,
GaussianProcessRegressor)
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.linear_model import ElasticNetCV, LogisticRegressionCV
from sklearn.model_selection import (KFold, StratifiedKFold, cross_val_predict,
cross_validate)
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import StandardScaler, FunctionTransformer
from sklearn.tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from torch._C import dtype
import utils as u
# Silence pandas SettingWithCopyWarning noise.
pd.options.mode.chained_assignment = None
# Fixed seed for reproducible CV splits.
SEED = 123456
# Transformer that clips (regression) outputs into [0, 1].
clipper = FunctionTransformer(partial(np.clip, a_min=0, a_max=1))
def train_and_test(X,
                   y,
                   learners: dict,
                   outfile: u.Path,
                   metrics: dict = None,
                   nprocs: int = 4,
                   fold_splitter=None,
                   impute=True,
                   predict: bool = False):
    """train all learners and write performance metrics.

    NOTE: does not clip regression output.

    Args:
        X (df or np array): predictors
        y (df or np array): target
        learners (dict): name -> sklearn-style estimator
        outfile (u.Path): out path (predictions CSV / model pickles derive from it)
        metrics (dict, optional): currently unused. Defaults to None.
        nprocs (int, optional): number of processes for cross val split. Defaults to 4.
        fold_splitter (optional): CV splitter; if None, cross-validation is skipped.
        impute (bool, optional): impute missing values. Defaults to True.
        predict (bool, optional): write CV estimates for each datapoint. Defaults to False.
    """
    if isinstance(X, pd.DataFrame):
        X = X.values
    if isinstance(y, pd.DataFrame):
        index = y.index
        y = y.values
    else:
        index = None
    scaler = StandardScaler()
    imputer = SimpleImputer(missing_values=np.nan,
                            strategy='median',
                            fill_value=0)

    def _make_clf(learner, tmpdir=None):
        # One pipeline shape for both branches: optional impute+indicator
        # union, then scaling, then the estimator.
        steps = []
        if impute:
            steps.append(make_union(imputer,
                                    MissingIndicator(features='all'),
                                    n_jobs=2))
        steps.extend([scaler, learner])
        return make_pipeline(*steps, memory=tmpdir)

    if fold_splitter is not None:
        with TemporaryDirectory() as tmpdir:
            # BUG FIX: `preds` was re-created inside the per-learner loop and
            # the CSV written per iteration, so the saved file only ever held
            # the last learner's column. Collect all learners, write once.
            preds = dict.fromkeys(learners)
            for learner_name, learner in learners.items():
                clf = _make_clf(learner, tmpdir)
                if predict:
                    try:
                        preds[learner_name] = cross_val_predict(
                            clf,
                            X,
                            y,
                            cv=fold_splitter,
                            n_jobs=nprocs,
                            pre_dispatch=2 * nprocs,
                            method='predict_proba')[:, 1]
                    except AttributeError:  # the learner has no predict_proba
                        preds[learner_name] = cross_val_predict(
                            clf,
                            X,
                            y,
                            pre_dispatch=2 * nprocs,
                            cv=fold_splitter,
                            n_jobs=nprocs)
            if predict:
                pred_df = pd.DataFrame({**preds, 'y_true': y}, index=index)
                pred_df.to_csv(outfile.with_suffix('.preds.csv.gz'),
                               compression='gzip')
    modeldir = outfile.parent / 'models'
    modeldir.mkdir(exist_ok=True)
    # Final weights are trained on the WHOLE dataset and pickled per learner.
    for learner_name, learner in learners.items():
        # BUG FIX: the original `impute=False` branch here was a copy of the
        # impute branch (it still included the imputer); honour the flag.
        clf = _make_clf(clone(learner))
        clf.fit(X, y)
        dump(clf, modeldir / f'{outfile.name}_{learner_name}.pkl.gz')
def main(args: argparse.Namespace) -> None:
    """Build learner suites per *args* and run train_and_test on the data."""
    # use for both class and regress
    decision_tree_params = {
        'max_depth': 4,
        'min_samples_leaf': 1,
    }
    # params accumulates the run configuration for later pickling.
    params = [args]
    if args.classify:
        classification_learners = {
            'Random': DummyClassifier(strategy='stratified'),
            # same as most_frequent, except predict_proba returns the class prior.
            'Trivial': DummyClassifier(strategy='prior'),
            'RF': RandomForestClassifier(bootstrap=True,
                                         n_estimators=50,
                                         criterion='gini',
                                         n_jobs=2,
                                         class_weight="balanced",
                                         **decision_tree_params),
            'AdaBoost': AdaBoostClassifier(n_estimators=50,
                                           base_estimator=DecisionTreeClassifier(
                                               criterion='gini',
                                               class_weight="balanced",
                                               **decision_tree_params)),
            'GradBoost': GradientBoostingClassifier(n_estimators=50,
                                                    criterion='friedman_mse',
                                                    **decision_tree_params),
            'LogisticReg': LogisticRegressionCV(Cs=5,
                                                penalty='l1',
                                                class_weight='balanced',
                                                solver='saga',  # NOTE: saga is fast only if data has been scaled
                                                max_iter=1000,
                                                n_jobs=2,
                                                tol=1e-4,
                                                cv=3),
            # 'MLP': MLPClassifier(solver='sgd',
            #                      batch_size=100,
            #                      activation='relu',
            #                      learning_rate='adaptive',
            #                      learning_rate_init=0.01,
            #                      momentum=0.8,
            #                      nesterovs_momentum=True,
            #                      hidden_layer_sizes=(40, 10, 10, 10),
            #                      tol=1e-4,
            #                      max_iter=1000,
            #                      shuffle=True)
        }
        c_metrics = {'Acc': met.accuracy_score,
                     'F1': met.f1_score,
                     'AUC': met.roc_auc_score,
                     'Prec': met.precision_score,
                     'Recall': met.recall_score,
                     'MCC': met.matthews_corrcoef
                     }
        params.append(str(classification_learners))
        # cv requires scoring fn
        for m in c_metrics:
            c_metrics[m] = met.make_scorer(c_metrics[m])
    ###### REGRESSION #######
    # 'criterion':'mse', # by default
    if args.regress:
        regression_learners = {
            'Mean': DummyRegressor(strategy='mean'),
            'Median': DummyRegressor(strategy='median'),
            'RF': RandomForestRegressor(bootstrap=True,
                                        n_jobs=2,
                                        n_estimators=50,
                                        **decision_tree_params),
            'GradBoost': GradientBoostingRegressor(n_estimators=50,
                                                   criterion='friedman_mse',
                                                   **decision_tree_params),
            'AdaBoost': AdaBoostRegressor(
                n_estimators=50,
                base_estimator=DecisionTreeRegressor(**decision_tree_params)),
            'ElasticNet': ElasticNetCV(cv=3,
                                       precompute='auto',
                                       n_alphas=50,
                                       normalize=False,
                                       selection='random',
                                       n_jobs=2,
                                       max_iter=1000),
            # 'MLP': MLPRegressor(solver='sgd',
            #                     batch_size=100,
            #                     activation='relu',
            #                     learning_rate='adaptive',
            #                     learning_rate_init=0.01,
            #                     momentum=0.8,
            #                     nesterovs_momentum=True,
            #                     hidden_layer_sizes=(40, 10, 10, 10),
            #                     tol=1e-4,
            #                     early_stopping=False,
            #                     max_iter=1000,
            #                     shuffle=True)
        }
        params.append(str(regression_learners))
        # NOTE(review): the configuration pickle is only written on --regress
        # runs; a classify-only run never saves arguments.pkl.gz — confirm.
        dump(params, args.outdir / 'arguments.pkl.gz')
        r_metrics = {'MSE': met.mean_squared_error,
                     'MAE': met.mean_absolute_error,
                     'EV': met.explained_variance_score
                     }
        for m in r_metrics:
            r_metrics[m] = met.make_scorer(r_metrics[m])
    try:
        # Load predictors; keep only rows with ebl <= 200.
        X = u.load_hdf_files(args.data, args.procs).query('ebl<=200')
        if args.dropna:
            X.dropna(inplace=True)
        y = X['y_prob']
        X.drop(columns='y_prob', inplace=True)
        if 'features' in args:
            # allow substring matches
            x_attrs = [a for a in X.columns if any(a.startswith(feature)
                                                   for feature in args.features)]
            X = X[x_attrs]
        if args.classify:
            # TODO: don't reorder the input data!
            y_bin = (u.sigmoid(y) > args.ils)  # ils = 0, no_ils = 1
            n_positive = y_bin.sum()
            if not n_positive or n_positive == y_bin.size:
                print('need 2 classes in training data')
                args.classify = False
            else:
                print(f'pos: {n_positive}, neg: {y_bin.size-n_positive}')
        if args.cnoise > 0:
            # NOTE(review): pandas Series exposes no `logical_xor` method —
            # this line likely raises AttributeError; probably meant
            # np.logical_xor(y_bin, ...). Verify before relying on --cnoise.
            y_bin = y_bin.logical_xor(
                np.random.uniform(size=y_bin.shape) > args.cnoise)
        if args.rnoise > 0:
            y += np.random.normal(scale=args.rnoise,
                                  size=y.shape)
        if args.regress:
            print('\nregressors....\n')
            now = time()
            # train_and_test currently returns None; results_r kept for parity
            # with the commented-out metric plumbing.
            results_r = train_and_test(X,
                                       y,
                                       regression_learners,
                                       # metrics=r_metrics,
                                       outfile=args.outdir/'results_regress',
                                       # fold_splitter=KFold(args.folds,
                                       #                     shuffle=True,
                                       #                     random_state=SEED),
                                       nprocs=args.procs,
                                       predict=args.predict,
                                       impute=not args.dropna)
            print('time:', time()-now)
        if args.classify:
            print('\nclassifiers....\n')
            now = time()
            results_c = train_and_test(X,
                                       y_bin,
                                       classification_learners,
                                       # metrics=c_metrics,
                                       outfile=args.outdir/'results_classify',
                                       # fold_splitter=StratifiedKFold(args.folds,
                                       #                               shuffle=True,
                                       #                               random_state=SEED),
                                       nprocs=args.procs,
                                       predict=args.predict,
                                       impute=not args.dropna)
            print('time:', time()-now)
    except Exception as e:
        # NOTE(review): re-raising first makes the print below unreachable.
        raise e
        print(e)
    print('finished')
if __name__ == "__main__":
# TODO: make this match the dataloading format of the torch scripts. ignore config.json
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--cnoise',
type=float, default=0,
help='''p for Bernoulli noise to flip classification labels.''')
parser.add_argument('--rnoise',
type=float, default=0,
help='''sigma for Gaussian noise added to regression label.''')
parser.add_argument('--procs',
'-p',
type=int,
help='num procs',
default=4)
parser.add_argument('--dropna',
action='store_true',
help='drop all rows with null values')
parser.add_argument('--predict',
action='store_true',
help='write cross-val predictions to file')
parser.add_argument('--regress',
action='store_true',
help='regression')
parser.add_argument('--classify',
action='store_true',
help='regression')
parser.add_argument('--balanced',
action='store_true',
help='''use balanced ILS/NoILS classes;
default is true.''')
parser.add_argument('--ils',
default=.9,
type=float,
help='''species tree topology frequency can
be at most this value to be considered discordant.''')
parser.add_argument('--data',
'-i',
nargs="+",
type=u.Path,
help='input hdf5 file')
parser.add_argument('--outdir',
type=u.Path,
help='directory to store results files')
parser.add_argument('--config',
'-c',
help='''input json config file.
All flags will overwrite command line args.''')
parser.add_argument('--folds',
'-f',
type=int,
help='CV folds',
default=10)
args = parser.parse_args()
if args.config:
arg_dict = vars(args)
config = json.load(open(args.config))
for k in config:
arg_dict[k] = config[k]
print('Arguments: ', args)
for fname in args.data:
if not fname.exists():
raise OSError("file not found:", fname)
if not args.outdir:
args.outdir = args.data[0].with_suffix('')
args.outdir.mkdir(exist_ok=True)
main(args)
|
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_logout import *
from main.activity.desktop_v3.activity_myshop_editor import *
from utils.lib.user_data import *
from utils.function.setup import *
import unittest
class TestMyshopInfo_Validation(unittest.TestCase):
    """Validation tests for the desktop v3 "Myshop Editor" page.

    Each test logs in, opens the editor, runs one empty-input validation
    check and logs out again. The shared flow lives in _run_validation.
    """

    # Target environment for all tests.
    _site = "live"

    def setUp(self):
        print ('[VALIDATION TEST] "Myshop-Editor"')
        self.driver = tsetup()
        self.user = user1

    def _run_validation(self, check_name):
        """Log in, open the myshop editor, invoke the named validation
        check on myshopEditorActivity, then log out.

        CONSISTENCY FIX: tests 2-5 previously read credentials from the
        global ``user1`` while test 1 used ``self.user``; everything now
        goes through ``self.user`` (set in setUp).
        """
        driver = self.driver
        email = self.user['email']
        pwd = self.user['pwd']
        login = loginActivity()
        myshopEdit = myshopEditorActivity()
        logout = logoutActivity()
        login.do_login(driver, self.user, email, pwd, self._site)
        myshopEdit.setObject(driver)
        myshopEdit.goto_myshop_editor(self._site)
        # Dispatch to the requested check_validation_input_* method.
        getattr(myshopEdit, check_name)(driver)
        time.sleep(2)
        logout.do_logout(driver, self._site)

    def test_1_check_validation_shop_slogan(self):
        print ("TEST #1 : [Validation] Input Shop Slogan")
        print ("========================================")
        self._run_validation("check_validation_input_shop_slogan_empty")

    def test_2_check_validation_shop_description(self):
        print ("TEST #2 : [Validation] Input Shop Description")
        print ("=============================================")
        self._run_validation("check_validation_input_shop_description_empty")

    def test_3_check_validation_shop_closed_note(self):
        print ("TEST #3 : [Validation] Input Shop Close Note")
        print ("=============================================")
        self._run_validation("check_validation_input_shop_closed_note_empty")

    def test_4_check_validation_shop_closed_date(self):
        print ("TEST #4 : [Validation] Input Shop Close Date")
        print ("============================================")
        self._run_validation("check_validation_input_shop_closed_date_empty")

    def test_5_check_validation_shop_info_all(self):
        # BUG FIX: banner previously said "TEST #4".
        print ("TEST #5 : [Validation] Input Empty")
        print ("============================================")
        self._run_validation("check_validation_input_all_empty")

    def tearDown(self):
        print("Testing akan selesai dalam beberapa saat..")
        self.driver.close()
if __name__ == '__main__':
    # Run the suite directly; warnings are suppressed during the run.
    unittest.main(warnings='ignore')
|
# -*- coding: utf-8 -*-
# Personal Assistant Reliable Intelligent System
# By Tanguy De Bels
import speech_recognition as sr
import Utilities
# Shared speech-recognizer instance used by listen().
r = sr.Recognizer()
def listen():
    """Capture one utterance from the microphone and return the recognized
    text (lower-cased, French via Google Speech API), or '' when the audio
    could not be understood.

    NOTE(review): on sr.RequestError (API unreachable) the function only
    prints and implicitly returns None, not '' — callers should handle both.
    NOTE: Python 2 (`print s` statement).
    """
    with sr.Microphone() as source:
        # Calibrate against background noise before listening.
        r.adjust_for_ambient_noise(source)
        print(u'Say something!')
        audio = r.listen(source)
    try:
        s = (r.recognize_google(audio, language = "fr-FR")).lower()
        print s
        return s
    except sr.UnknownValueError:
        print(u'Could not understand audio')
        return ''
    except sr.RequestError as e:
        print(u'Could not request results; {0}'.format(e))
import threading
from client import Client
from ftpcommands import *
from network import *
# Get settings from config.ini, read the config file comments for a detailed description on each value
# NOTE(review): `configparser` is not imported in this file — presumably
# re-exported by `from network import *` or `from ftpcommands import *`; confirm.
config = configparser.ConfigParser()
config.read('config.ini')
host = config['IP']['Host']
port = int(config['IP']['Port'])
# Dispatch table: 4-character, space-padded FTP verb -> handler function.
protocol_dict = {
    "AUTH": cmd_auth,
    "CDUP": cmd_cdup,
    "CWD ": cmd_cwd,
    "FEAT": cmd_feat,
    "USER": cmd_user,
    "LIST": cmd_list,
    "PASS": cmd_pass,
    "PASV": cmd_pasv,
    "PORT": cmd_port,
    "PWD ": cmd_pwd,
    "RETR": cmd_retr,
    "STOR": cmd_stor,
    "SYST": cmd_syst,
    "TYPE": cmd_type
}
def client_thread(client):
    """Serve one FTP control connection until the peer disconnects.

    Sends the welcome banner, then reads commands, dispatches each through
    protocol_dict and writes the handler's response back. An empty read
    (peer closed the socket) ends the loop.
    """
    welcome_msg = "220 (PyFTP 0.0.1)"
    client.connection.send((welcome_msg + "\r\n").encode())
    keep_alive = True
    while keep_alive:
        command = client.connection.recv(1024).decode()
        print("Received from " + client.address[0] + ":" + str(client.address[1]) + " ... " + command)
        method = command.strip()
        if method != "":
            # Commands are keyed by a fixed-width 4-char verb (e.g. "PWD ").
            while len(method) < 4:
                method += " "
            method = method[:4]
            handler = protocol_dict.get(method)
            if handler is None:
                # BUG FIX: an unknown verb used to raise KeyError and kill
                # this thread; answer with a standard FTP reply instead.
                response = "502 Command not implemented."
            else:
                response = handler(client, command)
            print("Responding to " + client.address[0] + ":" + str(client.address[1]) + " ... " + response + "\n\n\n")
            client.connection.send((response + "\r\n").encode())
        else:
            keep_alive = False
def accept_connections():
    """Accept one client on the shared command socket and serve it on a
    new daemon-style thread.

    BUG FIX: the original created a brand-new listening socket on every
    call, so the second iteration of main()'s accept loop tried to re-bind
    the same port and failed. The socket is now created once and cached on
    the function object.
    """
    sock = getattr(accept_connections, "_sock", None)
    if sock is None:
        sock = create_command_socket(host, port)
        accept_connections._sock = sock
    conn, addr = sock.accept()
    client = Client(conn, addr)
    t = threading.Thread(target=client_thread, args=(client,))
    t.start()
def main():
    # Announce the listening endpoint, then accept clients forever.
    print("Started FTP server on port", str(port) + ", listening on", host)
    while True:
        accept_connections()


if __name__ == "__main__":
    main()
|
from django.urls import path, include
from products import views
from django.contrib.auth import views as auth_views
# Routes for the products app: delegated includes for orders/details/basket,
# product listing, auth helpers and the Django password-reset flow.
urlpatterns = [
    path('', include('orders.urls')),
    path('products/<str:slug>', views.list_product, name='list_product'),
    path('', include('details.urls')),
    path('', include('basket.urls')),
    path('login_logout/', views.login_logout, name='login_logout'),
    # Custom phone-based password reset.
    path('password_reset_tel/', views.password_reset_via_tel, name='password_reset_via_tel'),
    # Standard Django email password-reset flow with custom templates.
    path('password_reset/', auth_views.PasswordResetView.as_view(template_name='password_reset/password_reset.html'), name='password_reset'),
    path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(template_name='password_reset/password_reset_done.html'), name='password_reset_done'),
    path('password_reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(template_name='password_reset/password_reset_confirm.html'), name='password_reset_confirm'),
    path('password_reset/complete/', auth_views.PasswordResetCompleteView.as_view(template_name='password_reset/password_reset_complete.html'), name='password_reset_complete'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 11:51:17 2020
@author: aureoleday
"""
#from gnuradio import gr
#from gnuradio import audio,analog
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from functools import reduce
# 3rd-order Butterworth low-pass (normalized cutoff 0.1) used by the
# commented-out demod() helper below.
b, a = signal.butter(3, [0.1], 'lowpass')
#def am_mod(mt,fc,fs,ofs_ca,ofs_f):
def sig_src(fs,wave_form,fc,ampl,phi,t):
    """Generate a sampled sinusoid.

    fs: sample rate (Hz); wave_form: "sin" selects sine, anything else
    cosine; fc: tone frequency (Hz); ampl: amplitude; phi: phase in
    degrees; t: duration in seconds. Returns a numpy array of samples.
    """
    sample_idx = np.arange(t * fs)
    phase = 2 * np.pi * fc * sample_idx / fs + np.deg2rad(phi)
    wave_fn = np.sin if wave_form == "sin" else np.cos
    return ampl * wave_fn(phase)
# def orthognal_demod(fs,sig_in,fc):
# Ts = 1/fs
# sig_len = len(sig_in)
# n = np.arange(sig_len)
# q = np.sin(2*np.pi*fc*n*Ts)
# i = np.cos(2*np.pi*fc*n*Ts)
def diff_demod(fs,fc,fr,i_cd,q_cd,i_rd,q_rd):
    """Differential demodulation helper (appears unfinished).

    Builds carrier (fc) and reference (fr) quadrature oscillators sampled
    at fs over len(i_cd) samples.

    NOTE(review): the oscillators are never used and the function returns
    None; i_cd/q_cd/i_rd/q_rd are only read via len(i_cd) — the body looks
    incomplete, like the commented-out demod() below.
    """
    Ts = 1/fs
    c_len = len(i_cd)
    n = np.arange(c_len)
    cq = np.sin(2*np.pi*fc*n*Ts)
    ci = np.cos(2*np.pi*fc*n*Ts)
    rq = np.sin(2*np.pi*fr*n*Ts)
    ri = np.cos(2*np.pi*fr*n*Ts)
# def demod(din,f0,fs):
# d_cnt = int(fs/f0)
# din_delay = np.zeros(din.size)
# din_delay[d_cnt:] = din[:-d_cnt]
# dcross = din*din_delay
# zi = signal.filtfilt(b,a,dcross)
# return zi#sig_fs
#osc = np.cos(2*np.pi*f0*np.arange(fs/f0)/fs)
#ms = np.kron((np.random.randint(0,2,N)-0.5)*2,osc)
def AWGN(sin,snr):
    """Add white Gaussian noise to signal *sin* at the given SNR (in dB).

    The noise power is the measured mean signal power divided by the
    linear SNR. Prints the linear SNR and the signal/noise powers (kept
    from the original debug output). Uses np.random, so the result is
    stochastic unless the RNG is seeded.
    """
    SNR = 10**(snr/10)
    print(SNR)
    # Mean signal power — vectorized; replaces the hand-rolled
    # reduce/map sum-of-squares loop.
    Ps = np.mean(np.asarray(sin)**2)
    print(Ps)
    Pn = Ps/SNR
    print(Pn)
    s_wn = sin + np.random.randn(sin.size)*(Pn**0.5)
    return s_wn
#f0 = 200
# Simulation parameters: sample rate, carrier frequency, baseband tone,
# duration (s) and channel SNR in dB.
fs = 1000
fc = 100
f0 = 10
t = 0.3
SNR=50
# Carrier and in-phase/quadrature baseband tones.
e = sig_src(fs,'sin',fc,1,0,t)
s = sig_src(fs,'sin',f0,1,0,t)
c = sig_src(fs,'cos',f0,1,0,t)
# Mix the carrier with each tone, then add channel noise.
es = e*s
ec = e*c
esn = AWGN(es,SNR)
ecn = AWGN(ec,SNR)
# Phase estimate from the noisy and the clean I/Q products.
# NOTE(review): the plot titles below say "SNR = 30" but SNR is 50 here.
arctan = np.arctan(esn/ecn)
arctan_n = np.arctan(es/ec)
#msn = AWGN(ms,SNR)
#dout = demod(msn,f0,fs)
# plt.figure()
# plt.plot(ec)
# plt.figure()
# # plt.plot(arctan)
# plt.plot(e)
# fig = plt.figure()
ax = plt.subplot(411)
ax.set_title('sin(w),SNR = 30')
ax.plot(esn,color='g')
ax = plt.subplot(412)
ax.set_title('cos(w),SNR = 30')
ax.plot(ecn,color='g')
ax = plt.subplot(413)
ax.set_title('actan(x) with SNR = 30')
ax.plot(arctan,color='r')
bx = plt.subplot(414)
bx.set_title('actan(x) without noise')
bx.plot(arctan_n,color='b')
# Python 3
import os
import argparse
from stat import S_ISDIR, S_ISREG
def walktree(top):
    '''Recursively walk *top* and return the names of all regular files.

    Returns base filenames only (not full paths), recursing into
    subdirectories. Entries that are neither directories nor regular
    files are skipped with a message.
    '''
    liste = []
    for f in os.listdir(top):
        pathname = os.path.join(top, f)
        mode = os.stat(pathname).st_mode  # distinguishes dirs from files
        if S_ISDIR(mode):
            # It's a directory, recurse into it and keep everything found.
            liste.extend(walktree(pathname))
        elif S_ISREG(mode):
            # BUG FIX: pathname.split('/')[-1] is not portable (fails on
            # Windows separators); the entry name *f* is already the
            # basename os.listdir returned.
            liste.append(f)
        else:
            # Unknown file type, print a message
            print('Skipping %s' % pathname)
    return liste
def test_list(liste1, liste2):
return [elem for elem in liste1 if elem not in liste2]
def affiche_liste(liste):
    """Print the items of *liste* in sorted order, framed by dashed lines."""
    separator = "-" * 20
    print(separator)
    for element in sorted(liste):
        print(element)
    print(separator)
def main():
    """Compare two directory trees and print, for each side, the files
    that are missing from the other."""
    parser = argparse.ArgumentParser()
    parser.add_argument("dossier1", help="chemin complet du premier dossier")
    parser.add_argument("dossier2", help="chemin complet du second dossier")
    args = parser.parse_args()
    chemin1 = args.dossier1
    chemin2 = args.dossier2
    # Collect the (base) filenames of every regular file on each side.
    liste_dossier1 = walktree(chemin1)
    liste_dossier2 = walktree(chemin2)
    print("\n")
    print("Fichiers présents dans " + chemin1 + " et absents dans " + chemin2)
    affiche_liste(test_list(liste_dossier1, liste_dossier2))
    print("\n")
    print("Fichiers présents dans " + chemin2 + " et absents dans " + chemin1)
    affiche_liste(test_list(liste_dossier2, liste_dossier1))
    print("\n")


if __name__ == '__main__':
    main()
|
"""
===========================================================================
Sampling techniques using KDD Cup 1999 IDS dataset
===========================================================================
The following examples demonstrate various sampling techniques for a dataset
in which classes are extremely imbalanced with heavily skewed features
"""
import os
import sys
from contextlib import contextmanager
import time
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import *
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.model_selection import cross_val_predict, cross_val_score
import tensorflow as tf
from keras import models, layers
from keras.regularizers import l2
from filehandler import Filehandler
from dataset import KDDCup1999
from visualize import Visualize
import itertools
from tensorflow.python.keras.callbacks import TensorBoard
@contextmanager
def timer(title):
    """Context manager that prints how long the wrapped block took."""
    start = time.time()
    yield
    elapsed = time.time() - start
    print('{} - done in {:.0f}s'.format(title, elapsed))
class Model:
    """Base wrapper around an sklearn-style estimator with stratified
    k-fold out-of-fold scoring."""

    def __init__(self):
        self.random_state = 20
        # Concrete estimator lives in base['model']; subclasses assign it.
        self.base = {'model': None}
        self.binary_enabled = False
        self.multi_enabled = False
        self.X = None
        self.y = None
        # Flattened out-of-fold predictions/targets filled by score(),
        # keyed by classification type ('binary' / 'multi').
        self.y_pred = {'binary': [], 'multi': []}
        self.y_test = {'binary': [], 'multi': []}
        self.splits = 2
        self.kfold = StratifiedKFold(n_splits=self.splits, shuffle=True, random_state=self.random_state)

    def fit(self, X_train, y_train):
        # Delegate to the wrapped estimator.
        self.base['model'].fit(X_train, y_train)

    def predict(self, X_test):
        return self.base['model'].predict(X_test)

    def score(self, X, y, ctype):
        """Run stratified k-fold fit/predict and store the concatenated
        per-fold predictions in self.y_pred[ctype] / self.y_test[ctype].

        NOTE(review): indexes X with .loc using the positional indices from
        kfold.split — assumes X has a default RangeIndex; confirm, otherwise
        .iloc would be required.
        """
        agg_ypred = []
        agg_ytest = []
        for train, test in self.kfold.split(X, y):
            self.fit(X.loc[train], y[train])
            y_pred = self.predict(X.loc[test])
            agg_ypred.append(y_pred)
            agg_ytest.append(y[test])
        # Flatten the per-fold arrays into single lists.
        self.y_pred[ctype] = [item for sublist in agg_ypred for item in sublist]
        self.y_test[ctype] = [item for sublist in agg_ytest for item in sublist]
class RandomForestClf(Model):
    """Random forest baseline; disabled for both target types by default."""

    def __init__(self):
        super().__init__()
        self.binary_enabled = False
        self.multi_enabled = False
        self.base['model'] = RandomForestClassifier(n_estimators=100,
                                                    random_state=self.random_state)
# Single Layer Perceptron - Binary Classification
class AnnSLPBinary(Model):
    """Single-layer perceptron for binary classification (disabled by default)."""

    def __init__(self, n_features):
        super().__init__()
        self.binary_enabled = False
        self.epochs = 2
        self.batch_size = 100
        self.verbose = 0
        self.n_features = n_features
        self.base['model'] = self.create_network()

    def create_network(self):
        """Build and compile a one-unit sigmoid network."""
        network = models.Sequential([
            layers.Dense(1, activation='sigmoid', input_shape=(self.n_features,)),
        ])
        network.compile(loss='binary_crossentropy', optimizer='sgd',
                        metrics=['accuracy'])
        return network

    def fit(self, X_train, y_train):
        """Train using the instance's epoch/batch/verbosity settings."""
        self.base['model'].fit(X_train, y_train, epochs=self.epochs,
                               batch_size=self.batch_size, verbose=self.verbose)

    def predict(self, X_test):
        """Return a flat array of predicted class labels."""
        return self.base['model'].predict_classes(X_test).ravel()
# Multi Layer Perceptron - Binary Classification
class AnnMLPBinary(Model):
    """Two-hidden-layer perceptron for binary classification (disabled by default)."""

    def __init__(self, n_features):
        super().__init__()
        self.binary_enabled = False
        self.epochs = 2
        self.batch_size = 100
        self.verbose = 0
        self.n_features = n_features
        self.base['model'] = self.create_network()

    def create_network(self):
        """Build and compile an n_features -> n_features -> 1 sigmoid network."""
        network = models.Sequential([
            layers.Dense(self.n_features, activation='relu',
                         input_shape=(self.n_features,)),
            layers.Dense(self.n_features, activation='relu'),
            layers.Dense(1, activation='sigmoid'),
        ])
        network.compile(optimizer='rmsprop', loss='binary_crossentropy',
                        metrics=['accuracy'])
        return network

    def fit(self, X_train, y_train):
        """Train using the instance's epoch/batch/verbosity settings."""
        self.base['model'].fit(X_train, y_train, epochs=self.epochs,
                               batch_size=self.batch_size, verbose=self.verbose)

    def predict(self, X_test):
        """Return a flat array of predicted class labels."""
        return self.base['model'].predict_classes(X_test).ravel()
class AnnMLPMulti(Model):
    """Two-hidden-layer perceptron for 5-class classification (enabled)."""

    def __init__(self, n_features):
        Model.__init__(self)
        self.multi_enabled = True
        self.epochs = 20
        self.batch_size = 100
        self.verbose = 0  # NOTE(review): not forwarded to fit(), unlike sibling classes - confirm intent
        self.n_features = n_features
        # Model is built lazily via get_model() by the caller (see Modelling).
        self.base = {}

    def get_model(self):
        """Build and compile an n_features -> n_features -> 5 softmax network."""
        model = models.Sequential()
        model.add(layers.Dense(self.n_features, activation='relu', input_shape=(self.n_features,)))
        model.add(layers.Dense(self.n_features, activation='relu'))
        model.add(layers.Dense(5, activation='softmax'))
        # Fix: removed an unused TensorBoard callback that formatted the `time`
        # *module* itself into the log dir; fit() creates the real callback.
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        return model

    def fit(self, X_train, y_train):
        """Train with one-hot targets and a timestamped TensorBoard callback."""
        y_train = pd.get_dummies(y_train)  # one-hot encode for categorical_crossentropy
        tensorboard = TensorBoard(log_dir='logs/tensorboard/{}'.format(time.strftime("%Y%m%d-%H%M%S")))
        self.base['model'].fit(X_train, y_train, epochs=self.epochs, batch_size=self.batch_size,
                               callbacks=[tensorboard])

    def predict(self, X_test):
        """Return a flat array of predicted class indices."""
        y_pred = self.base['model'].predict_classes(X_test)
        return y_pred.ravel()
class Modelling:
    """End-to-end pipeline: load the KDD Cup 1999 dataset, train each enabled
    model for binary and/or multi-class targets, and visualize/score results.

    NOTE: the whole pipeline runs from __init__ (instantiation has side effects:
    file reads, training, plotting, printing).
    """

    def __init__(self):
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Ignore low level instruction warnings
        tf.logging.set_verbosity(tf.logging.ERROR)  # Set tensorflow verbosity
        # self.logfile = None
        # self.gettrace = getattr(sys, 'gettrace', None)
        # self.original_stdout = sys.stdout
        # self.timestr = time.strftime("%Y%m%d-%H%M%S")
        # self.log_file()
        print(__doc__)
        self.filehandler = Filehandler()
        self.ds = KDDCup1999()
        self.visualize = Visualize()
        self.full = None
        self.X = None
        self.y = None
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.n_features = None
        self.random_state = 20
        # Maps accept both int and string keys because predictions may arrive
        # as either type before the astype(int) normalization below.
        self.label_multi = {0: 'normal', '0': 'normal', 1: 'dos', '1': 'dos', 2: 'u2r', '2': 'u2r', 3: 'r2l',
                            '3': 'r2l', 4: 'probe', '4': 'probe'}
        self.label_binary = {0: 'good', '0': 'good', 1: 'bad', '1': 'bad'}
        with timer('\nLoading dataset'):
            self.load_data()
        with timer('\nSetting X and y'):
            self.set_X()
            self.n_features = self.X.shape[1]
        models = (RandomForestClf(), AnnSLPBinary(self.n_features), AnnMLPBinary(self.n_features),
                  AnnMLPMulti(self.n_features))
        classification_type = ('Binary', 'Multi')
        # Try every (model, target-type) pair; a model participates only when
        # its matching *_enabled flag is set.
        for m, ctype in itertools.product(models, classification_type):
            score = False
            if ctype == 'Binary' and m.binary_enabled:
                self.set_y_binary()
                score = True
            elif ctype == 'Multi' and m.multi_enabled:
                self.set_y_multi()
                score = True
            if not score:
                continue
            with timer('\nTraining and scoring {} - {} target'.format(m.__class__.__name__, ctype)):
                m.base['model'] = m.get_model()
                #self.train_test_split()
                m.score(self.X, self.y, ctype)
                # Normalize fold outputs to int Series, then map to labels
                # for the confusion matrix and report.
                m.y_test[ctype] = pd.Series(m.y_test[ctype])
                m.y_pred[ctype] = pd.Series(m.y_pred[ctype])
                m.y_test[ctype] = m.y_test[ctype].astype(int)
                m.y_pred[ctype] = m.y_pred[ctype].astype(int)
                if ctype == 'Binary':
                    m.y_test[ctype] = self.series_map_ac_binary_to_label(m.y_test[ctype])
                    m.y_pred[ctype] = self.series_map_ac_binary_to_label(m.y_pred[ctype])
                else:
                    m.y_test[ctype] = self.series_map_ac_multi_to_label(m.y_test[ctype])
                    m.y_pred[ctype] = self.series_map_ac_multi_to_label(m.y_pred[ctype])
                title = '{} - {} - {} '.format('CM', m.__class__.__name__, ctype)
                self.visualize.confusion_matrix(m.y_test[ctype], m.y_pred[ctype], title)
                self.scores(m.y_test[ctype], m.y_pred[ctype])
        # Append the scores to a scores array. I could then do an np.mean(scores) to get the mean(average) from all the kfolds
        # save the epoch number and gfold number if possible as well, to get a per/epoch score
        # self.log_file()
        print('Finished')

    def log_file(self):
        # Toggle stdout redirection to a log file when not running under a
        # debugger. NOTE(review): depends on the attributes commented out in
        # __init__ (logfile/gettrace/original_stdout/timestr) - currently dead.
        if self.gettrace is None:
            pass
        elif self.gettrace():
            pass
        else:
            if self.logfile:
                sys.stdout = self.original_stdout
                self.logfile.close()
                self.logfile = False
            else:
                # Redirect stdout to file for logging if not in debug mode
                self.logfile = open('logs/{}_{}_stdout.txt'.format(self.__class__.__name__, self.timestr), 'w')
                sys.stdout = self.logfile

    def load_data(self):
        # Read the preprocessed 2-D tensor CSV produced earlier in the project.
        self.full = self.filehandler.read_csv(self.ds.config['path'], self.ds.config['file'] + '_Tensor2d_type_1')

    def set_X(self):
        # Features are every column except the target.
        self.X = self.full.loc[:, self.full.columns != 'attack_category']

    def set_y_binary(self):
        # Target as 0/1 (normal vs attack), flattened to a 1-D array.
        self.y = self.full.loc[:, ['attack_category']]
        self.df_map_ac_label_to_binary()
        self.y = self.y.values.ravel()

    def set_y_multi(self):
        # Target as 5-class string codes, flattened to a 1-D array.
        self.y = self.full.loc[:, ['attack_category']]
        self.df_map_ac_label_to_multi()
        self.y = self.y.values.ravel()

    def train_test_split(self):
        # 70/30 split (unused in the current k-fold flow).
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.30,
                                                                                random_state=self.random_state)

    def df_map_ac_label_to_binary(self):
        # normal -> 0, any attack category -> 1.
        conditions = [
            (self.y['attack_category'] == 'normal'),
            (self.y['attack_category'] == 'dos') | (self.y['attack_category'] == 'u2r') |
            (self.y['attack_category'] == 'r2l') | (self.y['attack_category'] == 'probe')
        ]
        self.y['attack_category'] = np.select(conditions, [0, 1])

    def df_map_ac_label_to_multi(self):
        conditions = [
            (self.y['attack_category'] == 'normal'),
            (self.y['attack_category'] == 'dos'), (self.y['attack_category'] == 'u2r'),
            (self.y['attack_category'] == 'r2l'), (self.y['attack_category'] == 'probe')
        ]
        self.y['attack_category'] = np.select(conditions, ['0', '1', '2', '3', '4'])  # string for get_dummies encoding

    def series_map_ac_multi_to_label(self, s):
        # Map class indices back to human-readable multi-class labels.
        return s.map(self.label_multi)

    def series_map_ac_binary_to_label(self, s):
        # Map 0/1 back to 'good'/'bad'.
        return s.map(self.label_binary)

    def scores(self, y_test, y_pred):
        # Print accuracy and the full classification report.
        print('Accuracy {}'.format(accuracy_score(y_test, y_pred)))
        print('F1 {}'.format(classification_report(y_test, y_pred, digits=10)))
modelling = Modelling()
# class AnnFeedForward(Model):
#
# # Because this is a binary classification problem, one common choice is to use the sigmoid activation function in a one-unit output layer.
#
# # Start neural network
# network = models.Sequential()
#
# # Add fully connected layer with a ReLU activation function
# network.add(layers.Dense(units=16, activation='relu', input_shape=(number_of_features,)))
#
# # Add fully connected layer with a ReLU activation function
# network.add(layers.Dense(units=16, activation='relu'))
#
# # Add fully connected layer with a sigmoid activation function
# network.add(layers.Dense(units=1, activation='sigmoid'))
#
# # Compile neural network
# network.compile(loss='binary_crossentropy', # Cross-entropy
# optimizer='rmsprop', # Root Mean Square Propagation
# metrics=['accuracy']) # Accuracy performance metric
#
#
# # Train neural network
# history = network.fit(train_features, # Features
# train_target, # Target vector
# epochs=3, # Number of epochs
# verbose=1, # Print description after each epoch
# batch_size=100, # Number of observations per batch
# validation_data=(test_features, test_target)) # Data for evaluation
#
# class ANNPerceptronClf(Model):
# def __init__(self):
# Model.__init__(self)
# self.enabled = False
# self.base['stext'] = 'ANNPCLF'
# self.base['model'] = KerasClassifier(build_fn=self.create_network, epochs=10, batch_size=100, verbose=0)
#
# def create_network(self):
# network = Sequential()
#
# # Input layer with inputs matching 0 axis of tensor, hidden layer with 1 neuron
# network.add(Dense(output_dim=1, init='uniform', activation='relu', input_dim=self.X_train.shape[1]))
#
# # Output layer - sigmoid good for binary classification
# network.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))
#
# # Binary cross entropy good for binary classification
# network.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#
# return network
#
# def set_dataset(self, folder, file):
# Model.set_dataset(self, folder, file)
#
# def fit(self):
# self.base['model'].fit(self.X_train, self.y_train)
#
# def predict(self):
# self.predictions = self.base['model'].predict(self.X_train)
# self.predictions = (self.predictions > 0.5)
# class AnnMlpBinary(Model):
# def __init__(self):
# Model.__init__(self)
# self.enabled = False
# self.base['stext'] = 'ANNPCLF'
# self.base['model'] = KerasClassifier(build_fn=self.create_network, epochs=10, batch_size=100, verbose=0)
#
# def create_network(self):
# model = Sequential()
# model.add(Dense(64, input_dim=20, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(64, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(1, activation='sigmoid'))
#
# model.compile(loss='binary_crossentropy',
# optimizer='rmsprop',
# metrics=['accuracy'])
#
# model.fit(x_train, y_train,
# epochs=20,
# batch_size=128)
# score = model.evaluate(x_test, y_test, batch_size=128)
|
from apscheduler.schedulers.blocking import BlockingScheduler
from bras import add_bingfa, add_itv_online
import switch
import olt
sched = BlockingScheduler(daemonic=False)
def bas_add_bingfa():
    """Collect BRAS concurrency ('bingfa') statistics."""
    add_bingfa()
def olt_tuopu():
    """Rebuild OLT topology: purge stale data, then reload interfaces and groups."""
    olt.del_old_data()
    olt.add_infs()
    olt.add_groups()
def sw_tuopu():
    """Rebuild switch topology and refresh traffic counters."""
    switch.update_model()
    switch.del_old_data()
    switch.add_groups()
    switch.add_infs()
    switch.add_traffics()
def xunjian():
    """Routine inspection: collect main-card and power info from OLTs and switches."""
    olt.add_main_card()
    olt.add_power_info()
    switch.add_main_card()
    switch.add_power_info()
def itv_online():
    """Collect IPTV online-user counts."""
    add_itv_online()
# Daily BRAS concurrency collection at 06:15.
sched.add_job(bas_add_bingfa, 'cron', day_of_week='0-6', hour='6', minute='15')
# IPTV online counts every 15 minutes during the 18:00-24:00 evening window.
sched.add_job(
    itv_online, 'cron', day_of_week='0-6', hour='18-24', minute='*/15')
# sched.add_job(sw_tuopu, 'cron',
#               day_of_week='0-6', hour='20', minute='00')
# sched.add_job(sw_tuopu, 'cron', day_of_week='0-6', hour='21', minute='00')
# sched.add_job(olt_tuopu, 'cron', day_of_week='0-6', hour='20', minute='30')
# sched.add_job(xunjian, 'cron', day_of_week='0', hour='1', minute='30')
# sched.add_job(sw_add_traffics, 'cron',
#               day_of_week='0-6', hour='18-22', minute='20/15')
try:
    # start() blocks until the process is interrupted.
    sched.start()
except (KeyboardInterrupt, SystemExit):
    pass
|
#!/usr/bin/python3
# -----------------------------------------------------------------------------
# Extractor.py
# Search for all ROP gadgets in a given binary using Ropper.
# The gadgets can be searched according to a label or an opcode.
#
# Author: Eval
# GitHub: https://github.com/jcouvy
# -----------------------------------------------------------------------------
import argparse
import re
from ropper import RopperService
from Structures import Instruction, Gadget
from Mnemonics import OPCODES
class Extractor:
    """Thin wrapper around Ropper that extracts ROP gadgets from a binary."""

    def __init__(self, options, target):
        """Load `target` into a RopperService configured with `options`."""
        self.rs = RopperService(options)
        self.rs.addFile(target)
        self.rs.loadGadgetsFor(name=target)
        self.target = target

    def search_gadgets(self, label):
        """
        Using Ropper's engine, the function searches for all gadgets matching
        a given label.
        Args:
            label: Search pattern to be used by Ropper. A '?' character will be
                interpreted as any character, the '%' is any string.
                (example: 'mov e??, [e??]')
        Returns:
            List of Gadget objects matching the gadgets found in the binary.
        """
        def find_mnemonic(insn):
            """Return the mnemonic name for the first opcode byte of `insn`."""
            opcode = self.rs.asm(insn)
            first_byte = int(re.findall('.{1,2}', opcode)[0], 16)
            # Idiom fix: direct dict lookup instead of a linear scan of keys.
            return OPCODES.get(first_byte, 'not supported')

        gadget_list = []
        for file, gadget in self.rs.search(search=label, name=self.target):
            addr_str = str(gadget).partition(':')[0]
            insn_str = str(gadget).partition(':')[2].strip(' ').split('; ')
            # Hoisted out of the instruction loop: the address is invariant per
            # gadget (also avoids a stale value when insn_str is empty).
            address = int(addr_str, 16)
            insn_list = []
            for i in insn_str:
                regs = re.findall(r'e[a-z][a-z]', i)
                mnemonic = find_mnemonic(i)
                if len(regs) == 2:
                    insn = Instruction(i, address, mnemonic,
                                       regs[0], regs[1])
                elif len(regs) == 1:
                    insn = Instruction(i, address, mnemonic,
                                       regs[0])
                else:
                    insn = Instruction(i, address, mnemonic)
                insn_list.append(insn)
            gadget_list.append(Gadget(address, insn_list))
        return gadget_list
def print_gadgets(gtype, glist):
    """
    Pretty print each gadget found in a given list.
    Args:
        gtype: Short string indicating the instruction type. \
        (example: load, store...)
        glist: List of instructions.
    """
    print("Searching for type: " + gtype)
    print("Found %i gadgets:\n" % len(glist))
    for gadget in glist:
        print("%s" % gadget)
        for instruction in gadget.instructions:
            print(instruction)
def _test_class():
    """ Test-run function for debugging """
    # Relies on the module-level `extract` created in the __main__ block below.
    print_gadgets("load", extract.search_gadgets('mov [e??], e??'))
    print_gadgets("store", extract.search_gadgets('mov e??, [e??]'))
    print_gadgets("pop", extract.search_gadgets('pop ???; ret;'))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("target", help="path to target binary")
    args = parser.parse_args()
    target = args.target
    # Ropper search options: plain output, 3-instruction ROP gadgets.
    options = {
        'color': False,
        'all': False,
        'inst_count': 3,
        'type': 'rop',
        'detailed': False,
    }
    extract = Extractor(options, target)
    _test_class()
|
from chai.src.corpus.Corpus import Corpus
def main():
    """Interactive session for building a Corpus and matching terms to clusters."""
    print("Hi, Welcome to Chai Interactive")
    print("You are in the 'Corpus' Interactive Mode")
    corpus = Corpus(input("Please Enter the name of your Corpus"))
    print("Please Enter the 'Standard Terms' for the corpus")
    # Read standard terms until a blank line is entered.
    for standard_term in iter(input, ''):
        corpus.add_leader(standard_term)
    print("Listed Below are the clusters in the Corpus, ", corpus.name)
    for cluster in corpus.clusters:
        print(cluster)
    print("We will match your terms to clusters")
    # Match user-supplied terms until a blank line is entered.
    for term in iter(lambda: input("Enter term"), ''):
        term_cluster = corpus.match_term_cluster(term)
        if term_cluster:
            print(term, ' was matched to cluster ', term_cluster.name)
        else:
            print(term, ' was not matched with any existing clusters')
# Run the interactive session when executed as a script.
if __name__ == '__main__':
    main()
|
from unittest import TestCase
import six
import os
from datetime import datetime
import json
from approx_dates.models import ApproxDate
from popolo_data.importer import Popolo, NotAValidType
from popolo_data.models import (Person, Organization, Membership,
Area, Post, Event)
from popolo_data.base import approx_date_to_iso
from tempfile import mktemp
class TestSaving(TestCase):
    """
    test assignments to attributes and to files
    """

    def test_date_save_load(self):
        # ApproxDate values at year, month and day precision - plus textual
        # ranges and plain datetimes - must round-trip via approx_date_to_iso.
        p = Person()
        p.birth_date = ApproxDate.from_iso8601("2015")
        assert approx_date_to_iso(p.birth_date) == "2015"
        p.birth_date = ApproxDate.from_iso8601("2015-06")
        assert approx_date_to_iso(p.birth_date) == "2015-06"
        p.birth_date = "2015-06-23 to 2015-07-12"
        assert approx_date_to_iso(p.birth_date) == "2015-06-23 to 2015-07-12"
        p.birth_date = datetime(2015,6,23)
        assert approx_date_to_iso(p.birth_date) == "2015-06-23"

    def test_invalidtype(self):
        # Adding a plain string to a Popolo collection should raise.
        # NOTE(review): if add() does NOT raise, this test still passes -
        # consider self.assertRaises(NotAValidType, p.add, t).
        p = Popolo()
        t = "hello this is a string, an invalid type!"
        try:
            p.add(t)
        except NotAValidType:
            pass

    def test_twitter(self):
        """
        does adding delete values
        """
        # Re-assigning twitter (handle, URL, handle again) must leave a single
        # current value rather than accumulating contact entries.
        p = Person()
        p.twitter = "testuser"
        p.twitter = "http://www.twitter.com/testuser"
        p.twitter = "testuser"
        assert p.twitter == "testuser"
        p.twitter = None

    def test_merge(self):
        """
        Test if merging preserves correct details.
        """
        full_attributes = {"id": "person1",
                           "email": "test@madeup.com",
                           "honorific_prefix": "Dr",
                           "honorific_suffix": "MBe",
                           "image": "blahblah.jpg",
                           "name": "Indiana Jones",
                           "sort_name": "jones, indiana",
                           "national_identity": "american",
                           "biography": "steals things",
                           "birth_date": datetime(1899, 7, 1).date(),
                           "death_date": ApproxDate.FUTURE,
                           "family_name": "Jones",
                           "given_name": "Indiana",
                           "summary": "archaeologist",
                           "wikidata": "Q174284",
                           "phone": "9906753",
                           "fax": "5559906753",
                           "property": "123 fake street",
                           "facebook": "https://www.facebook.com/indianajones/",
                           "sources": ["TV", "Movies"],
                           }
        reduced_attributes = {"id": "4435435",
                              "gender": "male",
                              "name": "Indiana Jones",
                              "other_names": [{"name":"Indiana Walton Jones"}],
                              }
        p1 = Person()
        p2 = Person()
        for k, v in six.iteritems(full_attributes):
            setattr(p1, k, v)
        for k, v in six.iteritems(reduced_attributes):
            setattr(p2, k, v)
        assert "Q174284" == p1.identifier_value("wikidata")
        #also test a model without a custom absorb class
        o1 = Organization(id="org1",
                          name="org1")
        o2 = Organization(id="org1",
                          name="org1")
        membership = Membership()
        membership.person = p2
        pop1 = Popolo.new()
        pop2 = Popolo.new()
        pop1.add(p1)
        pop1.add(o1)
        pop2.add(p2)
        pop2.add(o2)
        pop2.add(membership)
        # Merge in both directions; attributes from each side must survive.
        one_way = pop1.merge(pop2)
        other_way = pop2.merge(pop1)
        new_person = one_way.persons[0]
        other_person = other_way.persons[0]
        original = pop1.persons[0]
        assert new_person.id == "person1"
        assert other_person.id == "person1"
        assert new_person.property == "123 fake street"
        assert other_person.property == "123 fake street"
        assert new_person.gender == "male"
        assert other_person.gender == "male"
        # Merged person gains the other_name; the source person is untouched.
        assert "Indiana Walton Jones" in [x["name"] for x in new_person.other_names]
        assert "Indiana Walton Jones" not in [x["name"] for x in original.other_names]
        #test collection json export
        pop1.persons.json()

    def test_populate_from_scratch(self):
        """
        test person
        """
        pop = Popolo()
        """
        test person
        """
        person_attributes = {"id": "person1",
                             "email": "test@madeup.com",
                             "gender": "male",
                             "honorific_prefix": "Dr",
                             "honorific_suffix": "MBe",
                             "image": "blahblah.jpg",
                             "name": "Indiana Jones",
                             "sort_name": "jones, indiana",
                             "national_identity": "american",
                             "biography": "steals things",
                             "birth_date": datetime(1899, 7, 1).date(),
                             "death_date": datetime(1999, 7, 1).date(),
                             "family_name": "Jones",
                             "given_name": "Indiana",
                             "summary": "archaeologist",
                             "wikidata": "Q174284",
                             "phone": "9906753",
                             "fax": "5559906753",
                             "property": "123 fake street",
                             "facebook": "https://www.facebook.com/indianajones/",
                             "other_names": [{"name":"Indiana Walton Jones"}],
                             "sources": ["TV", "Movies"],
                             "twitter":"indianajones",
                             }
        p = Person()
        # Every attribute must survive a set/get round-trip.
        for k, v in six.iteritems(person_attributes):
            setattr(p, k, v)
            assert getattr(p, k) == v
        pop.add([p])
        """
        test organisation
        """
        org_attributes = {"id": "org1",
                          "name": "made up organisation",
                          "wikidata": "Q20136634",
                          "classification": "Parliament",
                          "image": "parliament.jpg",
                          "founding_date": datetime(1894, 7, 1).date(),
                          "dissolution_date": datetime(1923, 7, 1).date(),
                          "seat": 50,
                          "other_names": ["banana republic"]
                          }
        o = Organization()
        for k, v in six.iteritems(org_attributes):
            setattr(o, k, v)
            assert getattr(o, k) == v
        pop.add(o)
        """
        test area
        """
        area_attributes = {"id": "area1",
                           "name": "Brighton",
                           "type": "City",
                           "wikidata": "Q131491"
                           }
        a = Area()
        for k, v in six.iteritems(area_attributes):
            setattr(a, k, v)
            assert getattr(a, k) == v
        pop.add(a)
        """
        test post
        """
        post_attribute = {"id": "post1",
                          "label": "Leader",
                          "organization_id": "org1",
                          }
        p = Post()
        for k, v in six.iteritems(post_attribute):
            setattr(p, k, v)
            assert getattr(p, k) == v
        pop.add(p)
        """
        test event
        """
        event_attributes = {"id": "event1",
                            "classification": "legislative-term",
                            "start_date": datetime(1900, 1, 1).date(),
                            "end_date": datetime(1905, 1, 1).date(),
                            "organization_id":"org1",
                            }
        e = Event()
        for k, v in six.iteritems(event_attributes):
            setattr(e, k, v)
            assert getattr(e, k) == v
        pop.add(e)
        membership_attributes = {"role":"MP",
                                 "person_id":"person1",
                                 "organisation_id":"org1",
                                 "area_id":"area1",
                                 "post_id":"post1",
                                 "legislative_period_id":"event1",
                                 "start_date": datetime(1900, 5, 1).date(),
                                 "end_date":datetime(1903, 1, 1).date(),
                                 }
        m = Membership()
        for k, v in six.iteritems(membership_attributes):
            setattr(m, k, v)
            assert getattr(m, k) == v
        pop.add(m)
        """
        can we save and reload this intact?
        """
        j = pop.to_json()
        pop2 = Popolo(json.loads(j))
        assert pop2.to_json() == pop.to_json()
        assert pop.memberships[0].id == pop2.memberships[0].id
        """
        save to temp file
        """
        # NOTE(review): mktemp is deprecated/race-prone; NamedTemporaryFile or
        # mkstemp would be safer here.
        filename = mktemp()
        pop.to_filename(filename)
        os.remove(filename)
|
# -*- coding: utf-8 -*-
"""
@File:api_request.py
@Author:cdf
@Version:3.0
@Description:request请求封装
"""
import datetime
import requests
from ApiAutoCore.base.read_yml import readYaml
from ApiAutoCore.base.log import Logger
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class apiRequest:
    """HTTP request wrapper: prefixes the environment's base URL and logs request details/timing."""

    def __init__(self, project_name):
        self.read_data = readYaml(project_name, 'middler', 'config.yml')  # load the project config file
        self.config_env = self.read_data.configEnv()  # resolve the active environment name
        self.base_url = self.read_data.all_data[self.config_env]['host']
        self.logger = Logger(project_name).get_logger()

    """封装https请求,根据实际情况传参"""
    def send_requests(self, method, url, data=None, params=None, headers=None, cookies=None,
                      json=None, files=None, auth=None, timeout=None, proxies=None,
                      verify=False, cert=None):  # SSL certificate verification disabled by default (verify=False)
        # Send an HTTP(S) request relative to the configured host; log the
        # request body, URL, headers and elapsed wall-clock time.
        url = ''.join(self.base_url + url)
        start_time = datetime.datetime.now()
        res = requests.request(method=method, url=url, data=data, params=params, headers=headers,
                               cookies=cookies, json=json, files=files, auth=auth, timeout=timeout,
                               proxies=proxies, verify=verify, cert=cert)
        end_time = datetime.datetime.now()
        self.logger.debug('请求内容:' + str(res.request.body))
        self.logger.debug('请求地址:' + str(res.url))
        # self.logger.debug('接口返回:' + str(res))
        self.logger.debug('请求头:' + str(res.request.headers))
        self.logger.critical('url:' + str(res.url) + '\ntime:' + str(round((end_time - start_time).total_seconds(), 5)))
        # self.logger.critical('url:' + str(res.url) + '\nrequest中的time:' + str(res.elapsed.total_seconds()))
        return res
# Ad-hoc manual smoke test of the request wrapper.
# SECURITY NOTE(review): session cookie/token hard-coded below - rotate and
# move to configuration before committing.
if __name__ == '__main__':
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/85.0.4183.83 Safari/537.36 Edg/85.0.564.44',
               'host': 's.pinpin.com',
               # 'Content-Type': 'application/pdf',
               'Cookie': 'hy_data_2020_id=17b2f38622f88e-0bce6541f96ffb-3d385d08-1049088-17b2f38623051d; hy_data_2020_js_sdk=%7B%22distinct_id%22%3A%2217b2f38622f88e-0bce6541f96ffb-3d385d08-1049088-17b2f38623051d%22%2C%22site_id%22%3A1216%2C%22user_company%22%3A1346%2C%22props%22%3A%7B%7D%2C%22device_id%22%3A%2217b2f38622f88e-0bce6541f96ffb-3d385d08-1049088-17b2f38623051d%22%7D; LiveWSALA33200344=72358b7af7c946e5b4d4ccbafdaebcf7; NALA33200344fistvisitetime=1628584829944; pvuvMachineId=d5356a2545394322a33461aacf56c898; Hm_lvt_2fe0d8bb16537140e266bfe55a157d37=1628584829,1628647373; LiveWSALA33200344sessionid=5ca757ce56be47ed91929f2da1bee4cc; NALA33200344visitecounts=2; NALA33200344lastvisitetime=1628648826961; NALA33200344visitepages=35; Hm_lpvt_2fe0d8bb16537140e266bfe55a157d37=1628648827; token=b299e16916fb555cf6d93c5253fc8aa8'
               }
    urls = '/pfapi/bside/pinpincloud/exam/listexaminees.json?examStatus=-1&pageSize=20&pageNo=1'
    data = ''
    r = apiRequest('ApiTestManagerPc').send_requests(method='GET', url=urls, headers=headers).json()
    print(r)
|
# -*- coding: utf-8 -*-
import math
class Solution:
    def countVowelStrings(self, n: int) -> int:
        """Count lexicographically sorted vowel strings of length n.

        Stars-and-bars: choosing a multiset of n vowels from 5 gives
        C(n + 4, 4) (equal to the original C(n + 4, n) by symmetry).
        """
        return math.comb(n + 4, 4)
# Quick self-checks against known answers.
if __name__ == "__main__":
    solution = Solution()
    assert 5 == solution.countVowelStrings(1)
    assert 15 == solution.countVowelStrings(2)
    assert 66045 == solution.countVowelStrings(33)
|
# Read "name score" pairs from results.txt and print them ranked by score.
scores = {}
# Fix: the original called `result_f.close` without parentheses, so the file
# was never actually closed; `with` guarantees closure.
with open("results.txt") as result_f:
    for each_line in result_f:
        # each line is "name score"; the dict is keyed by score, so a
        # duplicate score keeps only the last name read
        (name, score) = each_line.split()
        scores[score] = name
print("the top scores were:")
# NOTE(review): scores are strings, so the sort is lexicographic - confirm all
# scores have the same number of digits.
for each_score in sorted(scores.keys(), reverse=True):
    print('surfer ' + scores[each_score] + ' scored ' + each_score)
|
import torch
from openvaccine.losses.loss import Loss
@Loss.register("MCRMSE")
class MCRMSE(Loss):
    """Mean columnwise RMSE over the first `_num_to_calc_on` target columns."""

    def forward(self, logits: torch.Tensor, targets: torch.Tensor, weight: torch.Tensor = None) -> torch.Tensor:
        k = self._num_to_calc_on
        sq_err = torch.nn.functional.mse_loss(logits[:, :k], targets[:, :k], reduction="none")
        if weight is None:
            col_mse = sq_err.mean(dim=0)
        else:
            # Per-sample weights broadcast across columns before averaging.
            col_mse = (sq_err * weight.reshape(-1, 1)).mean(dim=0)
        # RMSE per column, then the mean across columns.
        return col_mse.sqrt().mean()
|
import os
import sys
import time
# (width, height) pixel pairs treated as standard ad-banner creative sizes.
STANDARD_SIZES = set(((250, 250), (300, 1050), (160, 600), (728, 90),
                      (300, 600), (970, 90), (234, 60), (125, 125), (300, 250),
                      (120, 240), (120, 90), (180, 160), (300, 100), (970, 250),
                      (120, 60), (550, 480), (468, 60), (336, 280), (88, 31),
                      (240, 400), (180, 150), (120, 600), (720, 300), (976, 40),
                      (180, 900)))
def filter(list, s):
    """Return True if any pattern in `list` occurs as a substring of `s`."""
    # NOTE: deliberately keeps the original name, which shadows the builtin
    # `filter`, for interface compatibility with existing callers.
    return any(pattern in s for pattern in list)
###################################################
## TWEAK THE ARGUMENTS TO THIS AS NEEDED ##
## (we loaded from a db, hence the id attribute) ##
## (change the ad detection code as desired too) ##
###################################################
def is_it_an_ad(list, id, src, iframe_url, width, height, landing_url, landing_domain, parent_domain):
    """Heuristically classify an image/iframe as an ad.

    True only when the creative has a standard ad size, lands on a third-party
    domain, has a non-empty landing URL, and its src or iframe URL matches the
    adblock filter list.
    """
    if (width, height) not in STANDARD_SIZES:
        return False
    if landing_domain == parent_domain:
        return False
    # no ads with no landing pages
    if len(landing_url) == 0:
        return False
    # check against adblock filters
    # Bug fix: the original tested the *global* `img[0]`/`img[1]` instead of
    # the `src`/`iframe_url` parameters, so the function only worked by
    # coincidence inside the demo loop below.
    if filter(list, src) or filter(list, iframe_url):
        return True
    return False
# NOTE: this block uses Python 2 print statements; the script targets Python 2.
if __name__ == '__main__':
    # usage: python adblock.py filter list
    if len(sys.argv) < 1:
        print "usage: python adblock.py filter_list"
        sys.exit(1)
    # loads an adblock filter list (used the simple text file)
    list = open(sys.argv[1]).read().split('\n')[:-1]
    ##########################################################
    ## OPEN THE FILE OR DB CONTAINING YOUR SAVED CRAWL DATA ##
    ##########################################################
    # data = (load in crawl data)
    ## THIS IS AN EXAMPLE ##
    data = [['www.example.com/article1',
             'ads.example.com/&ad_channel=2829',
             250,
             250,
             'www.amazon.com',
             'amazon.com',
             'example.com']]
    ads, non_ads = [],[]
    # Classify each crawled image record and bucket its src URL.
    for img in data:
        src, iframe_url, width, height, landing_url, landing_domain, parent_domain = img
        if is_it_an_ad(list, id, src, iframe_url, width, height, landing_url, landing_domain, parent_domain):
            ads.append(img[0])
        else:
            non_ads.append(img[0])
    print ads
    print non_ads
    # (save ads, non_ads as desired)
    ## END OF THE EXAMPLE ##
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from pyvirtualdisplay import Display
from sqlalchemy import create_engine
import psycopg2
import os, re
def titan_scrapping():
    """Scrape the commission figure from europartners.com via headless Chrome.

    Runs inside a virtual display; the browser and display are torn down in
    the finally block even on failure.
    """
    display = Display(visible = 0, size = (1200, 900))
    display.start()
    try:
        Titanbet = webdriver.Chrome(executable_path=os.path.abspath("/usr/bin/chromedriver"))
        Titanbet.get("https://www.europartners.com/")
        # Open the login form from the header navigation.
        Titanbet.find_element_by_xpath("//*[@id='wrapper']/header/section/nav/ul/li[1]/a/span").click()
        Titanbet.implicitly_wait(10)
        # SECURITY NOTE(review): credentials hard-coded in source - move them
        # to configuration/environment variables.
        Titanbet.find_element_by_id("userName").send_keys("betfyuk")
        Titanbet.find_element_by_id("password").send_keys("qwerty123")
        pwd = Titanbet.find_element_by_id("password")
        pwd.send_keys(Keys.RETURN)
        Titanbet.implicitly_wait(10)
        commission = Titanbet.find_element_by_xpath("//*[@id='separateConv']/div[1]/div[2]").text
        # Extract the first numeric token (digits/dots) from the element text.
        pattern = re.compile(r'[\d.\d]+')
        tmp = pattern.search(commission)
        commission = tmp.group(0)
        return commission
    finally:
        # NOTE(review): if webdriver.Chrome() itself raises, Titanbet is
        # unbound here and quit() will NameError - confirm intended.
        Titanbet.quit()
        display.stop()
# Scrape the balance and persist it to the titanbets table.
data = titan_scrapping()
balance = data
engine = create_engine('postgresql://postgres:root@localhost/kyan')
result = engine.execute("INSERT INTO titanbets (balance) VALUES (%s);", balance)
# Python 2 script: replace the character at a given index in a string.
s = raw_input().strip()
pos, char = raw_input().strip().split()
# Strings are immutable, so edit via a list of characters and re-join.
l = list(s)
l[int(pos)] = char
print ''.join(l)
|
# 数学题
class Solution:
    def kthPalindrome(self, queries: list[int], intLength: int) -> list[int]:
        """Return the k-th smallest palindrome of length intLength for each
        query, or -1 when fewer than k such palindromes exist.

        Fix: the original annotations used `List` without importing it from
        `typing`, which raises NameError when the class is defined; builtin
        generics (PEP 585) are used instead.
        """
        d = intLength // 2 + intLength % 2      # digits in the left half
        base = 10**(d-1) if d != 1 else 1       # smallest valid left half
        res = []
        for item in queries:
            pre = base + item - 1               # left half of the k-th palindrome
            if pre >= 10 ** d:
                res.append(-1)                  # ran out of d-digit left halves
            else:
                if intLength % 2 == 0:
                    res.append(int(str(pre) + str(pre)[::-1]))
                else:
                    # Odd length: the middle digit is not mirrored.
                    res.append(int(str(pre) + str(pre)[:-1][::-1]))
        return res
# dl的写法
class Solution:
    def kthPalindrome(self, queries: list[int], intLength: int) -> list[int]:
        """Return the k-th palindrome of length intLength per query (-1 if absent).

        Fixes: `List` was used without a typing import (NameError at class
        creation; builtin generics used instead), and a leftover debug
        print() was removed.
        """
        ans = [-1] * len(queries)
        base = 10 ** ((intLength - 1) // 2)
        for i, q in enumerate(queries):
            # There are exactly 9 * base palindromes of this length.
            if q <= 9 * base:
                s = str(base + q - 1)  # left half of the palindrome
                # Mirror the left half; skip the middle digit for odd lengths.
                s += s[-2::-1] if intLength % 2 else s[::-1]
                ans[i] = int(s)
        return ans
|
import tensorflow as tf
# Minimal linear-regression fit (hypothesis = x * w) with gradient descent.
# NOTE(review): uses the TensorFlow 1.x graph/session API (tf.Session,
# tf.train.GradientDescentOptimizer), removed in TF 2.x - requires TF 1.x or
# tf.compat.v1 to run.
x = [1,2,3]
y = [1,2,3]
w = tf.Variable(5.0)
hypothesis = x * w
cost = tf.reduce_mean(tf.square(hypothesis - y))
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 101 optimization steps, printing the weight after each.
    for step in range(101):
        _, w_val = sess.run([train, w])
        print(step, w_val)
|
import os
class ManifestMaker:
    """Renders locust Kubernetes manifest templates bundled under resources/."""

    def __init__(self, config):
        """`config` supplies the keys referenced by the templates' {placeholders}."""
        self.config = config
        self.templates = os.path.join(os.path.dirname(__file__), 'resources/')

    def master_deployment(self):
        """Return the rendered locust master deployment manifest."""
        return self.__template_parse("locust-master.yaml")

    def worker_deployment(self):
        """Return the rendered locust worker deployment manifest."""
        return self.__template_parse("locust-worker.yaml")

    def master_service(self):
        """Return the rendered locust master service manifest."""
        return self.__template_parse("locust-master-service.yaml")

    def master_service_lb(self):
        """Return the rendered locust master load-balancer service manifest."""
        return self.__template_parse("locust-master-service-lb.yaml")

    def __template_parse(self, template):
        """Read a template file and substitute {placeholders} from self.config.

        Fix: the original used open(...).read() without closing, leaking the
        file handle; a `with` block guarantees closure.
        """
        path = os.path.join(self.templates, template)
        with open(path, 'rt') as handle:
            tmpl = handle.read()
        return tmpl.format(**self.config)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 20:09:12 2020
@author: adeela
"""
'''
https://www.thepythoncorner.com/2017/12/the-art-of-avoiding-nested-code/
https://nedbatchelder.com/text/iter.html
https://nedbatchelder.com/blog/201608/breaking_out_of_two_loops.html
https://book.pythontips.com/en/latest/map_filter.html
Good Explanation of reduce() at the top -->
https://www.geeksforgeeks.org/reduce-in-python/
'''
# =============================================================================
# find odd numbers in list using list comprehension
# =============================================================================
A = [1,2,3,4,5,6,7,8,9,11]
## same old for loop
O = []
for x in A:
    if x % 2 != 0:
        O.append(x)
## fancy straight-forward list comprehension syntax (same result as O)
X = [x for x in A if x%2!=0]
print(X)
# =============================================================================
# Manipulate elements in the list
# =============================================================================
# add letter 'a' times the element in the list
import itertools
# itertools.chain(*E) --> E is a list of lists and chain() flattens it to one list
B = [1,3,4,5]
B_ = ['a '* i for i in B]
print(B_)
# output --> ['a ', 'a a a ', 'a a a a ', 'a a a a a ']
# Now let's suppose C represents the weights of elements in array B:
# we can multiply weights OR we would like to expand elements in B as per weights in C
C = [4,5,2,1]
# explanation of the expression below:
# 1. integer * string replicates the string the number of times indicated by the integer
# 2. join adds a space between each character of the string from step 1; it is still one string
# 3. split() splits that string and puts each piece as a separate element in a list
# e.g. 4 * '1' --> '1111' --> '1 1 1 1' --> ['1', '1', '1', '1']
E = [(' '.join(C[i]* str(B[i]))).split(' ') for i in range(len(B))]
E = list(itertools.chain(*E)) # flatten list of list
E = [int(x) for x in E ] # convert to integer list
# =============================================================================
# #### OR simplify it further as follows w/o using join()
# =============================================================================
# list('ABC') --> ['A', 'B', 'C']
E = [list(C[i]* str(B[i])) for i in range(len(B))]
E = list(itertools.chain(*E)) # flatten list of list
E = [int(x) for x in E ] # convert to integer list
# =============================================================================
# Filter the numbers from list using list comprehension vs filter()
# =============================================================================
# keep only numbers that ARE multiples of 11
# LIST COMPREHENSION way
X = [1,2,3,4,5,5,6,7,11,22,55,44]
multiples_11 = [x for x in X if x%11 == 0]
# FILTER way
# filter(function_to_apply, Iterable) returns an Iterable object, that's
# why we need to wrap the result in list() to get a list object
# lambda lets us define an anonymous function
multiples_11_f = list(filter(lambda x: x%11==0, X))
# =============================================================================
# Find coordinate pairs using zip() vs list comprehension
# =============================================================================
X = range(0,100, 1) # numbers 0..99 with jump_size == 1
Y = range(0,200, 2) # even numbers 0..198 with jump_size == 2
# create (x,y) pairs by combining elements of both lists by index,
# meaning x0 combined with y0, x1 with y1 and so on.
# The final size of the list is the same as the size of X or Y
X_Y = list(zip (X,Y))
# this gives the cartesian pairs of tuples (every x with every y)
X_Y_cartesian = [(x, y) for x in X for y in Y]
# Now we can extend it further to filter out some of the pairs:
# only pairs where both x and y are even
X_Y_cartesian_even = [(x,y) for x in X for y in Y if x%2== 0 and y%2==0]
# =============================================================================
# sum(product) numbers using list comprehension vs reduce()
# from functools library
# =============================================================================
X = range(1,100)
# Plain old way to calculate sum
temp = 0
for x in X:
    temp +=x
# A bare list comprehension only builds a list; folding needs sum()/reduce()
from functools import reduce
# Using reduce(): it takes a lambda function with exactly two inputs.
# First, x and y are the first two elements of the list and a result is computed;
# in the next iteration the next element and the result from the previous
# computation are picked and a new result is computed, and so on.
X_sum = reduce(lambda x,y: x+y, X)
# =============================================================================
# changing elements of the list via map() vs list comprehension
# =============================================================================
# Task: convert all elements of a list of ints to strings and append a space
# after each number
X = range(1,5)
# plain old way to do it
X_m= []
for x in X:
    X_m.append(str(x) + ' ')
# list comprehension
X_m = [str(x)+ ' ' for x in X]
# map() also returns an Iterable object, so we need to wrap it with list()
# to get the list of elements
X_m = list(map (lambda x: str(x) + ' ', X ))
|
def insertion_sort(sortable):
    """Sort *sortable* in place with insertion sort and return the same list."""
    for pos in range(len(sortable)):
        cursor = pos
        # Slide the element left until it sits after its nearest smaller-or-equal neighbour.
        while cursor > 0 and sortable[cursor] < sortable[cursor - 1]:
            sortable[cursor - 1], sortable[cursor] = sortable[cursor], sortable[cursor - 1]
            cursor -= 1
    return sortable
def insertion_sort_recursive(sortable, n=-1):
    """Sort *sortable* in place using recursive insertion sort and return it.

    :param sortable: mutable sequence to sort
    :param n: length of the prefix to sort; -1 (default) means the whole list
    :return: the same list object, sorted
    """
    if n == -1:
        n = len(sortable)
    # Base case: prefixes of length 0 or 1 are already sorted.  Using n <= 1
    # (the original tested n == 1) prevents infinite recursion on an empty
    # list, where n starts at 0 and n-1 == -1 resets it to 0 forever.
    if n <= 1:
        return sortable
    insertion_sort_recursive(sortable, n - 1)
    i = n - 1
    # Bubble the last element of the prefix left into its sorted position.
    while i > 0 and sortable[i] < sortable[i - 1]:
        sortable[i - 1], sortable[i] = sortable[i], sortable[i - 1]
        i -= 1
    return sortable
if __name__=='__main__':
    # Self-test hook: runs the project's sorting test suite when this module
    # is executed directly (project-local import; not available standalone).
    import ztesting.sorting_test.sorting_test as test
    test.test_insertion_sort()
|
import numpy as np
import cv2
import pathlib
import imutils

# Load a sample KTP (Indonesian ID card) photo; PureWindowsPath.as_posix()
# normalises the Windows separators so cv2.imread accepts the path.
address = pathlib.PureWindowsPath(r'C:\Users\Muhammad Reza\KTPDetect\Foto ktp\ktp0.jpg')
image = cv2.imread(address.as_posix())
image = imutils.resize(image, width=650)
# Work in full-range HSV so the hue channel spans 0-255 instead of 0-179.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)
# Threshold a blue-ish HSV range — presumably targeting the card's blue
# areas; confirm the bounds against sample images. (np.uint16 works here,
# though inRange bounds are conventionally uint8.)
low_blue = np.array([80,30,155], np.uint16)
high_blue = np.array([240,255,255], np.uint16)
blue = cv2.inRange(hsv, low_blue, high_blue)
# hsv = cv2.GaussianBlur(hsv,(17,17),0)
# Edge-detect the binary mask, then find and overlay all contours in blue.
edged = cv2.Canny(blue, 30, 200)
contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (255,0,0), 5)
# cv2.imshow('hsv',hsv)
cv2.imshow('img', blue)
# cv2.imshow('canny',edged)
cv2.imshow('overlay', image)
cv2.waitKey(0)  # block until a key is pressed, then tear down the windows
cv2.destroyAllWindows()
"""
Antelope Interface Definitions
The abstract classes in this sub-package define what information is made available via a stateless query to an Antelope
resource of some kind. The interfaces must be instantiated in order to be used. In the core package
"""
from .interfaces.abstract_query import PrivateArchive, EntityNotFound, NoAccessToEntity
from .interfaces.iconfigure import ConfigureInterface
from .interfaces.iexchange import ExchangeInterface, ExchangeRequired
from .interfaces.iindex import IndexInterface, IndexRequired, directions, comp_dir, num_dir, check_direction, valid_sense, comp_sense
from .interfaces.ibackground import BackgroundInterface, BackgroundRequired
from .interfaces.iquantity import QuantityInterface, QuantityRequired, NoFactorsFound, ConversionReferenceMismatch, FlowableMismatch
from .interfaces.iforeground import ForegroundInterface
from .flows import BaseEntity, FlowInterface, Flow
from .refs.process_ref import MultipleReferences, NoReference
from .refs.catalog_ref import CatalogRef, QuantityRef, UnknownOrigin
from .refs.quantity_ref import convert, NoUnitConversionTable
from .refs.base import NoCatalog, EntityRefMergeError
from .refs.exchange_ref import ExchangeRef, RxRef
import re
from os.path import splitext
from collections import namedtuple
class PropertyExists(Exception):
    """Signals that a property being created/set already exists."""
    pass
'''
Query classes
'''
class BasicQuery(IndexInterface, ExchangeInterface, QuantityInterface):
    """Stateless query front-end over a single archive.

    Each query method is dispatched to an interface obtained from the
    archive; a query that comes back None raises the supplied exception.
    """

    def __init__(self, archive, debug=False):
        self._archive = archive
        self._dbg = debug

    def _perform_query(self, itype, attrname, exc, *args, strict=False, **kwargs):
        # Fall back to the 'basic' interface when no type is requested.
        if itype is None:
            itype = 'basic'
        handler = self._archive.make_interface(itype)
        outcome = getattr(handler, attrname)(*args, **kwargs)
        # A successful query must return something non-None.
        if outcome is None:
            raise exc(itype, attrname, *args)
        return outcome

    @property
    def origin(self):
        return self._archive.ref

    def make_ref(self, entity):
        """
        Query subclasses can return abstracted versions of query results.
        :param entity:
        :return: an entity that could have a reference to a grounded query
        """
        if entity is None:
            return None
        # Wrap raw entities into refs grounded in this query; refs pass through.
        return entity.make_ref(self) if entity.is_entity else entity
'''
I think that's all I need to do!
'''
class LcQuery(BasicQuery, BackgroundInterface, ConfigureInterface):
    """BasicQuery widened with background and configure interfaces; adds no behavior."""
    pass
'''
Utilities
'''
def local_ref(source, prefix=None):
    """
    Create a semantic ref for a local filename, using the full path: '/' and
    '\\' become '.', ':' and '~' are dropped, any stack of known data-file
    extensions (e.g. '.json.gz') is stripped, and leading/trailing dots are
    removed.
    Anyway, to be clear, local semantic references are not supposed to be distributed.
    :param source: path or filename to derive the ref from
    :param prefix: [None] default 'local'
    :return: '<prefix>.<dotted-path>'
    """
    if prefix is None:
        prefix = 'local'
    # Map path separators to dots; discard drive/home punctuation entirely.
    xf = source.translate(str.maketrans('/\\', '..', ':~'))
    # Peel off stacked known extensions, innermost last.
    while splitext(xf)[1] in {'.gz', '.json', '.zip', '.txt', '.spold', '.7z'}:
        xf = splitext(xf)[0]
    # strip('.') replaces the original leading/trailing-dot while-loops and,
    # unlike xf[0]/xf[-1] indexing, cannot IndexError when the translation
    # leaves an empty (or all-dot) string.
    xf = xf.strip('.')
    return '.'.join([prefix, xf])
def q_node_activity(fg):
    """
    A reference quantity for dimensionless node activity. This should be part of Qdb reference quantities (but isn't)
    :param fg:
    :return:
    """
    try:
        return fg.get_canonical('node activity')
    except EntityNotFound:
        pass
    # Not present yet: create it once, then resolve it the same way.
    fg.new_quantity('Node Activity', ref_unit='activity', external_ref='node activity', comment='MFA metric')
    return fg.get_canonical('node activity')
def enum(iterable, filt=None, invert=True):
    """
    Enumerate an iterable for interactive use. return it as a list. Optional negative filter supplied as regex
    :param iterable:
    :param filt:
    :param invert: [True] sense of filter. note default is negative i.e. to screen *out* matches
    (the thinking is that the input is already positive-filtered)
    :return:
    """
    if filt is None:
        source = iterable
    else:
        def _matches(item):
            return bool(re.search(filt, str(item), flags=re.I))
        # invert=True screens OUT matches; invert=False keeps only matches.
        if invert:
            source = (item for item in iterable if not _matches(item))
        else:
            source = (item for item in iterable if _matches(item))
    collected = []
    for idx, val in enumerate(source):
        print(' [%02d] %s' % (idx, val))
        collected.append(val)
    return collected
"""
In most LCA software, including the current operational version of lca-tools, a 'flow' is a composite entity
that is made up of a 'flowable' (substance, product, intervention, or service) and a 'context', which is
synonymous with an environmental compartment.
The US EPA review of elementary flows recommended managing the names of flowables and contexts separately, and that
is the approach that is done here.
The exchange model consists of : parent | flow(able), direction | [exch value] | [terminal node]
If the terminal node is a context, the exchange is elementary. if it's a process, then intermediate.
If none, then cutoff.
The new Flat Background already implements context-as-termination, but the main code has had to transition and we are
still technically debugging the CalRecycle project. So we introduce this flag CONTEXT_STATUS_ to express to client code
which one to do. It should take either of the two values: 'compat' means "old style" (flows have Compartments) and
'new' means use the new data model (exchange terminations are contexts)
"""
# 'compat': context = flow['Compartment']; 'new': context = exch.termination
CONTEXT_STATUS_ = 'new'

# Containers of information about linked exchanges. Direction is given with
# respect to the termination.
ExteriorFlow = namedtuple('ExteriorFlow', ('origin', 'flow', 'direction', 'termination'))
# ProductFlow = namedtuple('ProductFlow', ('origin', 'flow', 'direction', 'termination', 'component_id'))

# Lightweight record describing an entity: link, ref, name, group.
EntitySpec = namedtuple('EntitySpec', ('link', 'ref', 'name', 'group'))

# packages that contain 'providers'
antelope_herd = [
    'antelope_background',
    'antelope_foreground'
]
|
# encoding: utf-8
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from natsort import natsorted, ns
import numpy
import re
import xlsxwriter
def start():
    """Parse simulation result files and plot throughput vs. signal delay.

    Each file in ``result_exe1/`` (naturally sorted) is expected to carry,
    near its end: throughput on the 5th-from-last line (3rd token), queueing
    delay on the 4th-from-last line (6th token) and signal delay on the
    3rd-from-last line (5th token).  Shows a scatter plot (throughput x
    delay) and a bar chart of the "power" metric (throughput / delay).
    """
    directory = 'result_exe1/'
    arquivosDiretorio = natsorted(os.listdir(directory), alg=ns.IGNORECASE)  # ordena arquivos para plot
    throughput_list = []
    queueingDelay_list = []  # collected but not plotted below
    signalDelay_list = []
    for nome in arquivosDiretorio:
        try:
            # os.path.join avoids the doubled slash of directory + '/' + name.
            with open(os.path.join(directory, nome), 'r') as f:
                lines = f.read().splitlines()
            # Metrics sit at fixed positions in the tail of each file.
            throughput_list.append(lines[-5].split()[2])
            queueingDelay_list.append(lines[-4].split()[5])
            signalDelay_list.append(lines[-3].split()[4])
        except (IOError, OSError, IndexError) as e:
            # Narrowed from a bare "except:" so programming errors are not hidden.
            print('Erro na leitura dos arquivos:', e)
    x = [float(v) for v in signalDelay_list]
    y = [float(v) for v in throughput_list]
    # Potencia: throughput / delay, with the delay converted from ms to s.
    w = [y[i] / (x[i] * 0.001) for i in range(len(x))]
    # Size the bar positions from the parsed data (not the file count), so a
    # failed file no longer desynchronises the axes.
    x_ = range(1, len(w) + 1)
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(211)
    ax1.scatter(x, y, color='b', label='Taxa de transmissao x Atraso do sinal')
    ax1.set_xlabel('Atraso do sinal com percentil de 95% (ms)')
    ax1.set_ylabel('Taxa de transmissao (Mbps)')
    ax2 = plt.subplot(212)
    ax2.bar(x_, w, color='r')
    for i in range(len(w)):
        # Label each bar with its value, drawn just inside the top of the bar.
        ax2.text(x_[i] - 0.35, w[i] - 2, "{:.2f}".format(w[i]), ha='left', color="w", fontsize=8, wrap=True)
    ax2.set_xticks(x_)
    ax2.set_ylabel('Taxa de transmissao x Atraso do sinal')
    ax2.set_xlabel('Execucao')
    plt.show()


if __name__ == "__main__":
    start()
|
import smtplib
import requests
from BeautifulSoup import BeautifulSoup
import time
import random
def main():
    """Poll a URL and send an SMS-gateway alert when the watched page section changes.

    The last-seen content lives in baseline.txt; the freshly fetched snippet
    is staged in new.txt.  Loops until a change is detected, then mails the
    link via Gmail SMTP and records the new baseline.
    """
    nogood = 1
    x = 0
    while nogood == 1:
        bitly = "http://goo.gl/WNOecx"
        time.sleep(random.randint(12, 15))  # jittered polling interval
        r = requests.get(bitly)
        soup = BeautifulSoup(r.text)
        y = soup.find(id='shelfDiv').find(id="border")
        new = str(y)
        # Stage the fetched snippet.  The original opened these files with
        # 'w+', which truncates before *reading*, so both sides always
        # compared as empty strings and a change could never be detected.
        newfile = open('new.txt', 'w')
        newfile.write(new)
        newfile.close()
        reopen = open('new.txt', 'r')  # read back exactly what was persisted
        new = reopen.read()
        reopen.close()
        try:
            f = open('baseline.txt', 'r')
            old = f.read()
            f.close()
        except IOError:
            old = ''  # first run: no baseline recorded yet
        if new == old:
            print("round: " + str(x))
            x += 1
        else:
            server = smtplib.SMTP("smtp.gmail.com", 587)
            server.starttls()
            # NOTE(review): credentials are placeholders; move them out of source.
            server.login('XXXXXXXXXXXXXXXXXXX@gmail.com', 'XXXXX')
            server.sendmail("me!!", "XXXXXXXXXXXXX@messaging.sprintpcs.com", bitly)
            print('noooooooooo')
            f = open('baseline.txt', 'w')  # record the new content as the baseline
            f.write(new)
            f.close()
            nogood = 0
            break


if __name__ == "__main__":
    main()
|
"""
created by Nasim Zolaktaf, 2019
This file estimates the reaction rate constant of a reactions and returns the squared error between the predicted log reaction rate cosntant and experimental log reaction rate constant
"""
from __future__ import division
import warnings
import gc
import numpy as np
import ConfigParser
import math
import cPickle as pickle
from scipy.sparse.linalg import *
from subprocess import Popen, PIPE , call
import copy
import timeit
import myenums
import learndnakinetics
from multistrand.options import Options, Literals
from multistrand.builder import Builder, BuilderRate , transitiontype, localtype
from multistrand.concurrent import FirstStepRate, FirstPassageRate, Bootstrap, MergeSim
from multistrand.system import SimSystem
from builderChild import BuilderRate_FPEI
from paths import PathHelix, PathHairpin
from multistrand.objects import StopCondition
import os
# Read the shared learner configuration (ConfigParser / readfp imply Python 2).
configParser = ConfigParser.ConfigParser()
configParser.readfp(open(r'../learndnakinetics/config_file.txt'))
CONFIG_NAME = 'parent'
# MFPT estimation mode switches read from the [parent] section;
# presumably exactly one of the two is enabled — confirm in the config file.
use_FPEI_MFPT = bool(configParser.getint(CONFIG_NAME, 'use_FPEI_MFPT'))
use_Gillespie_MFPT = bool(configParser.getint(CONFIG_NAME, 'use_Gillespie_MFPT'))
# Sentinel meaning "rate could not be computed" (compared with == below).
RETURN_MINUS_INF = None
LNA_INDEX = 0
E_INDEX = 1
# Dictionary keys for the kinetic-parameter mappings built in ParentComplex.
bimolecular_scaling = "bimolecular_scaling"
unimolecular_scaling = "unimolecular_scaling"
class Output(object):
    """Result container returned to learndnakinetics.py.

    Arbitrary keyword arguments become attributes of the instance.
    """

    def __init__(self, **kwargs):
        self.set_specific(**kwargs)

    def set_specific(self, **kwargs):
        """Attach (or overwrite) each keyword argument as an attribute."""
        for name in kwargs:
            setattr(self, name, kwargs[name])
class ParentComplex(object):
    """Wraps one experimental reaction (dataset) and estimates its mean first
    passage time (MFPT) with Multistrand, either by direct stochastic
    (Gillespie/SSA) simulation or with the FPEI pathway-based estimator.

    NOTE(review): indentation in this file was reconstructed; rate_method and
    the set_*_params helpers are defined elsewhere in the module.
    """

    def __init__(self , dataset) :
        # Map the optimizer's parameter vector (theta) into the kinetic
        # parameter dictionary used by the simulator; the layout depends on
        # the configured rate model.
        if rate_method == Literals.arrhenius :
            theta = dataset.theta_simulation
            self.kinetic_parameters_simulation = {localtype.stack: (theta[0] ,theta[1]) ,
                                                  localtype.loop: (theta[2] ,theta[3]),
                                                  localtype.end: (theta[4] ,theta[5]),
                                                  localtype.stackloop: (theta[6] ,theta[7]),
                                                  localtype.stackend: (theta[8] ,theta[9]),
                                                  localtype.loopend: (theta[10] ,theta[11]),
                                                  localtype.stackstack: (theta[12] ,theta[13]),
                                                  bimolecular_scaling : (theta[14]) }
        elif rate_method == Literals.metropolis :
            theta = dataset.theta_simulation
            self.kinetic_parameters_simulation ={ unimolecular_scaling :theta[0] , bimolecular_scaling :theta[1] } #used in matrix computations
        else:
            raise ValueError('Error: Please specify rate_method to be Arrhenius or Metropolis in the configuration file!')
        # Copy the experiment attributes used throughout the estimation.
        self.strands_list = dataset.strands_list
        self.use_initialfinalfrompathway= dataset.use_initialfinalfrompathway
        self.dataset_type = dataset.dataset_type
        self.reaction_type =dataset.reaction_type
        self.dataset_name = dataset.dataset_name
        self.startStates = None
        self.load = dataset.load
        self.save = dataset.save
        self.real_rate = dataset.real_rate
        self.reaction_type =dataset.reaction_type
        self.bimolecular_reaction = dataset.bimolecular_reaction
        self.dataset_path = dataset.dataset_path
        self.docID = dataset.docID
        self.temperature = dataset.temperature
        self.join_concentration = float(dataset.join_concentration ) # concentration has to be a float (join_concentration in Multistrand has to be set to a float)
        self.sodium = dataset.sodium
        self.magnesium = dataset.magnesium
        self.num_simulations= dataset.num_simulations
        self.simulation_time = dataset.simulation_time
        self.join_concentration_change =1 # Do not change this
        self.temperature_change = 0 #Do not change this
        # Per-trajectory results, keyed by trajectory index.
        self.loglikelihoods= dict()
        self.MFPTs= dict()
        self.original_loglikelihoods= dict( )
        # Choose the pathway template matching the reaction type (the boolean
        # selects the reaction direction for each pathway class).
        if self.reaction_type == myenums.ReactionType.HELIXDISSOCIATION.value :
            pathway= PathHelix( False , self.strands_list , self.reaction_type, self.dataset_name, self.dataset_type,cutoff = dataset.cutoff )
        elif self.reaction_type == myenums.ReactionType.HELIXASSOCIATION.value :
            pathway = PathHelix( True, self.strands_list , self.reaction_type, self.dataset_name, self.dataset_type,cutoff =dataset.cutoff )
        elif self.reaction_type == myenums.ReactionType.HAIRPINCLOSING.value :
            pathway = PathHairpin( True , self.strands_list , self.reaction_type, self.dataset_name, self.dataset_type ,cutoff =dataset.cutoff)
        elif self.reaction_type == myenums.ReactionType.HAIRPINOPENING.value :
            pathway = PathHairpin( False , self.strands_list , self.reaction_type, self.dataset_name, self.dataset_type , cutoff =dataset.cutoff)
        if self.use_initialfinalfrompathway == True :
            self.startStates = pathway.get_intial_final()

    def find_meanfirstpassagetime(self):
        """Log the run settings, then dispatch to the configured MFPT estimator."""
        attributes_file = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+ "/atrributes" +str(self.docID)+".txt"
        attributes_file = open(attributes_file, 'a')
        attributes_file.write("num_simulations: " +str(self.num_simulations) + " simulations_time: " + str(self.simulation_time) +" concentration_change: " + str(self.join_concentration_change) + " concentration: " + str(self.join_concentration )+ " temperature: " + str(self.temperature )+ " temprature_change: " + str(self.temperature_change)+ " sodium: " +str( self.sodium)+ " magnesium: "+ str(self.magnesium) +"\n")
        attributes_file.close()
        if use_FPEI_MFPT == True :
            return self.find_meanfirstpassagetime_FPEI( )
        elif use_Gillespie_MFPT == True :
            return self.find_meanfirstpassagetime_Gillespie()

    def print_path(self,o):
        """Print the trajectory (structures, times, energies) stored in a finished Options object."""
        print o.full_path[0][0][3] # the strand sequence #if you get list index out of range, set this options.output_interval = 1
        print o.start_state[0].structure # the starting structure
        for i in range(len(o.full_path)):
            time = o.full_path_times[i]
            state = o.full_path[i][0]
            struct = state[4]
            sequence = state[3]
            dG = state[5]
            print struct + ' t=%11.9f seconds, dG=%6.2f kcal/mol' % (time, dG)

    """Estimating the Mean First Passage Time with the Gillespie SSA """
    def find_meanfirstpassagetime_Gillespie(self) :
        """Estimate the MFPT by running num_simulations stochastic trajectories."""
        startime = timeit.default_timer()
        options = doReaction([self.num_simulations,self.simulation_time, self.reaction_type, self.dataset_type, self.strands_list , self.sodium, self.magnesium, self.kinetic_parameters_simulation, self.bimolecular_reaction, self.temperature , self.temperature_change, self.join_concentration_change , self.join_concentration, rate_method , self.use_initialfinalfrompathway, self.startStates])
        #options.output_interval = 1 #to store all the transitions!!!!!!!!!!!!!!!!!!
        s = SimSystem(options )
        s.start()
        myRates = FirstPassageRate( options.interface.results)
        del s
        finishtime = timeit.default_timer()
        #print "average sampling time is " , str ( (finishtime -startime ) / self.num_simulations )
        # NOTE(review): k1() of the fitted FirstPassageRate is used directly
        # as the estimate here — confirm against find_answers' unit handling.
        mfpt = myRates.k1()
        del myRates
        gc.collect()
        return mfpt

    def call_Builder(self, num_simulations ) :
        """Build the truncated state space for this reaction and return the Builder.

        The state space is 'fattened' in batches via the external
        fattenhelper.py script: builder state is pickled, the helper runs in
        a fresh interpreter, and its results are merged back in (works around
        memory leaks in builder.fattenStateSpace).
        """
        myBuilder = Builder(doReaction, [ num_simulations, self.simulation_time, self.reaction_type, self.dataset_type, self.strands_list , self.sodium, self.magnesium, self.kinetic_parameters_simulation, self.bimolecular_reaction,self.temperature , self.temperature_change, self.join_concentration_change , self.join_concentration, rate_method , self.use_initialfinalfrompathway, self.startStates])
        start_time = timeit.default_timer()
        if self.num_simulations > 0 :
            myBuilder.genAndSavePathsFile(supplyInitialState= self.startStates[0])
        builderpath = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+"/"+myenums.Permanent_Folder.MYBUILDER.value+str(self.docID)
        lenp = len( myBuilder.protoSpace)
        start = 0
        myBuilder.protoSpacebackup= copy.deepcopy(myBuilder.protoSpace)
        # Persist builder state so the helper subprocess can restore it.
        pathuniqueid = builderpath + myenums.EDITIONAL.UNIQUEID.value
        with open(pathuniqueid , "wb" ) as p :
            pickle.dump(myBuilder.uniqueID_number, p )
        pathspace= builderpath + myenums.EDITIONAL.PROTOSPACEBACKUP.value
        with open(pathspace , "wb" ) as p :
            pickle.dump(myBuilder.protoSpacebackup, p )
        pathsequences= builderpath + myenums.EDITIONAL.PROTOSEQUENCES.value
        with open(pathsequences , "wb" ) as p :
            pickle.dump(myBuilder.protoSequences, p )
        pathoptions = builderpath + myenums.EDITIONAL.PATHOPTIONS.value
        with open(pathoptions , "wb" ) as p :
            pickle.dump(myBuilder.optionsArgs, p )
        batchsize = 2000
        while start < lenp :
            st = timeit.default_timer ( )
            end = min(lenp , start+batchsize)
            print "progress " , str(end) , " / ", str(lenp) , self.docID
            # There was some memory leak issues when I used the fattenStateSpace function in builder.py, so I added fatten helper to avoid the memory issues by saving intermediate results, restarting Multistrand, and restoring the intermediate results
            command = ["python", "fattenhelper.py" , str(start), str(end) , builderpath, pathspace, pathsequences, pathoptions, pathuniqueid]
            shell = call(command )
            ft = timeit.default_timer()
            #print "making fatten state space time" , ft-st
            del shell
            # Load the batch produced by the helper and merge it back in.
            with open( builderpath + myenums.EDITIONAL.TEMPSTATESPACE.value + str(start)+"-" +str(end) , "rb" ) as p:
                tempstatespace = pickle.load ( p)
            with open( builderpath + myenums.EDITIONAL.TEMPTRANSITIONS.value + str(start)+"-" +str(end) , "rb" ) as p:
                temptransitions = pickle.load( p)
            os.remove(builderpath + myenums.EDITIONAL.TEMPSTATESPACE.value + str(start)+"-" +str(end))
            os.remove(builderpath + myenums.EDITIONAL.TEMPTRANSITIONS.value + str(start)+"-" +str(end))
            myBuilder.mergeSet(myBuilder.protoSpace, tempstatespace)
            myBuilder.mergeSet(myBuilder.protoTransitions_FPEI, temptransitions )
            start = end
        with open(pathuniqueid , "rb" ) as p :
            myBuilder.uniqueID_number = pickle.load( p )
        # Clean up the hand-off pickles.
        os.remove(pathuniqueid)
        os.remove(pathspace)
        os.remove(pathsequences)
        os.remove(pathoptions)
        del myBuilder.protoSpacebackup
        del myBuilder.uniqueID_number
        print "Statistics: " , "statespace: ", len(myBuilder.protoSpace), "finalstates: ", len(myBuilder.protoFinalStates), "initialstates: ", len(myBuilder.protoInitialStates)
        return myBuilder

    def get_builder(self , ignoreSavedBuilder = False , ignoreNumSimulations = False , picklepath = "" ):
        """Return a Builder for this reaction — freshly built, or unpickled from
        picklepath — with the current kinetic parameters (re)applied."""
        if ignoreNumSimulations == True :
            num_simulations = 1
        else:
            num_simulations = self.num_simulations
        attributes_file = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+ "/atrributes" +str(self.docID)+".txt"
        attributes_file = open(attributes_file, 'a')
        if self.load == False or ignoreSavedBuilder == True :
            myBuilder = self.call_Builder( num_simulations )
            if len(myBuilder.protoFinalStates) == 0 or len(myBuilder.protoInitialStates) == 0 :
                raise Exception('"no final states found! multiply simulation_time by 1000. in except block !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1"')
        else :
            start_time = timeit.default_timer()
            attributes_file.write( "Starting to load Builder ")
            #print "starting to load builder"
            with open(picklepath, "rb") as p:
                myBuilder= pickle.load(p)
            attributes_file.write("finished loading Builder took : " +str(timeit.default_timer() -start_time) + "\n")
        # Re-apply the current parameter vector to the (possibly unpickled) options.
        if rate_method == Literals.metropolis:
            set_Metropolis_params(myBuilder.options, self.kinetic_parameters_simulation)
        elif rate_method == Literals.arrhenius:
            set_Arrhenius_params(myBuilder.options,self.kinetic_parameters_simulation)
        else:
            raise ValueError(' Parameter method not set correctly ' )
        return myBuilder

    """Estimating the Mean First Passage Time with FPEI """
    def find_meanfirstpassagetime_FPEI(self ) :
        """Estimate the MFPT with FPEI.

        Behavior depends on the optimizer iteration (learndnakinetics.iter):
        early iterations generate fresh trajectories and pickle the builders;
        later iterations reload the pickled BuilderRate objects and re-weight
        them with the current kinetic parameters.
        """
        attributes_file = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+ "/atrributes" +str(self.docID)+".txt"
        attributes_file = open(attributes_file, 'a')
        attributes_file.write("use_string: " + str(self.use_initialfinalfrompathway)+ " num_simulations: " +str(self.num_simulations) + " simulations_time: " + str(self.simulation_time) + " concentration: " + str(self.join_concentration )+ " temperature: " + str(self.temperature )+ " concentration_change: " + str(self.join_concentration_change) + " temprature_change: " + str(self.temperature_change)+ " sodium: " +str( self.sodium)+ " magnesium: "+ str(self.magnesium))
        successful_simulation = False
        while successful_simulation == False :
            random_time = False
            trial_count = self.num_simulations
            # After iteration `switch`, reuse the previously generated paths
            # ("original = False") instead of generating new ones.
            switch = 1
            if learndnakinetics.iter >= switch:
                original = False
            else :
                original = True
            self.original = original
            path_file = self.dataset_path+"/"+myenums.Permanent_Folder.TRAJECTORY.value+"/"+myenums.Permanent_Folder.TRAJECTORY.value+str(self.docID)

            def building_paths() :
                # Generate num_simulations fresh trajectories, pickling each Builder.
                start= 1
                num_simulations_temp =start
                builderpath = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+"/"+myenums.Permanent_Folder.MYBUILDER.value+str(self.docID)
                #solutions_builderRate = 0
                while num_simulations_temp <= self.num_simulations :
                    myBuilder =self.get_builder(ignoreSavedBuilder = True , ignoreNumSimulations = True)
                    builderRate_FPEI= BuilderRate_FPEI(myBuilder, trial_count =trial_count, random_time = random_time, stopStep = 100000 ,path_file = path_file + "-" + str( num_simulations_temp -1 ) + "-" , original= original, num_simulations_temp = num_simulations_temp, parameter_folder = learndnakinetics.parameter_folder )
                    num_simulations_temp +=1
                    loglikelihood1 , MFPT1, original_loglikelihood1= builderRate_FPEI.get_essentials ( )
                    # num_simulations_temp was already incremented, hence the -2 index.
                    self.loglikelihoods [ num_simulations_temp -2] = loglikelihood1
                    self.MFPTs [num_simulations_temp -2] = MFPT1
                    self.original_loglikelihoods[ num_simulations_temp -2] = original_loglikelihood1
                    dir = builderpath
                    if not os.path.exists(dir):
                        os.makedirs(dir)
                    with open(dir +"/" +str(num_simulations_temp - 2 ) , "wb") as f:
                        pickle.dump( myBuilder , f)
                    del myBuilder
                    del builderRate_FPEI
                self.lengthPaths = self.num_simulations
                solutions_builderRate = self.averageTime()
                return solutions_builderRate

            if original == True :
                solutions_builderRate = building_paths()
            else:
                if learndnakinetics.iter <= 1 :
                    # First reuse iteration: load the pickled Builders, trim them
                    # down to the states on the sampled path, and pickle the
                    # resulting BuilderRate objects for later iterations.
                    builderpath = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+"/"+myenums.Permanent_Folder.MYBUILDER.value+str(self.docID)
                    builderratepath = builderpath + myenums.EDITIONAL.BUILDERRATE.value
                    num_simulations_temp = 0
                    while num_simulations_temp < self.num_simulations :
                        num_simulations_temp +=1
                        myBuilder = self.get_builder(picklepath=builderpath +"/" + str(num_simulations_temp - 1 ) )
                        builderRate_FPEI= BuilderRate_FPEI(myBuilder, trial_count =trial_count, random_time = random_time, stopStep = 100000 ,path_file = path_file + "-" + str( num_simulations_temp -1 ) + "-" , original= original, num_simulations_temp = num_simulations_temp , parameter_folder= learndnakinetics.parameter_folder )
                        loglikelihood1 , MFPT1, original_loglikelihood1= builderRate_FPEI.get_essentials ( )
                        self.loglikelihoods [ num_simulations_temp -1] = loglikelihood1
                        self.MFPTs [num_simulations_temp - 1 ] = MFPT1
                        self.original_loglikelihoods[ num_simulations_temp -1 ] = original_loglikelihood1
                        dir =builderratepath
                        if not os.path.exists(dir ):
                            os.makedirs(dir)
                        # Shrink the object before pickling: drop bulky members and
                        # keep only the states touched by the sampled path.
                        del builderRate_FPEI.build.neighbors_FPEI
                        del builderRate_FPEI.build.protoTransitionsCount
                        del builderRate_FPEI.build.protoSequences
                        del builderRate_FPEI.build.protoTransitions_FPEI
                        builderRate_FPEI.build.protoTransitions_FPEI = copy.deepcopy( builderRate_FPEI.build.protoTransitions )
                        del builderRate_FPEI.build.protoTransitions
                        newprotoSpace=dict()
                        for state1, state2 in builderRate_FPEI.paths[0].edges :
                            newprotoSpace[state1] = builderRate_FPEI.build.protoSpace[state1]
                            newprotoSpace[state2] = builderRate_FPEI.build.protoSpace[state2]
                        builderRate_FPEI.build.protoSpace = copy.deepcopy(newprotoSpace)
                        del newprotoSpace
                        with open (dir + "/" + str(num_simulations_temp -1 ) , "wb") as picklerbuilderrate :
                            pickle.dump ( builderRate_FPEI, picklerbuilderrate)
                        del myBuilder
                        del builderRate_FPEI
                    self.lengthPaths = num_simulations_temp
                    solutions_builderRate= self.averageTime()
                else:
                    # Later iterations: reload the pickled BuilderRate objects and
                    # re-weight them with the current kinetic parameters.
                    builderpath = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+"/"+myenums.Permanent_Folder.MYBUILDER.value+str(self.docID)
                    builderratepath = builderpath + myenums.EDITIONAL.BUILDERRATE.value
                    countbuilderrates= 0
                    num_simulations_temp = 0
                    while num_simulations_temp < self.num_simulations :
                        with open(builderratepath +"/"+ str(num_simulations_temp), "rb") as picklerbuilderrate:
                            builderRate_FPEI = pickle.load(picklerbuilderrate )
                        countbuilderrates +=1 #do not move this line or the counter which is used to make avg mfpt would be wrong!
                        if rate_method == Literals.metropolis:
                            set_Metropolis_params(builderRate_FPEI.build.options, self.kinetic_parameters_simulation)
                        elif rate_method == Literals.arrhenius:
                            set_Arrhenius_params(builderRate_FPEI.build.options,self.kinetic_parameters_simulation)
                        else:
                            raise ValueError(' Parameter method not set correctly ' )
                        num_simulations_temp +=1
                        builderRate_FPEI.reset( trial_count =trial_count, random_time = random_time, stopStep = 100000 ,path_file = path_file + "-" + str( num_simulations_temp -1 ) + "-" , original= original, num_simulations_temp = num_simulations_temp , parameter_folder= learndnakinetics.parameter_folder )
                        loglikelihood1 , MFPT1, original_loglikelihood1= builderRate_FPEI.get_essentials ( )
                        self.loglikelihoods [ countbuilderrates -1 ] = loglikelihood1
                        self.MFPTs [countbuilderrates-1] = MFPT1
                        self.original_loglikelihoods[ countbuilderrates-1] = original_loglikelihood1
                        del builderRate_FPEI
                    self.lengthPaths = countbuilderrates
                    solutions_builderRate = self.averageTime()
            successful_simulation = True
        if self.save == True :
            attributes_file.write("started saving builder")
            start_time = timeit.default_timer()
            attributes_file.write("finished saving builder" +str(timeit.default_timer() - start_time)+ "\n")
        gc.collect()
        return solutions_builderRate

    def averageTime(self):
        """Return the MFPT averaged over the collected trajectories."""
        return self.averageTime_normalweighing( )

    """the Mean First Passage Time is calculated where every path has the same weight"""
    def averageTime_normalweighing(self):
        """Average self.MFPTs[0..lengthPaths-1] with equal weights."""
        avg_MFPT = 0
        length =self.lengthPaths
        #print "Individual MFPTs of paths for reaction is ", self.MFPTs
        for i in range ( length ) :
            mfpt =self.MFPTs[i]
            avg_MFPT += mfpt
        avg_MFPT = avg_MFPT / length
        #print "MFPT of reaction is " , avg_MFPT
        return avg_MFPT

    """ Calculates Mean First Passage Time, from either FPEI or SSA, and then returns the log reaction rate constant"""
    def find_answers(self):
        """Estimate the MFPT, convert it to a log10 rate constant, and return an
        Output holding the squared error against the experimental rate."""
        concentration = self.join_concentration
        real_rate = self.real_rate
        bimolecular_reaction = self.bimolecular_reaction
        meanfirstpassagetime = self.find_meanfirstpassagetime()
        attributes_file = self.dataset_path+"/"+myenums.Permanent_Folder.MYBUILDER.value+ "/atrributes" +str(self.docID)+".txt"
        attributes_file = open(attributes_file, 'a')
        if meanfirstpassagetime == RETURN_MINUS_INF or meanfirstpassagetime<= 0:
            #meanfirstpassagetime should be greater then 0!
            attributes_file.write( "None or Negative MFPT is: "+self.docID +str(meanfirstpassagetime )+ "\n")
            return Output( error = np.inf , predicted_log_10_rate = np.inf , real_log_10_rate= np.inf )
        #calculating reaction rate constant from mean first passage time.
        if use_FPEI_MFPT == True :
            if bimolecular_reaction == True :
                predicted_rate= 1.0 / (meanfirstpassagetime * concentration)
            else :
                predicted_rate= 1.0 / meanfirstpassagetime
        elif use_Gillespie_MFPT == True:
            # NOTE(review): in the Gillespie branch no reciprocal is taken —
            # find_meanfirstpassagetime_Gillespie returns FirstPassageRate.k1();
            # confirm the intended units of that value.
            if bimolecular_reaction == True :
                predicted_rate = meanfirstpassagetime * (1./concentration)
            else:
                predicted_rate = meanfirstpassagetime
        # Turn numpy warnings (e.g. log10 of a non-positive value) into errors
        # so they are caught and reported below.
        warnings.filterwarnings('error')
        try :
            predicted_log_10_rate =np.log10(predicted_rate)
            real_log_10_rate = np.log10(real_rate)
            error = math.pow( real_log_10_rate - predicted_log_10_rate, 2)
        except Exception as e :
            print "exception " , e, e.args
            return Output( error = np.inf , predicted_log_10_rate = np.inf , real_log_10_rate= np.inf )
        gc.collect()
        attributes_file.write( " error: " + str(error) + " real_log_10_rate: " +str (real_log_10_rate) + " predicted_log_10_rate: " +str (predicted_log_10_rate) + "\n" )
        attributes_file.write("Next iteration *************************************************************************************************** "+"\n")
        return Output( error = error , predicted_log_10_rate = predicted_log_10_rate , real_log_10_rate= real_log_10_rate )
def doReaction(arguments):
    """Build a Multistrand Options object from the packed argument list.

    arguments[0] is always the number of paths (trials); the remaining
    slots carry simulation settings in a fixed positional layout."""
    n_paths = arguments[0]
    sim_options = Options(trials=n_paths)
    # sim_options.output_interval = 1  # do not uncomment ---> might get memory issues
    sim_options.num_simulations = n_paths
    sim_options.simulation_time = arguments[1]
    sim_options.sodium = arguments[5]
    sim_options.magnesium = arguments[6]
    sim_options.temperature = arguments[9] + arguments[10]
    sim_options.join_concentration = arguments[12] * arguments[11]
    rate_model = arguments[13]
    if rate_model == Literals.metropolis:
        set_Metropolis_params(sim_options, arguments[7])
    elif rate_model == Literals.arrhenius:
        set_Arrhenius_params(sim_options, arguments[7])
    sim_options.simulation_mode = Literals.trajectory
    if arguments[14] == True:
        # The last complex of the supplied state list defines success.
        final_complex = arguments[15][-1][0]
        success_condition = StopCondition(Literals.success, [(final_complex, Literals.exact_macrostate, 0)])
        sim_options.stop_conditions = [success_condition]
        if use_Gillespie_MFPT == True:
            sim_options.start_state = arguments[15][0]
    return sim_options
def doReaction2(n_trials, arguments):
    """Overwrite the trial count (slot 0) of *arguments* and delegate to doReaction.

    NOTE: mutates the caller's argument list in place."""
    arguments[0] = n_trials
    return doReaction(arguments)
"""Setting parameters for the Arrhenius kinetic model"""
def set_Arrhenius_params(options, params):
    """Copy Arrhenius kinetic-model parameters from *params* onto *options*.

    Each local nucleotide context contributes an lnA (pre-exponential) term
    and an E (activation energy) term; bimolecular scaling is set last."""
    options.rate_method = Literals.arrhenius
    contexts = (
        ('Stack', localtype.stack),
        ('Loop', localtype.loop),
        ('End', localtype.end),
        ('StackLoop', localtype.stackloop),
        ('StackEnd', localtype.stackend),
        ('LoopEnd', localtype.loopend),
        ('StackStack', localtype.stackstack),
    )
    for suffix, key in contexts:
        setattr(options, 'lnA' + suffix, float(params[key][LNA_INDEX]))
        setattr(options, 'E' + suffix, float(params[key][E_INDEX]))
    options.bimolecular_scaling = float(params[bimolecular_scaling])
"""Setting parameters for the Arrhenius kinetic model"""
def set_Metropolis_params(options, params):
    """Set Metropolis kinetic-model rate parameters on a Multistrand options object.

    params maps the module-level unimolecular_scaling / bimolecular_scaling
    keys to their scaling factors."""
    options.rate_method = Literals.metropolis
    options.unimolecular_scaling = float(params[unimolecular_scaling])
    options.bimolecular_scaling = float(params[bimolecular_scaling])
def main(complex):
    """Worker-pool entry point: run find_answers on the given reaction object.

    NOTE(review): the parameter name shadows the builtin `complex` type."""
    return complex.find_answers()
|
print "A circle/sphere calculator for easy conversion:"
radius = raw_input("radius of a circle(cm) = ")
x = radius
circumference = float(x)*2*3.14159
area = float(x)*float(x)*3.14159
volume = (float(x)*float(x)*float(x)*4*3.14159)/3
print 'circumference of the circle =',circumference,'cm'
print 'area of the circle =', area,'sq.cm'
print 'volume of the sphere =', volume, 'cubic cm'
print 'Would you like to use an english unit? '
ans = raw_input('answer (y or n) = ')
if ans == 'y': #inch calculation
y = radius
circumference = float(x)*2*3.14159/2.54
area = float(y)*float(y)*3.14159/((2.54)**2)
volume = float(y)*float(y)*float(y)*4*3.14159/(3*((2.54)**3))
print 'circumference of the circle =',circumference,'in.'
print 'area of the circle =', area,'sq.in.'
print 'volume of the sphere =', volume, 'cubic in.'
if ans == 'n':
exit(0)
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You pressed something!\n"
]
}
],
"source": [
"##simple GUI\n",
"\"\"\"\n",
"Very simple GUI example with Tkinter\n",
"\n",
"Author: Abdikaiym Zhiyenbek\n",
"\"\"\"\n",
"\n",
"## --------------------------------------------------------------------------------------------------------------------\n",
"# Import packages and modules:\n",
"# ---------------------------------------------------------------------------------------------------------------------\n",
"# import packages:\n",
"from tkinter import *\n",
"from tkinter import ttk\n",
"\n",
"## --------------------------------------------------------------------------------------------------------------------\n",
"# Start the GUI\n",
"# ---------------------------------------------------------------------------------------------------------------------\n",
"class SimpleGUI:\n",
" def __init__(self, start_window):\n",
"\n",
" ## ------------------------------------------------------------------------------------------------------------\n",
" # Start window\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" self.start_window = start_window\n",
" start_window.title('Very Simple GUI')\n",
" # master.resizable(False, False)\n",
"\n",
" # # Logo:\n",
" sse_logo = 'C:\\\\Users\\\\Walker\\\\Documents\\\\Python\\\\ESD_Logo.png'\n",
" originalSSEImage = PhotoImage(file=sse_logo)\n",
" displaySSEImage = originalSSEImage.subsample(2, 2)\n",
" self.sse_logo = displaySSEImage\n",
"\n",
" ttk.Label(start_window, text='Version 1.0').grid(row=0, column=0, sticky='w', padx=10,\n",
" pady=(10, 0))\n",
"\n",
" ttk.Label(start_window, image=self.sse_logo).grid(row=3, column=0, padx=10, pady=10, columnspan=2)\n",
"\n",
" ttk.Label(start_window, text='Chair of Ecological Systems Design (ESD)').grid(row=4, column=0, sticky='w',\n",
" padx=(20, 0), pady=0)\n",
" ttk.Label(start_window, text=u\"\\N{COPYRIGHT SIGN}\" ' 2017 ETH Zurich').grid(row=5, column=0, sticky='w',\n",
" padx=(20, 0), pady=0)\n",
"\n",
" ttk.Button(start_window, text='Exit', command=self.exit_start_window).grid(row=5, column=1, padx=50, pady=10,\n",
" sticky='e')\n",
"\n",
" ## ------------------------------------------------------------------------------------------------------------\n",
" # About window:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" ttk.Button(start_window, text='About', command=self.about).grid(row=2, column=0, sticky='w', padx=10,\n",
" pady=(0, 10))\n",
"\n",
" ## ------------------------------------------------------------------------------------------------------------\n",
" # Main Menu:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # initiate main menu variables:\n",
" self.main_window = None\n",
" self.notebook_main_window = None\n",
" self.first_tab_frame = None\n",
" self.second_tab_frame = None\n",
"\n",
" self.frame_1_tab_1 = None\n",
" self.frame_2_tab_1 = None\n",
" self.frame_3_tab_1 = None\n",
" self.frame_4_tab_1 = None\n",
" self.frame_5_tab_1 = None\n",
" self.frame_1_tab_2 = None\n",
" self.frame_2_tab_2 = None\n",
" self.frame_3_tab_2 = None\n",
" self.frame_4_tab_2 = None\n",
" self.frame_5_tab_2 = None\n",
"\n",
" self.scenario_window = None\n",
" self.scenario_window_frame = None\n",
" self.number_of_scen = None\n",
" self.scenario_number = None\n",
"\n",
" self.impact_method_var1 = None\n",
" self.impact_method_combobox_label_1 = None\n",
" self.impact_method_combobox_1 = None\n",
" self.impact_method_var2 = None\n",
" self.impact_method_combobox_label_2 = None\n",
" self.impact_method_combobox_2 = None\n",
"\n",
" self.some_entry_var = None\n",
" self.some_entry_label = None\n",
" self.some_entry = None\n",
" self.turn_on_combobox_var = None\n",
" self.turn_on_combobox_label = None\n",
" self.turn_on_combobox = None\n",
" self.second_combobox_var = None\n",
" self.second_combobox_label = None\n",
" self.second_combobox = None\n",
" self.third_combobox_var = None\n",
" self.third_combobox_label = None\n",
" self.third_combobox = None\n",
" self.number_of_scenarios = None\n",
" self.number_of_scenarios_label = None\n",
" self.new_window = None\n",
" self.new_window_frame = None\n",
"\n",
" def main_menu_functions():\n",
" self.main_menu()\n",
" self.add_menu_line()\n",
"\n",
" ttk.Button(start_window, text='Start', command=main_menu_functions).grid(row=4, column=1, padx=50, pady=5,\n",
" sticky='e')\n",
"\n",
" ## ----------------------------------------------------------------------------------------------------------------\n",
" # (2) About window\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" def about(self):\n",
" about_window = Toplevel(self.start_window)\n",
" about_window.title('About')\n",
" about_window.lift(self.start_window)\n",
"\n",
" notebook_about_window = ttk.Notebook(about_window)\n",
" notebook_about_window.pack()\n",
" #\n",
" frame_about_1 = ttk.Frame(notebook_about_window)\n",
" frame_about_2 = ttk.Frame(notebook_about_window)\n",
"\n",
" notebook_about_window.add(frame_about_1, text='About')\n",
" notebook_about_window.add(frame_about_2, text='Author')\n",
"\n",
"\n",
" frame_about_1.config(height=200, width=300)\n",
" frame_about_2.config(height=200, width=300)\n",
"\n",
"\n",
" def close_about():\n",
" about_window.destroy()\n",
"\n",
" # Frame 1:\n",
" ttk.Label(frame_about_1, wraplength=300,\n",
" text='Very simple GUI'\n",
"\n",
" ).grid(row=0, column=0, sticky='w', padx=(5, 5), pady=(10, 10))\n",
" ttk.Label(frame_about_1, text=u\"\\N{COPYRIGHT SIGN}\" ' 2017 ETH Zurich').grid(row=1, column=0, sticky='w',\n",
" padx=(5, 5),\n",
" pady=(10, 10))\n",
" # Frame 2:\n",
" ttk.Label(frame_about_2, wraplength=300, text='\\nEmail: someone@ifu.baug.ethz.ch'\n",
" ).grid(row=0, column=0, sticky='w', padx=(5, 5), pady=(10, 10))\n",
"\n",
" ttk.Button(frame_about_1, text='Close', command=close_about).grid(row=7, column=0, padx=(10, 20), pady=(20, 20)\n",
" , sticky='e')\n",
"\n",
" ## ----------------------------------------------------------------------------------------------------------------\n",
" # Close windows\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" def exit_start_window(self):\n",
" self.start_window.destroy()\n",
"\n",
" def close_main_menu(self):\n",
" self.main_window.destroy()\n",
"\n",
" def close_new_window(self):\n",
" self.new_window.destroy()\n",
"\n",
" ## ----------------------------------------------------------------------------------------------------------------\n",
" # (3) Main menu window\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" def main_menu(self):\n",
" self.main_window = Toplevel(self.start_window)\n",
" self.main_window.title('Main menu')\n",
" self.main_window.lift(self.start_window)\n",
" # main_window.state('zoomed') # full screen\n",
"\n",
" # to make sure other copies of the window not to open:\n",
" self.main_window.focus_set() # sets the focus to the main menu window\n",
" self.main_window.grab_set() # prohibits any other window to accept events\n",
"\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # tabs:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" self.notebook_main_window = ttk.Notebook(self.main_window)\n",
" self.notebook_main_window.pack()\n",
"\n",
" # parent frames:\n",
" self.first_tab_frame = ttk.Frame(self.notebook_main_window)\n",
" self.first_tab_frame.grid(row=0, column=0, sticky=W)\n",
"\n",
" self.second_tab_frame = ttk.Frame(self.notebook_main_window)\n",
" self.second_tab_frame.grid(row=0, column=0, sticky=W)\n",
" self.notebook_main_window.add(self.first_tab_frame, text='First tab')\n",
" self.notebook_main_window.add(self.second_tab_frame, text='Second tab')\n",
"\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # first tab:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" self.frame_1_tab_1 = ttk.LabelFrame(self.first_tab_frame, height=60, width=700,\n",
" text='Some frame name')\n",
" self.frame_1_tab_1.grid(row=0, column=0, padx=10, pady=10)\n",
" self.frame_1_tab_1.grid_propagate(False)\n",
"\n",
" self.frame_2_tab_1 = ttk.LabelFrame(self.first_tab_frame, height=130, width=700,\n",
" text='Some frame name')\n",
" self.frame_2_tab_1.grid(row=1, column=0, padx=10, pady=10)\n",
" self.frame_2_tab_1.grid_propagate(False)\n",
"\n",
" self.frame_3_tab_1 = ttk.LabelFrame(self.first_tab_frame, height=50, width=700,\n",
" text='Some frame name')\n",
" self.frame_3_tab_1.grid(row=2, column=0, padx=10, pady=10)\n",
" self.frame_3_tab_1.grid_propagate(False)\n",
"\n",
" self.frame_4_tab_1 = ttk.LabelFrame(self.first_tab_frame, height=80, width=450,\n",
" text='Some frame name')\n",
" self.frame_4_tab_1.grid(row=3, column=0, padx=10, pady=10, sticky=W)\n",
" self.frame_4_tab_1.grid_propagate(False)\n",
"\n",
" self.frame_5_tab_1 = ttk.LabelFrame(self.first_tab_frame, height=80, width=200)\n",
" self.frame_5_tab_1.grid(row=3, column=0, padx=10, pady=10, sticky=E)\n",
" self.frame_5_tab_1.grid_propagate(False)\n",
"\n",
" # Cancel button:\n",
" ttk.Button(self.frame_5_tab_1, text='Cancel', command=self.close_main_menu).grid(row=3,\n",
" column=0, padx=60,\n",
" pady=15)\n",
"\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # second tab:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" self.frame_1_tab_2 = ttk.LabelFrame(self.second_tab_frame, height=60, width=700,\n",
" text='Some frame name')\n",
" self.frame_1_tab_2.grid(row=0, column=0, padx=10, pady=10)\n",
" self.frame_1_tab_2.grid_propagate(False)\n",
"\n",
" self.frame_2_tab_2 = ttk.LabelFrame(self.second_tab_frame, height=130, width=700,\n",
" text='Some frame name')\n",
" self.frame_2_tab_2.grid(row=1, column=0, padx=10, pady=10)\n",
" self.frame_2_tab_2.grid_propagate(False)\n",
"\n",
" self.frame_3_tab_2 = ttk.LabelFrame(self.second_tab_frame, height=50, width=700,\n",
" text='Some frame name')\n",
" self.frame_3_tab_2.grid(row=2, column=0, padx=10, pady=10)\n",
" self.frame_3_tab_2.grid_propagate(False)\n",
"\n",
" self.frame_4_tab_2 = ttk.LabelFrame(self.second_tab_frame, height=80, width=450,\n",
" text='Some frame name')\n",
" self.frame_4_tab_2.grid(row=3, column=0, padx=10, pady=10, sticky=W)\n",
" self.frame_4_tab_2.grid_propagate(False)\n",
"\n",
" self.frame_5_tab_2 = ttk.LabelFrame(self.second_tab_frame, height=80, width=200)\n",
" self.frame_5_tab_2.grid(row=3, column=0, padx=10, pady=10, sticky=E)\n",
" self.frame_5_tab_2.grid_propagate(False)\n",
"\n",
" ttk.Button(self.frame_5_tab_2, text='Cancel', command=self.close_main_menu).grid(row=3,\n",
" column=0, padx=60,\n",
" pady=15)\n",
"\n",
" #\n",
" some_selection = StringVar()\n",
"\n",
" ttk.Radiobutton(self.frame_1_tab_1, text='Radio button', variable=some_selection,\n",
" value='Radiobutton', command=self.new_window).grid(row=0, column=0, padx=(20, 20),\n",
" pady=10, sticky='w')\n",
"\n",
" ttk.Radiobutton(self.frame_1_tab_2, text='Radio button', variable=some_selection,\n",
" value='Radiobutton', command=self.new_window).grid(row=0, column=0, padx=(20, 20),\n",
" pady=10, sticky='w')\n",
"\n",
"\n",
" # impact method combo box:\n",
" self.impact_method_var1 = StringVar()\n",
" self.impact_method_combobox_label_1 = ttk.Label(self.frame_3_tab_1, text=\"Select bla bla bla combobox\")\n",
" self.impact_method_combobox_1 = ttk.Combobox(self.frame_3_tab_1, textvariable=self.impact_method_var1)\n",
" self.impact_method_combobox_1.grid(row=0, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.impact_method_combobox_label_1.grid(row=0, column=0, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.impact_method_combobox_1.config(values=('', 'bla bla bla', ' a lot of bla bla bla'))\n",
"\n",
" self.impact_method_var2 = StringVar()\n",
" self.impact_method_combobox_label_2 = ttk.Label(self.frame_3_tab_2, text=\"Select bla bla bla combobox\")\n",
" self.impact_method_combobox_2 = ttk.Combobox(self.frame_3_tab_2, textvariable=self.impact_method_var2)\n",
" self.impact_method_combobox_2.grid(row=0, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.impact_method_combobox_label_2.grid(row=0, column=0, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.impact_method_combobox_2.config(values=('', 'bla bla bla', ' a lot of bla bla bla'))\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # Select and enter input data:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
"\n",
" # some entry (1):\n",
" self.some_entry_var = StringVar()\n",
" self.some_entry_label = ttk.Label(self.frame_2_tab_1, text=\"Bla bla bla Entry\")\n",
" self.some_entry= ttk.Entry(self.frame_2_tab_1, textvariable=self.some_entry_var)\n",
" self.some_entry_label.pack()\n",
" self.some_entry.pack()\n",
" self.some_entry_label.grid(row=0, column=0, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.some_entry.grid(row=0, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
"\n",
"\n",
" # Greenhouse heating combo box:\n",
" self.turn_on_combobox_var = StringVar()\n",
" self.turn_on_combobox_label = ttk.Label(self.frame_2_tab_1, text=\"Turn on/off combobox\")\n",
" self.turn_on_combobox = ttk.Combobox(self.frame_2_tab_1,\n",
" textvariable='Turn on/off combobox')\n",
" self.turn_on_combobox.grid(row=3, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.turn_on_combobox_label.grid(row=3, column=0, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.turn_on_combobox.config(values=('', 'Yes', 'No'))\n",
"\n",
" def enable_comboboxes(event):\n",
" if self.turn_on_combobox.get() == \"Yes\":\n",
" self.second_combobox.configure(state=\"enabled\")\n",
" self.third_combobox.configure(state=\"enabled\")\n",
" else:\n",
" self.second_combobox.configure(state=\"disabled\")\n",
" self.third_combobox.configure(state=\"disabled\")\n",
"\n",
" self.turn_on_combobox.bind(\"<<ComboboxSelected>>\", enable_comboboxes)\n",
"\n",
" # some other comboboxes:\n",
" self.second_combobox_var = StringVar()\n",
" self.second_combobox_label = ttk.Label(self.frame_2_tab_1, text=\"month names\")\n",
" self.second_combobox = ttk.Combobox(self.frame_2_tab_1, textvariable=\"month names\")\n",
" self.second_combobox.grid(row=3, column=3, padx=(5, 0), pady=(5, 5), sticky=E)\n",
" self.second_combobox_label.grid(row=3, column=2, padx=(100, 0), pady=(5, 5), sticky=E)\n",
" self.second_combobox.config(values=('', 'January', 'February', 'March', 'April', 'May', 'June', 'July',\n",
" 'August', 'September', 'October', 'November', 'December'))\n",
" self.second_combobox.configure(state=\"disabled\")\n",
"\n",
" # third combobox:\n",
" self.third_combobox_var= StringVar()\n",
" self.third_combobox_label = ttk.Label(self.frame_2_tab_1, text=\"some duration\")\n",
" self.third_combobox = ttk.Combobox(self.frame_2_tab_1,\n",
" textvariable=\"some duration\")\n",
" self.third_combobox.grid(row=4, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.third_combobox_label.grid(row=4, column=0, padx=(5, 0), pady=(5, 5), sticky=W)\n",
" self.third_combobox.config(\n",
" values=('', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'))\n",
" self.third_combobox.configure(state=\"disabled\")\n",
"\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # Import and Export tab:\n",
" # -------------------------------------------------------------------------------------------------------------\n",
" # Number of scenarios spinbox:\n",
" self.number_of_scenarios = StringVar()\n",
" Spinbox(self.frame_2_tab_2, from_=1, to=15, textvariable=self.number_of_scenarios).grid(row=1,\n",
" column=0,\n",
" padx=(\n",
" 20, 20),\n",
" pady=(0, 0),\n",
" sticky=W)\n",
" self.number_of_scenarios_label = ttk.Label(self.frame_2_tab_2, text=\"Select number of scenarios\")\n",
" self.number_of_scenarios_label.grid(row=0, column=0, padx=(20, 20), pady=(0, 0), sticky=W)\n",
"\n",
" # Select scenarios:\n",
" ttk.Button(self.frame_2_tab_2, text='New window 1',\n",
" command=self.print_something).grid(\n",
" row=0, column=1, padx=(100, 20), pady=(5, 10), sticky=W)\n",
"\n",
" ttk.Button(self.frame_2_tab_2, text='New window 2',\n",
" command=self.print_something).grid(\n",
" row=1, column=1, padx=(100, 5), pady=(10, 5), sticky=W)\n",
"\n",
" # progress bar:\n",
" progressbar = ttk.Progressbar(self.frame_4_tab_1, orient=HORIZONTAL, length=200)\n",
" progressbar.grid(row=0, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
"\n",
" progressbar.config(mode='indeterminate')\n",
" progressbar.start()\n",
"\n",
" # progress bar:\n",
" progressbar = ttk.Progressbar(self.frame_4_tab_2, orient=HORIZONTAL, length=200)\n",
" progressbar.grid(row=0, column=1, padx=(5, 0), pady=(5, 5), sticky=W)\n",
"\n",
" progressbar.config(mode='indeterminate')\n",
" progressbar.start()\n",
"\n",
" # Run the model button 1:\n",
" ttk.Button(self.frame_4_tab_1, text='Run something', command=self.print_something).grid(\n",
" row=0, column=0, padx=(20, 20), pady=(20, 20))\n",
"\n",
" # Run the model button 2:\n",
" ttk.Button(self.frame_4_tab_2, text='Run something', command=self.print_something).grid(\n",
" row=0, column=0, padx=(20, 20), pady=(20, 20))\n",
"\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" # Add menu line:\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" def add_menu_line(self):\n",
" self.main_window.option_add('*tearOff', False)\n",
" menubar = Menu(self.main_window)\n",
" self.main_window.config(menu=menubar)\n",
" file = Menu(menubar)\n",
" edit = Menu(menubar)\n",
" help_ = Menu(menubar)\n",
" menubar.add_cascade(menu=file, label='File')\n",
" menubar.add_cascade(menu=edit, label='Edit')\n",
" menubar.add_cascade(menu=help_, label='Help')\n",
"\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" # Add new windows\n",
" # -----------------------------------------------------------------------------------------------------------------\n",
" # separate window for loading the database:\n",
" def new_window(self):\n",
" self.new_window = Toplevel(self.main_window)\n",
" self.new_window.title('Load something important')\n",
" self.new_window.lift(self.main_window)\n",
"\n",
" # to make sure other copies of the window not to open:\n",
" self.new_window.focus_set() # sets the focus to the main menu window\n",
" self.new_window.grab_set() # prohibits any other window to accept events\n",
"\n",
" self.new_window_frame = ttk.LabelFrame(self.new_window, text='Select something important: ',\n",
" height=200, width=400)\n",
" self.new_window_frame.grid(row=0, column=0, sticky=W)\n",
" self.new_window_frame.grid_propagate(False)\n",
"\n",
" ttk.Radiobutton(self.new_window_frame, text='bla bla bla',\n",
" variable='bla bla bla', value='bla bla bla').grid(row=0, column=0, padx=(30, 10), pady=(20, 10), sticky='w')\n",
"\n",
" ttk.Radiobutton(self.new_window_frame, text='bla bla bla',\n",
" variable='bla bla bla', value='bla bla bla',\n",
" command=self.print_something).grid(row=1, column=0, padx=(30, 10), pady=(10, 20), sticky='w')\n",
"\n",
"\n",
" @staticmethod\n",
" def print_something():\n",
" print('You pressed something!')\n",
"\n",
"\n",
"\n",
"def main():\n",
" root = Tk()\n",
" sse = SimpleGUI(root)\n",
" root.mainloop()\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
|
from os import linesep
from asyncio.subprocess import Process
from typing import Iterable, Tuple, Union
from logging import error, log, DEBUG
from tqdm import tqdm
import config
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def log_adb_command(command: Iterable[str], level: int = DEBUG):
    """Log the generated ADB command at the given logging level."""
    # str.join consumes the iterable directly; the [*command] list the
    # original built was a redundant intermediate copy.
    log(level, f'ADB command generated: {" ".join(command)}')
def log_adb_error(proc: Process, proc_output: Tuple[bytes, bytes]) -> bool:
    """Log stdout/stderr of a failed adb invocation.

    proc is the finished asyncio.subprocess.Process; proc_output is the
    (stdout, stderr) byte pair from proc.communicate().
    Returns True when the process exited with a non-zero return code."""
    # Compute the failure flag once (the original evaluated it twice).
    failed = proc.returncode != 0
    if failed:
        proc_stdout, proc_stderr = [stream.decode(encoding='utf-8') for stream in proc_output]

        def _indent(text: str) -> str:
            # Prefix every line so the dump stands out in the log.
            return linesep.join(f'--->{line}' for line in text.splitlines())

        error(
            '[Error] ADB command failed with non-zero return code.'
            + linesep
            + '> stdout'
            + linesep
            + _indent(proc_stdout)
            + linesep
            + '> stderr'
            + linesep
            + _indent(proc_stderr)
        )
    return failed
def bar(iter: Union[Iterable, int], desc: str):
    """More elegant way to make progress bars"""
    # An int means "total count known up front"; anything else is wrapped directly.
    if not isinstance(iter, int):
        return tqdm(iter, desc, unit='file(s)')
    return tqdm(desc=desc, total=iter, unit='file(s)')
# encoding: utf-8
from src.config import FaqConfig, TfidfTransformerConfig
import json
import pickle
from sklearn.exceptions import NotFittedError
from sklearn.feature_extraction.text import TfidfVectorizer
from src.utils import Cutter
from src.utils import singleton
import logging.config
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logging.config.fileConfig(fname='log.config', disable_existing_loggers=False)
@singleton
class TfidfTransformer:
    """Singleton wrapper around sklearn's TfidfVectorizer with custom token cutting.

    NOTE(review): the token cutter comes from src.utils.Cutter; tokenisation is
    assumed to be identical at train and predict time — confirm against Cutter."""

    def __init__(self, max_features=256):
        # min_df=0.1 drops very rare tokens; the token_pattern keeps
        # single-character tokens that sklearn's default would discard.
        self.tfidf_transformer = TfidfVectorizer(
            max_features=max_features, min_df=0.1, token_pattern=r'(?u)\b\w+\b')
        self.cutter = Cutter()
        self.logger = logging.getLogger('TfidfTransformer')

    def train(self, json_file):
        """Fit the vectorizer on the 'question' field of a JSON-lines file."""
        train_data = []
        with open(json_file, 'r', encoding='utf-8') as f:
            for line in f:
                # BUGFIX: json.loads(..., encoding=...) was removed in Python 3.9
                # (TypeError); the file is already decoded as utf-8 by open().
                data = json.loads(line)
                question = data['question']
                q_tokens = self.cutter.cut_all(question)
                q_str = ' '.join(q_tokens)
                train_data.append(q_str)
        self.tfidf_transformer.fit_transform(train_data)
        self.logger.info('train tfidf transformer SUCCESS !')

    def predict(self, text):
        """Return the tf-idf vector for *text* as a plain list of floats."""
        t_tokens = self.cutter.cut_all(text)
        t_str = ' '.join(t_tokens)
        prd = self.tfidf_transformer.transform([t_str])
        # self.logger.info('predict tfidf transformer SUCCESS !')
        return list(prd.toarray()[0])

    def save_model(self, model_file):
        """Pickle the fitted vectorizer to *model_file*."""
        with open(model_file, 'wb') as f:
            pickle.dump(self.tfidf_transformer, f)
        self.logger.info(
            'save tfidf transformer model ' +
            model_file +
            ' SUCCESS !')

    def load_model(self, model_file):
        """Load a pickled vectorizer from *model_file*, replacing the current one."""
        with open(model_file, 'rb') as f:
            self.tfidf_transformer = pickle.load(f)
        self.logger.info(
            'load tfidf transformer model ' +
            model_file +
            ' SUCCESS !')

    def get_feature_dims(self):
        """Number of features (vector dimensionality) of the fitted vectorizer."""
        dim_num = len(self.tfidf_transformer.get_feature_names())
        self.logger.debug(
            'feature dims of tfidf transformer is: ' +
            str(dim_num))
        return dim_num

    def get_feature_names(self):
        """Vocabulary terms of the fitted vectorizer, in feature order."""
        names = self.tfidf_transformer.get_feature_names()
        self.logger.debug(
            'feature names of tfidf transformer is: ' +
            str(names))
        return names
def check_tfidf_transformer(tfidf_transformer: TfidfTransformer, faq_config: FaqConfig):
    """Sync the configured feature_dims with the transformer's actual dimensionality."""
    actual_dims = tfidf_transformer.get_feature_dims()
    if faq_config.tfidf_transformer.feature_dims == actual_dims:
        return
    # Config is stale: update the in-memory value and persist it.
    faq_config.tfidf_transformer.feature_dims = actual_dims
    faq_config.set('tfidf_transformer', 'feature_dims', str(actual_dims))
    faq_config.save()
def init_tfidf_transformer(faq_config: FaqConfig):
    """Create the (singleton) transformer, load its model and sync the config.

    Returns the initialised TfidfTransformer. The original built and loaded
    the transformer but returned None, forcing callers to rely on the
    singleton side effect; returning it is backward-compatible."""
    tfidf_transformer = TfidfTransformer(
        max_features=faq_config.tfidf_transformer.max_feature)
    tfidf_transformer.load_model(faq_config.tfidf_transformer.model_file)
    check_tfidf_transformer(tfidf_transformer, faq_config)
    logger = logging.getLogger('init_tfidf_transformer')
    logger.info('init tfidf transformer SUCCESS !')
    return tfidf_transformer
def get_embedding_dims(tf_config: TfidfTransformerConfig):
    """Embedding dimensionality for this config.

    Kept as a backward-compatible alias: it was an exact duplicate of
    get_feature_dims, so delegate instead of repeating the body."""
    return get_feature_dims(tf_config)
def get_feature_dims(tf_config: TfidfTransformerConfig):
    """Feature dimensionality of the (singleton) transformer for this config."""
    transformer = TfidfTransformer(max_features=tf_config.max_feature)
    return transformer.get_feature_dims()
def generate_embedding(text, tf_config: TfidfTransformerConfig):
    """tf-idf embedding of *text*; empty list when the model is not fitted yet."""
    transformer = TfidfTransformer(max_features=tf_config.max_feature)
    try:
        return transformer.predict(text)
    except NotFittedError:
        # Model has not been trained/loaded: degrade gracefully.
        return []
if __name__ == '__main__':
    # Manual smoke test: train on a local index file, round-trip the pickled
    # model, then embed a couple of sample (mixed Chinese/English) queries.
    from src.config import init_faq_config
    faq_config = init_faq_config('faq.config')
    tt = TfidfTransformer(
        max_features=faq_config.tfidf_transformer.max_feature)
    tt.train('../faq_vec.index')
    tt.save_model('tfidftransformer.pkl')
    print(tt.get_feature_names())
    print(tt.get_feature_dims())
    tt.load_model('tfidftransformer.pkl')
    print(tt.predict('你是谁?'))
    print(tt.predict('what are you ding today? 你好呀, do you know who am i ?'))
|
class Node(object):
    """Singly linked list node holding *data* and a reference to the next node."""

    def __init__(self, data, pnext=None):
        self.data = data
        self._next = pnext


class ChainTable(object):
    """Singly linked list with positional access.

    Invalid indices are reported with a printed message and a None return,
    preserving the original error-handling style (no exceptions raised)."""

    def __init__(self):
        self.head = None
        self.length = 0

    def isEmpty(self):
        """True when the table holds no nodes."""
        return (self.length == 0)

    def append(self, dataOrNode):
        """Append a Node (used as-is) or raw data (wrapped in a Node) at the tail."""
        if isinstance(dataOrNode, Node):
            item = dataOrNode
        else:
            item = Node(dataOrNode)
        if not self.head:
            self.head = item
            self.length += 1
        else:
            node = self.head
            while node._next:
                node = node._next
            node._next = item
            self.length += 1

    def delete(self, index):
        """Remove the node at 0-based *index*."""
        if self.isEmpty():
            print("this chain table is empty")
            return
        # BUGFIX: the upper bound was 'index > self.length', which silently
        # accepted index == self.length and then did nothing; use >= so the
        # out-of-range case is reported like everywhere else.
        if index < 0 or index >= self.length:
            print("error: out of index")
            return
        if index == 0:
            self.head = self.head._next
            self.length -= 1
            return
        j = 0
        node = self.head
        prev = self.head
        while node._next and j < index:
            prev = node
            node = node._next
            j += 1
        if j == index:
            prev._next = node._next
            self.length -= 1

    def insert(self, index, dataOrNode):
        """Insert data (or a Node) before position *index*."""
        if self.isEmpty():
            print("this chain table is empty")
            return
        if index < 0 or index >= self.length:
            print("error: out of index")
            return
        if isinstance(dataOrNode, Node):
            item = dataOrNode
        else:
            item = Node(dataOrNode)
        if index == 0:
            item._next = self.head
            self.head = item
            self.length += 1
            return
        j = 0
        node = self.head
        prev = self.head
        while node._next and j < index:
            prev = node
            node = node._next
            j += 1
        if j == index:
            item._next = node
            prev._next = item
            self.length += 1

    def update(self, index, data):
        """Replace the data stored at *index*."""
        if self.isEmpty() or index < 0 or index >= self.length:
            print("error: out of index")
            return
        j = 0
        node = self.head
        while node._next and j < index:
            node = node._next
            j += 1
        if j == index:
            node.data = data

    def getItem(self, index):
        """Return the data stored at *index*, or None (with a message) if invalid."""
        if self.isEmpty() or index < 0 or index >= self.length:
            print("error: out of index")
            return
        j = 0
        node = self.head
        while node._next and j < index:
            node = node._next
            j += 1
        return node.data

    def getIndex(self, data):
        """Return the index of the first node whose data equals *data*."""
        if self.isEmpty():
            print("this chain table is empty")
            return
        j = 0
        # BUGFIX: the original read 'node = node._next' before 'node' was
        # bound, raising NameError on every call; start the scan at the head.
        node = self.head
        while node:
            if node.data == data:
                return j
            node = node._next
            j += 1
        if j == self.length:
            print("%s not found" % str(data))
            return

    def clear(self):
        """Drop all nodes."""
        self.head = None
        self.length = 0

    def __getitem__(self, index):
        if self.isEmpty() or index < 0 or index >= self.length:
            print("error: out of index")
            return
        return self.getItem(index)

    def __setitem__(self, index, data):
        if self.isEmpty() or index < 0 or index >= self.length:
            print("error: out of index")
            return
        self.update(index, data)

    def __len__(self):
        return self.length
import unittest
class TestChainTable(unittest.TestCase):
    """Exercises ChainTable append/insert/update/delete on 100-element tables."""
    def setUp(self):
        # Fresh empty table for every test.
        self.chain_table = ChainTable()
    def test_append(self):
        # Mix raw-data appends and pre-built Node appends.
        for i in range(0, 50):
            self.chain_table.append(i)
        for i in range(50, 100):
            self.chain_table.append(Node(i))
        for i in range(0, 100):
            self.assertEqual(i, self.chain_table.getItem(i))
            self.assertEqual(i, self.chain_table[i])
    def test_insert(self):
        self.test_append()
        self.chain_table.insert(0, 10000)
        self.assertEqual(10000, self.chain_table[0])
        self.chain_table.insert(0, Node(100000))
        self.assertEqual(100000, self.chain_table[0])
        import random
        # NOTE(review): index may be 102 == length, which insert() rejects,
        # so the length assertion below is flaky — verify intended range.
        index = random.randint(0, 102)
        value = random.randint(0, 10000)
        self.chain_table.insert(index, value)
        self.assertEqual(self.chain_table.length, 103)
        self.assertEqual(self.chain_table.getItem(index), value)
    def test_update(self):
        self.test_insert()
        self.chain_table.update(0, -1)
        self.assertEqual(self.chain_table[0], -1)
        import random
        index = random.randint(0, self.chain_table.length-1)
        value = random.randint(0, 10000)
        self.chain_table[index] = value
        self.assertEqual(self.chain_table.getItem(index), value)
    def test_delete(self):
        self.test_append()
        self.chain_table.delete(0)
        self.assertEqual(1, self.chain_table[0])
        import random
        # NOTE(review): when index is the last position, index+1 is out of
        # range and both lookups return None, so the assert passes vacuously.
        index = random.randint(1, self.chain_table.length-1)
        value = self.chain_table[index+1]
        self.chain_table.delete(index)
        self.assertEqual(self.chain_table[index], value)
if __name__ == '__main__':
    # Run the ChainTable unit tests when executed as a script.
    unittest.main()
|
from tabulate import tabulate
import GraphQL
import os
import decimal
import yaml
# Load the payout configuration; SafeLoader refuses arbitrary YAML tags.
c = yaml.load(open('config.yml', encoding='utf8'), Loader=yaml.SafeLoader)
################################################################
# Define the payout calculation here
################################################################
public_key = str(c["VALIDATOR_ADDRESS"])
staking_epoch = int(c["STAKING_EPOCH_NUMBER"])
fee = float(c["VALIDATOR_FEE"])
SP_FEE = float(c["VALIDATOR_FEE_SP"])
foundation_fee = float(c["VALIDATOR_FEE_FOUNDATION"])
min_height = int(c["FIRST_BLOCK_HEIGHT"]) # This can be the last known payout or this could vary the query to be a starting date
latest_block = int(c["LATEST_BLOCK_HEIGHT"])
confirmations = int(c["CONFIRMATIONS_NUM"]) # Can set this to any value for min confirmations up to `k`
MINIMUM_PAYOUT = float(c["MINIMUM_PAYOUT"])
decimal_ = 1e9  # divisor between raw units and whole tokens — presumably nanomina per MINA; TODO confirm
COINBASE = 720  # base block reward; amounts above this are routed to the x2 (supercharged) bucket below
with open("version", "r") as v_file:
    version = v_file.read()
print(f'Script version: {version}')
def float_to_string(number, precision=9):
    """Render `number` with up to `precision` decimals, trimming trailing zeros.

    Goes through Decimal (100-digit context) so stringification doesn't pick
    up binary float noise; returns '0' when everything trims away.
    """
    quantized = decimal.Context(prec=100).create_decimal(str(number))
    text = '{0:.{prec}f}'.format(quantized, prec=precision)
    return text.rstrip('0').rstrip('.') or '0'
def write_to_file(data_string: str, file_name: str, mode: str = "w"):
    """Write `data_string` plus a trailing newline to `file_name` (mode 'w'/'a')."""
    with open(file_name, mode) as out:
        out.write(f"{data_string}\n")
# Foundation delegation addresses, one per line.
with open("foundation_addresses.txt", "r") as f:
    foundation_delegations = f.read().split("\n")
# Resolve the staking-epoch ledger hash; abort the run on any failure.
try:
    ledger_hash = GraphQL.getLedgerHash(epoch=staking_epoch)
    print(ledger_hash)
    ledger_hash = ledger_hash["data"]["blocks"][0] \
        ["protocolState"]["consensusState"] \
        ["stakingEpochData"]["ledger"]["hash"]
except Exception as e:
    print(e)
    exit("Issue getting ledger_hash from GraphQL")
if latest_block == 0:
    # Get the latest block height
    latest_block = GraphQL.getLatestHeight()
else:
    # Wrap the configured height in the same shape GraphQL would return.
    latest_block = {'data': {'blocks': [{'blockHeight': latest_block}]}}
if not latest_block:
    exit("Issue getting the latest height")
assert latest_block["data"]["blocks"][0]["blockHeight"] > 1
# Only ever pay out confirmed blocks
max_height = latest_block["data"]["blocks"][0]["blockHeight"] - confirmations
assert max_height <= latest_block["data"]["blocks"][0]["blockHeight"]
print(f"This script will payout from blocks {min_height} to {max_height}")
# Running totals accumulated by the loops below.
total_staking_balance = 0
total_staking_balance_unlocked = 0
total_staking_balance_foundation = 0
all_block_rewards = 0
all_x2_block_rewards = 0
total_snark_fee = 0
all_blocks_total_fees = 0
payouts = []
blocks = []
blocks_included = []
store_payout = []
# Get the staking ledger for an epoch
try:
    staking_ledger = GraphQL.getStakingLedger({
        "delegate": public_key,
        "ledgerHash": ledger_hash,
    })
except Exception as e:
    print(e)
    exit("Issue getting staking ledger from GraphQL")
if not staking_ledger["data"]["stakes"]:
    exit("We have no stakers")
# All blocks this validator created in the payout window.
try:
    blocks = GraphQL.getBlocks({
        "creator": public_key,
        "epoch": staking_epoch,
        "blockHeightMin": min_height,
        "blockHeightMax": max_height,
    })
except Exception as e:
    print(e)
    exit("Issue getting blocks from GraphQL")
if not blocks["data"]["blocks"]:
    exit("Nothing to payout as we didn't win anything")
# FIX: the header's last column was the fused string
# "is_locked?are_tokens_locked?" (missing separator / duplicated name);
# the rows written below have exactly four fields, the last being the
# lock status, so use a single well-formed column name.
csv_header_delegates = "address;stake;foundation_delegation?;are_tokens_locked?"
delegator_file_name = "delegates.csv"
write_to_file(data_string=csv_header_delegates, file_name=delegator_file_name, mode="w")
# Slot of the newest block we created; used to decide whether a delegator's
# time-locked tokens count as unlocked for this epoch.
latest_slot_for_created_block = blocks["data"]["blocks"][0]["protocolState"]["consensusState"]["slotSinceGenesis"]
# Classify every delegation and build the payout skeleton.
for s in staking_ledger["data"]["stakes"]:
    # skip delegates with staking balance == 0
    if s["balance"] == 0:
        continue
    if not s["timing"]:
        # 100% unlocked
        timed_weighting = "unlocked"
        total_staking_balance_unlocked += s["balance"]
    elif s["timing"]["untimed_slot"] <= latest_slot_for_created_block:
        # if the last slot of the last created validator by the block >= untimed_slot,
        # then we consider that in this epoch the delegator tokens are completely unlocked
        timed_weighting = "unlocked"
        total_staking_balance_unlocked += s["balance"]
    else:
        # locked tokens
        timed_weighting = "locked"
    # Is this a Foundation address
    if s["public_key"] in foundation_delegations:
        foundation_delegation = True
        total_staking_balance_foundation += s["balance"]
    else:
        foundation_delegation = False
    # Rewards and percentages start at 0 and are filled in by the payout loop.
    payouts.append({
        "publicKey": s["public_key"],
        "total_reward": 0,
        "staking_balance": s["balance"],
        "percentage_of_total": 0, # delegator's share in %, relative to total_staking_balance
        "percentage_of_SP": 0, # percentage of unlocked tokens from the total amount of unlocked tokens
        "timed_weighting": timed_weighting,
        "foundation_delegation": foundation_delegation
    })
    total_staking_balance += s["balance"]
    delegator_csv_string = f'{s["public_key"]};{float_to_string(s["balance"])};{foundation_delegation};{timed_weighting}'
    write_to_file(data_string=delegator_csv_string, file_name=delegator_file_name, mode="a")
# Walk won blocks oldest-first, accumulating rewards/fees and writing blocks.csv.
csv_header_blocks = "block_height;slot;block_reward;snark_fee;tx_fee;epoch;state_hash"
blocks_file_name = f"blocks.csv"
write_to_file(data_string=csv_header_blocks, file_name=blocks_file_name, mode="w")
for b in reversed(blocks["data"]["blocks"]):
    # No coinbase receiver means the block carried no reward for us.
    if not b["transactions"]["coinbaseReceiverAccount"]:
        print(f"{b['blockHeight']} didn't have a coinbase so won it but no rewards.")
        continue
    if not b["canonical"]:
        print("Block not in canonical chain")
        continue
    block_height = b["blockHeight"]
    slot = b["protocolState"]["consensusState"]["slotSinceGenesis"]
    block_reward_mina = int(b["transactions"]["coinbase"]) / decimal_
    block_reward_nano = int(b["transactions"]["coinbase"])
    snark_fee = b["snarkFees"]
    epoch = b["protocolState"]["consensusState"]["epoch"]
    state_hash = b["stateHash"]
    tx_fees = b["txFees"]
    total_snark_fee += int(snark_fee)
    all_blocks_total_fees += int(tx_fees)
    blocks_included.append(b['blockHeight'])
    # Coinbase above the base COINBASE amount is tracked separately as the
    # supercharged ("x2") portion as well as in the regular total.
    if block_reward_mina > COINBASE:
        all_x2_block_rewards += block_reward_nano - (COINBASE * decimal_)
        all_block_rewards += block_reward_nano - (COINBASE * decimal_)
    else:
        all_block_rewards += block_reward_nano
    csv_string = f"{block_height};" \
                 f"{slot};" \
                 f"{block_reward_mina};" \
                 f"{float_to_string(int(snark_fee) / decimal_)};" \
                 f"{float_to_string(int(tx_fees) / decimal_)};" \
                 f"{epoch};" \
                 f"{state_hash};"
    write_to_file(data_string=csv_string, file_name=blocks_file_name, mode="a")
# Distributable pot: block rewards plus tx fees, net of snark fees paid.
total_reward = all_block_rewards + all_blocks_total_fees - total_snark_fee
delegators_reward_sum = 0
payout_table = []
# remove the file to prevent invalid data in the payout file
# NOTE(review): bare except silently ignores any removal error, not just
# a missing file — consider narrowing to FileNotFoundError.
try:
    os.remove(f'e{staking_epoch}_payouts.csv')
except:
    pass
for p in payouts:
    # Foundation delegations: pro-rata share of the pot minus the foundation fee.
    if p["foundation_delegation"] is True:
        p["percentage_of_total"] = float(p["staking_balance"]) / total_staking_balance
        p["total_reward"] = float(total_reward * p["percentage_of_total"] * (1 - foundation_fee))
    # Unlocked delegations additionally share the supercharged (x2) rewards.
    elif p["timed_weighting"] == "unlocked":
        p["percentage_of_SP"] = float(p["staking_balance"]) / total_staking_balance_unlocked
        p["percentage_of_total"] = float(p["staking_balance"]) / total_staking_balance
        p["total_reward"] = float(total_reward * p["percentage_of_total"] * (1 - fee))
        p["total_reward"] = p["total_reward"] + (float(all_x2_block_rewards * p["percentage_of_SP"] * (1 - SP_FEE)))
    else:
        p["percentage_of_total"] = float(p["staking_balance"]) / total_staking_balance
        p["total_reward"] = float(total_reward * p["percentage_of_total"] * (1 - fee))
    delegators_reward_sum += p["total_reward"]
    payout_table.append([
        p["publicKey"],
        p["staking_balance"],
        float_to_string(p["total_reward"] / decimal_),
        p["foundation_delegation"],
        p["timed_weighting"]
    ])
    payout_string = f'{p["publicKey"]};' \
                    f'{float_to_string(int(p["total_reward"]))};' \
                    f'{float_to_string(p["total_reward"] / decimal_)};' \
                    f'{p["foundation_delegation"]};' \
                    f'{p["timed_weighting"]}'
    # skip payouts below the configured MINIMUM_PAYOUT threshold
    if p["total_reward"] / decimal_ < MINIMUM_PAYOUT:
        continue
    write_to_file(data_string=payout_string,
                  file_name=f'e{staking_epoch}_payouts.csv', mode='a')
# pprint(payouts)
# We now know the total pool staking balance with total_staking_balance
print(f"The pool total staking balance is: {total_staking_balance}\n"
      f"The Foundation delegation balance is: {total_staking_balance_foundation}\n"
      f"Blocks won: {len(blocks_included)}\n"
      f"Delegates in the pool: {len(payouts)}")
# Whatever was not paid out to delegators stays with the validator.
validator_reward = total_reward + all_x2_block_rewards - delegators_reward_sum
print(f'Supercharged rewards total: {all_x2_block_rewards / decimal_}')
print(f'Total: {(total_reward + all_x2_block_rewards) / decimal_}')
print(f'Validator fee: {validator_reward / decimal_}')
print(tabulate(payout_table,
               headers=["PublicKey", "Staking Balance", "Payout mina", "Foundation", "Tokens_lock_status"], tablefmt="pretty"))
|
# Warm-up exercises: arithmetic, strings, comparisons, input() and bool().
a = 2
b = 3
c = a + b
print(c)

a = 2
b = 3
print(a + b)

print('Привет мир!')

# Integer arithmetic operators.
c = a - b
print(c)
c = a * b
print(c)
c = a ** b
print(c)
c = a / b
print(c)

# Float arithmetic.
a = 2.1
b = 3.3
c = a + b
print(c)

a = 3.1
b = 0.1
c = a - b
print(c)

# Comparison operators produce booleans.
a = 3.1
b = 0.1
c = a < b
print(c)

a = 3.1
b = 0.1
c = a > b
print(c)

a = 3.1
b = 0.1
c = a != b
print(c)

a = 3.1
b = 0.1
c = a == b
print(c)

# String concatenation and formatting.
a = 'Привет'
b = 'мир'
c = a + ' ' + b + '!'
print(c)

a = 'Привет'
b = 'всем'
print(a + ' ' + b + '!')

a = 'Здравствуйте'
b = 'все'
c = 2
d = '{} {} {}!'.format(a, b, c)
print(d)

user = 'Вася'
d = '{}, {}!'.format(a, user)
print(d)

user = 'Петя'
d = 'Привет, {name}!'.format(name=user)
print(d)

user = 'Иван'
d = f'Добрый день, {user}!'
print(d)
print(len(d))

# String methods: case handling and whitespace stripping.
user = 'Николай'.upper()
print(user)
user = 'Николай'.lower()
print(user)
user = 'николай'.capitalize()
print(user)

user = ' Александр'
print(user)
print(len(user))
user1 = user.strip()
print(user1)
print(len(user1))

# replace() returns a new string; the original is untouched.
a = 'Палажить'
b = a.replace('а', 'о')
print(a)
print(b)

a = 'ПАиграть'.lower().replace('а', 'о').capitalize()
print(a)

a = 'ПАиграть'
b = a.lower().replace('а', 'о').capitalize()
print(a)
print(b)

# Splitting.
a = '67,906,97643'
print(a.split(','))

a = 'Заходите к нам на огонек'
b = a.split()
print(b)
print(len(b))

# None checks use identity, not equality.
a = None
b = 0
print(a is None)
print(b is None)
print(b is not None)

a = '2.0'
b = 2
c = 2.0
print(type(a))
print(type(b))
print(type(c))

# Interactive input.
name = input('Введите Ваше имя: ')
print(f'Привет, {name}!')
age = int(input('Сколько Вам лет? '))
birth_year = 2019 - age
print(f'Вы родились в {birth_year} году')

print(bool('Ну, погоди!'))  # non-empty string, non-zero, non-None -> True
print(bool(''))
print(bool(111))
print(bool(0))
print(bool(0.001))
print(bool(None))
print(bool(-2))
|
import requests
import re

# Fetch the page at the URL supplied on stdin and print every distinct
# domain referenced by an <a href=...> link, sorted alphabetically.
page = requests.get(input())
seen = []
for match in re.findall(r"<a(.*?)href(.*?)=(.*?)(\"|')(((.*?):\/\/)|(\.\.)|)(.*?)(\/|:|\"|')(.*)", page.text):
    host = match[8]  # capture group holding the domain part
    if host not in seen:
        seen.append(host)
for host in sorted(seen):
    print(host)
|
def maxArea(height):
    """Return the largest water area between two lines (LeetCode 11).

    The area between lines i < j is (j - i) * min(height[i], height[j]).

    Replaces the original O(n^2) double scan (with fragile max-index
    pruning) by the canonical two-pointer O(n) algorithm: the shorter
    side limits the area, so moving it inward is the only way the area
    can grow. Also returns 0 for empty input instead of raising.
    """
    best = 0
    lo, hi = 0, len(height) - 1
    while lo < hi:
        best = max(best, (hi - lo) * min(height[lo], height[hi]))
        if height[lo] <= height[hi]:
            lo += 1
        else:
            hi -= 1
    return best
# Sample case from LeetCode 11 ("Container With Most Water"); expected 49.
height = [1, 8, 6, 2, 5, 4, 8, 3, 7]
print(maxArea(height))
|
import click
import numpy as np
import logging
import pickle
from sklearn.preprocessing import RobustScaler
from sklearn.utils import check_random_state
from recnn.recnn import event_baseline_predict
# Root logger: timestamped INFO-level messages for the CLI command below.
logging.basicConfig(level=logging.INFO,
                    format="[%(asctime)s %(levelname)s] %(message)s")
def _load_events(filename, n_events, n_particles_per_event):
    """Read `n_events` pickled (particles, label) pairs stored back-to-back.

    Each event's particle array is truncated to the first
    n_particles_per_event rows. Returns (list_of_arrays, labels ndarray).
    """
    X, y = [], []
    with open(filename, "rb") as fd:
        for _ in range(n_events):
            v_i, y_i = pickle.load(fd)
            X.append(v_i[:n_particles_per_event])
            y.append(y_i)
    return X, np.array(y)


@click.command()
@click.argument("filename_train")
@click.argument("filename_test")
@click.argument("filename_model")
@click.argument("n_events_train")
@click.argument("n_events_test")
@click.argument("filename_output")
@click.option("--n_particles_per_event", default=10)
@click.option("--random_state", default=1)
def test(filename_train,
         filename_test,
         filename_model,
         n_events_train,
         n_events_test,
         filename_output,
         n_particles_per_event=10,
         random_state=1):
    """Score a pickled baseline model on test events and save (y, y_pred).

    The train file is read only to fit the RobustScaler used to
    preprocess the test events; its contents are then released.
    """
    # Initialization (click passes argv values as strings)
    n_events_train = int(n_events_train)
    n_events_test = int(n_events_test)
    logging.info("Calling with...")
    logging.info("\tfilename_train = %s" % filename_train)
    logging.info("\tfilename_test = %s" % filename_test)
    logging.info("\tfilename_model = %s" % filename_model)
    logging.info("\tn_events_train = %d" % n_events_train)
    logging.info("\tn_events_test = %d" % n_events_test)
    logging.info("\tfilename_output = %s" % filename_output)
    logging.info("\tn_particles_per_event = %d" % n_particles_per_event)
    logging.info("\trandom_state = %d" % random_state)
    rng = check_random_state(random_state)  # kept for parity; unused below
    # Load train data, only to fit the feature scaler
    logging.info("Loading train data + preprocessing...")
    X, y = _load_events(filename_train, n_events_train, n_particles_per_event)
    logging.info("\tfilename = %s" % filename_train)
    logging.info("\tX size = %d" % len(X))
    logging.info("\ty size = %d" % len(y))
    # Building scalers
    logging.info("Building scalers...")
    tf_features = RobustScaler().fit(np.vstack(X))
    # Release train data before loading the test set
    X = None
    y = None
    # Loading test data
    logging.info("Loading test data + preprocessing...")
    X, y = _load_events(filename_test, n_events_test, n_particles_per_event)
    # FIX: this log line previously reported filename_train (copy-paste bug)
    logging.info("\tfilename = %s" % filename_test)
    logging.info("\tX size = %d" % len(X))
    logging.info("\ty size = %d" % len(y))
    # Scaling: apply the train-fitted scaler, then zero-pad short events
    # to a fixed (n_particles_per_event, 4) shape.
    logging.info("Scaling...")
    for i in range(len(X)):
        X[i] = tf_features.transform(X[i])
        if len(X[i]) < n_particles_per_event:
            X[i] = np.vstack([X[i],
                              np.zeros((n_particles_per_event - len(X[i]), 4))])
    # Testing: predict in chunks of 1000 events
    logging.info("Testing...")
    predict = event_baseline_predict
    with open(filename_model, "rb") as fd:
        params = pickle.load(fd)
    all_y_pred = []
    for start in range(0, len(y), 1000):
        y_pred = predict(params, X[start:start + 1000],
                         n_particles_per_event=n_particles_per_event)
        all_y_pred.append(y_pred)
    y_pred = np.concatenate(all_y_pred)
    # Save columns [true label, prediction]
    output = np.hstack((y.reshape(-1, 1),
                        y_pred.reshape(-1, 1)))
    with open(filename_output, "wb") as fd:
        pickle.dump(output, fd, protocol=2)
if __name__ == "__main__":
    # click parses sys.argv and invokes the command.
    test()
|
new_price = [1, 2, 3]
# FIX: in Python 3 map() is lazy and the original discarded its result,
# so nothing happened; materialize the stringified prices.
new_price = list(map(str, new_price))
|
"""Created February 7, 2019 by Mimi Sun
Rose8bot.py - main file
cmd - drag file and press enter
terminal - python3 rose8bot.py
install discord.py rewrite - python3 -m pip install -U discord.py[voice]
pip install -U git+https://github.com/Rapptz/discord.py@rewrite#egg=discord.py[voice]
pip install psycopg2-binary
pip install requests
"""
#----- IMPORTS -----
import discord
#from discord.ext import commands
from datetime import datetime
import os #for db
import psycopg2 #for db
from random import randint
#import math
import requests
import json
#module imports..
import levelingSystem
# SECURITY: database credentials and the bot token are hard-coded in source —
# they should be rotated and loaded from environment variables instead.
DATABASE_URL = 'postgres://xwceyseiukfdkz:51db1a70bff79495e8d0f3bf5874b49a4e9e8fd5d6760449a98aa5ccb3c4d1b1@ec2-54-204-39-238.compute-1.amazonaws.com:5432/d22dp6v8g7jfcb'
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
bot_token = 'NTQzMTI0NDEzMjEwNjg5NTM2.Dz4A2g.jJ82SFse6Dvk7_ARmvTGK9vWdEk'
client = discord.Client() #establish Discord client
# NOTE(review): get_guild() at import time runs before the client connects,
# so these presumably evaluate to None — verify they are actually used.
wolfPack = client.get_guild(349593648595206154)
_8bitpetals = client.get_guild(538596813809254414)
def html_decode(s):
    """
    Returns the ASCII decoded version of the given HTML string. This does
    NOT remove normal HTML tags like <p>.
    """
    # FIX: the entity strings had themselves been HTML-decoded at some point,
    # turning every pair into a no-op (and breaking quoting on the first
    # tuple); restore the actual HTML entities so decoding works.
    # (Python's stdlib html.unescape covers all entities if broader coverage
    # is ever needed.)
    htmlCodes = (
        ("'", '&#39;'),
        ('"', '&quot;'),
        ('>', '&gt;'),
        ('<', '&lt;'),
        ('&', '&amp;'),
        ('é', '&eacute;')
    )
    # '&amp;' is replaced last so freshly decoded ampersands are not
    # re-expanded by earlier rules.
    for code in htmlCodes:
        s = s.replace(code[1], code[0])
    return s
def Create_user(userid, uname, nick, discrim, a_url):
    """Insert a users row if `memberid` is new, then seed all per-user tables.

    Returns the new user_id, or None when the insert was skipped or failed.
    NOTE(review): when the user already exists, INSERT ... WHERE NOT EXISTS
    returns no row, fetchone()[0] raises, and control lands in the except
    branch — presumably intentional dedup, but it prints a failure message
    on every message from a known user; confirm.
    """
    sql = """ INSERT INTO users(memberid, username, nickname, discriminator, avatar_url)
    SELECT %s ,%s, %s, %s, %s WHERE NOT EXISTS(SELECT memberid FROM users WHERE memberid = %s) RETURNING memberid; """
    conn = None
    user_id = None
    try:
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        #create new cursor
        cur = conn.cursor()
        #execute the INSERT statement
        cur.execute(sql, (str(userid), uname, nick, discrim, a_url,str(userid)))
        #get newly generated id back
        user_id = cur.fetchone()[0]
        # Seed every satellite table with zeroed defaults for the new user.
        Insert_xp(user_id,0,0)
        Insert_rep(user_id, 'Neutral',0)
        Insert_currency(user_id, '0','0','0')
        Insert_upvotes(user_id, 0,0)
        Insert_rank(user_id, 0)
        #commit changes to db
        conn.commit()
        count = cur.rowcount
        print('(+) Record successfully inserted into user.')
        #close communication with db server
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if conn:
            print('(-) Failed to insert record into user.')
    finally:
        if conn is not None:
            conn.close()
        ####print('(-------(x) PostgreSQL connection is closed.')
    return user_id
def Update_currency(user_id, currency_earned, currency_spent):
    """Add earned/spent deltas to a user's currency row.

    Side effects: publishes earned, spent, current_currency and
    currency_daily as module globals (read by the embed commands).
    NOTE(review): the UPDATE writes `earned` into the currency_daily column,
    overwriting the value just read into the currency_daily global — verify
    this is intended.
    """
    find_user = """SELECT user_id, current_currency, total_currency_earned, total_currency_spent, currency_daily FROM currency WHERE user_id = %s;"""
    insert_currency = """UPDATE currency SET current_currency = %s, total_currency_earned=%s, total_currency_spent=%s, currency_daily=%s WHERE user_id = %s RETURNING current_currency, total_currency_earned, total_currency_spent;"""
    conn = None
    try:
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cur = conn.cursor()
        cur.execute(find_user,(str(user_id),))
        row = cur.fetchone()
        current = int(row[1])  # read but not used below
        global earned
        earned = round(int(row[2]) + int(currency_earned))
        #print('earned = ' + str(earned))
        global spent
        spent = round(int(row[3]) + int(currency_spent))
        #print('spent = ' + str(spent))
        global current_currency
        current_currency = round(earned - spent)
        #print('current_currency = ' + str(current_currency))
        global currency_daily
        currency_daily = int(row[4])
        cur.execute(insert_currency, (current_currency, earned, spent,earned,str(user_id)))
        conn.commit()
        #print('!! ++ !! currency successfully added to user.')
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if conn:
            print('(-) Failed to insert record into currency.')
    finally:
        if conn is not None:
            conn.close()
        ####print('(-------(x) PostgreSQL connection is closed.')
def Add_xp(user_id, xp_earned):
    """Add `xp_earned` to a user's total xp and refresh the leveling globals.

    Side effects: updates the xp table and publishes userid, new_totalxp,
    lvl_prev, lvl, currentlvl, nextlvl, xp_for_level, xp_progress,
    xp_for_next_level and xp_next_progress as module globals (read by
    on_message and the $profile embed).
    """
    # FIX: `import math` was commented out at module level, so the sqrt
    # calls below raised NameError at runtime; import it here.
    import math
    find_user = """SELECT user_id, current_xp, total_xp FROM xp WHERE user_id = %s;"""
    insert_xp_sql = """UPDATE xp SET total_xp = %s WHERE user_id = %s RETURNING user_id, current_xp, total_xp;"""
    conn = None
    try:
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cur = conn.cursor()
        cur.execute(find_user, (str(user_id),))
        row = cur.fetchone()
        global userid
        userid = row[0]
        currentxp = row[1]
        totalxp = row[2]
        # New lifetime xp total.
        global new_totalxp
        new_totalxp = int(totalxp) + int(xp_earned)
        # Level curve: level = floor(sqrt(total_xp)) // 5, i.e. reaching
        # level n takes (5n)^2 total xp.
        global lvl_prev
        lvl_prev = round((math.sqrt(totalxp)) // 5)
        global lvl
        lvl = round((math.sqrt(new_totalxp)) // 5)
        global currentlvl
        currentlvl = round(lvl)
        global nextlvl
        nextlvl = int(currentlvl) + 1
        # xp threshold of the current level.
        global xp_for_level
        xp_for_level = round((5 * currentlvl) ** 2)
        # Progress past the current level's threshold.
        global xp_progress
        xp_progress = round(new_totalxp - xp_for_level)
        # Threshold of the next level and the gap between the two.
        global xp_for_next_level
        xp_for_next_level = (5 * nextlvl) ** 2
        global xp_next_progress
        xp_next_progress = round(xp_for_next_level - xp_for_level)
        cur.execute(insert_xp_sql, (new_totalxp, str(user_id)))
        conn.commit()
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if conn:
            print('!! -- !! xp failed add to user.')
    finally:
        if conn is not None:
            conn.close()
def Insert_xp(user_id, curr_xp, total_xp):
    """Create the initial xp row for a user."""
    insert_sql = "INSERT INTO xp(user_id, current_xp, total_xp) VALUES(%s, %s, %s)"
    db_conn = None
    try:
        db_conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cursor = db_conn.cursor()
        cursor.execute(insert_sql, (user_id, curr_xp, total_xp))
        db_conn.commit()
        print('(+) xp record successfully inserted.')
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if db_conn:
            print('(-) Failed to insert record into xp.')
    finally:
        # Always release the connection, even on failure.
        if db_conn is not None:
            db_conn.close()
def Insert_rep(user_id, rep_name, total_rep):
    """Create the initial reputation row for a user."""
    insert_sql = "INSERT INTO rep(user_id, rep_name, total_rep) VALUES (%s, %s, %s)"
    db_conn = None
    try:
        db_conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cursor = db_conn.cursor()
        cursor.execute(insert_sql, (user_id, rep_name, total_rep))
        db_conn.commit()
        print('(+) rep record successfully inserted.')
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if db_conn:
            print('(-) Failed to insert record into rep.')
    finally:
        # Always release the connection, even on failure.
        if db_conn is not None:
            db_conn.close()
def Insert_currency(user_id, curr_currency, currency_earned, currency_spent):
    """Create the initial currency row for a user (daily seeded from earned)."""
    print('attempting to insert_currency...')
    insert_sql = "INSERT INTO currency(user_id, current_currency, total_currency_earned, total_currency_spent, currency_daily) VALUES (%s, %s, %s, %s,%s)"
    db_conn = None
    try:
        db_conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cursor = db_conn.cursor()
        cursor.execute(insert_sql, (user_id, curr_currency, currency_earned, currency_spent, currency_earned))
        db_conn.commit()
        print('(+) currency record successfully inserted.')
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if db_conn:
            print('(-) Failed to insert record into currency.')
    finally:
        # Always release the connection, even on failure.
        if db_conn is not None:
            db_conn.close()
def Insert_upvotes(user_id, up_given, up_received):
    """Create the initial upvotes row for a user."""
    insert_sql = "INSERT INTO upvotes(user_id, upvotes_given, upvotes_received) VALUES (%s, %s, %s)"
    db_conn = None
    try:
        db_conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cursor = db_conn.cursor()
        cursor.execute(insert_sql, (user_id, up_given, up_received))
        db_conn.commit()
        print('(+) upvotes record successfully inserted.')
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if db_conn:
            print('(-) Failed to insert record into upvotes.')
    finally:
        # Always release the connection, even on failure.
        if db_conn is not None:
            db_conn.close()
def Insert_rank(user_id, rank):
    """Create the initial rank row for a user (no success message by design)."""
    insert_sql = "INSERT INTO rank(user_id, rank) VALUES (%s, %s)"
    db_conn = None
    try:
        db_conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cursor = db_conn.cursor()
        cursor.execute(insert_sql, (user_id, rank))
        db_conn.commit()
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        if db_conn:
            print('(-) Failed to insert record into rank.')
    finally:
        # Always release the connection, even on failure.
        if db_conn is not None:
            db_conn.close()
def Get_users():
    """Debug helper: dump every row of each per-user table to stdout."""
    conn = None
    try:
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        #create new cursor
        cur = conn.cursor()
        cur.execute("SELECT * FROM users ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE users (user_id, memberid, uname, nickname, disc, avatar')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.execute("SELECT * FROM xp ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE xp (xp_id, user_id, current_xp, total_xp')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.execute("SELECT * FROM rep ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE rep rep_id, user_id, rep_name, total_rep')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.execute("SELECT * FROM currency ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE currency (currency_id, current $, total $ earned, total $spent')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.execute("SELECT * FROM upvotes ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE upvotes (upvotes_id, user_id, ups given, ups received')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.execute("SELECT * FROM rank ORDER BY user_id")
        print('Number of users: ' + str(cur.rowcount))
        print('TABLE rank (rank_id, user_id, rank')
        row = cur.fetchone()
        while row is not None:
            print(str(row))
            row = cur.fetchone()
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Always close the connection, even after an error.
        if conn is not None:
            conn.close()
def Delete_dupes():
    """Delete duplicate users rows, then dump all tables via Get_users().

    NOTE(review): the predicate `a.memberid < b.memberid` deletes every row
    whose memberid is smaller than ANY other row's — i.e. all but the max —
    not just duplicates. The call site is commented out; verify the intended
    SQL (e.g. compare ctid with equal memberid) before re-enabling.
    """
    sql = "DELETE FROM users a USING users b WHERE a.memberid < b.memberid;"
    conn = None
    try:
        # FIX: was `conn = conn = psycopg2.connect(...)` (duplicated assignment).
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        Get_users()
        cur.close()
        print("postgreSQL connection closed.")
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
def Retrieve_Token():
    """Request a fresh OpenTDB session token, publishing it via globals."""
    payload = requests.get("https://opentdb.com/api_token.php?command=request").json()
    global token
    token = payload['token']
    print('Retrieving token')
    print('token = ' + token)
    global token_response_message
    token_response_message = payload['response_message']
# Module-level embed shells shared across commands: each handler adds its
# fields, sends the embed, then calls clear_fields() so the shell can be
# reused for the next invocation.
#server stats
statsEmbed = discord.Embed(
    title='Server Stats',
    colour=0xFC6A5B,
)
#commands menu
cmdsEmbed = discord.Embed(
    title='Commands Menu',
    colour =0xFC6A5B,
)
#user stats
myStatsEmbed = discord.Embed(
    title='User Stats',
    colour=0xFC6A5B,
)
#user profile
profileEmbed = discord.Embed(
    title = 'Profile',
    colour = 0xFFFFFF,
)
moonsEmbed = discord.Embed(
    title = 'Moons',
    colour = 0xFFFFFF,
)
@client.event
async def on_ready():
    """Startup hook: set presence, ensure DB tables exist, fetch a trivia token."""
    print("Rose8bot is ready!")
    #await allows multitasking so the bot can respond to
    #different events at the same time
    await client.change_presence(activity=discord.Game(name="v2.3.4"))
    levelingSystem.Create_tables()
    Retrieve_Token()
    #Delete_dupes()
@client.event
async def on_message(message):
user = client.get_user(message.author.id)
guild = message.guild
msg = message
member = message.author
global username
username = user.name
global nickname
nickname = member.nick
global discriminator
discriminator = user.discriminator
global avatar_url
avatar_url = member.avatar_url
global trivia_started
trivia_started = False
print('message received')
#ignore messages sent by Rose8bot
if message.author == client.user:
print('message sent by bot, ignoring...')
return
else:
Create_user(user.id,username, nickname, discriminator, avatar_url)
xp_add = randint(1,5)
Add_xp(str(user.id),str(xp_add))
Update_currency(str(user.id),str(0),str(0))
if lvl > lvl_prev:
await message.channel.send(f"🆙 || Congrats {message.author.mention}, you just hit level **{lvl}**!!")
#Add_xp(str(user.id),str(0))
#Update_currency(str(user.id),str(0),str(0))
#commands menu
if message.content == '$menu':
statsEmbed.set_footer(text='Bot created by Rose8bit. ')
cmdsEmbed.set_thumbnail(url='https://vignette.wikia.nocookie.net/mcpc/images/1/10/Oxeye-Daisy.png/revision/latest?cb=20131121042853')
cmdsEmbed.add_field(name = '$newcat [category name]', value ='Create a new category', inline=False)
cmdsEmbed.add_field(name = '$newtext [channel name]', value = 'Create a new text channel', inline=False)
cmdsEmbed.add_field(name = 'newvoice [channel name]', value = 'Create a new voice channel', inline=False)
cmdsEmbed.add_field(name = '$rolepoll', value = 'Create a poll to auto-assign roles based on reactions', inline=False)
cmdsEmbed.add_field(name = '$statsfor [username]', value = 'Display stats for given username. (username is case-sensitive)', inline=False)
cmdsEmbed.add_field(name = '$stats', value = 'Display server stats', inline=False)
cmdsEmbed.add_field(name = '$mystats', value = 'Display user stats.', inline=False)
cmdsEmbed.add_field(name = '$stats', value = 'Display server stats.', inline=False)
cmdsEmbed.add_field(name = '$slots', value = 'Roll the slot machine!', inline=False)
cmdsEmbed.add_field(name = '$trivia', value = '**Usable by Rose8bit and Vanitaz only** Trivia time!')
cmdsEmbed.add_field(name = '$end', value = '**Usable by Rose8bit and Vanitaz only** End the current trivia session and announce the winner.')
await message.channel.send(embed=cmdsEmbed)
cmdsEmbed.clear_fields()
#display server stats
if message.content == '$stats':
statsEmbed.set_footer(text='Bot created by Rose8bit. ')
statsEmbed.set_thumbnail(url=guild.icon_url)
statsEmbed.add_field(name = 'Server Name', value = message.guild, inline=True)
statsEmbed.add_field(name = 'Server Owner', value = guild.owner, inline=True)
statsEmbed.add_field(name = 'Text Channels', value = len(guild.text_channels), inline = True)
statsEmbed.add_field(name = 'Voice Channels', value =len(guild.voice_channels), inline = True)
statsEmbed.add_field(name = 'Members', value = guild.member_count, inline = True)
await message.channel.send(embed=statsEmbed)
statsEmbed.clear_fields()
#display user stats
if message.content == '$mystats':
await message.channel.send('Displaying stats for ' + str(user.mention))
myStatsEmbed.set_footer(text='Bot created by Rose8bit. ')
myStatsEmbed.set_thumbnail(url=member.avatar_url)
myStatsEmbed.add_field(name = 'Username', value = str(user.name), inline=False)
myStatsEmbed.add_field(name = 'Discriminator', value = str(user.discriminator), inline=False)
#myStatsEmbed.add_field(name = 'Display Name', value = str(user.display_name), inline=False)
myStatsEmbed.add_field(name = 'Server specific nickname', value = member.nick, inline=False)
myStatsEmbed.add_field(name = 'Date joined Server', value = member.joined_at, inline=False)
myStatsEmbed.add_field(name = 'Date joined Discord', value = str(user.created_at), inline=False)
await message.channel.send(embed=myStatsEmbed)
myStatsEmbed.clear_fields()
if message.content == '$profile':
Create_user(user.id,username, nickname, discriminator, avatar_url)
profileEmbed.set_footer(text = 'Bot created by Rose8bit.')
profileEmbed.set_thumbnail(url=member.avatar_url)
profileEmbed.add_field(name = 'Username', value = str(user.name), inline=False)
profileEmbed.add_field(name = 'Server Rank', value = 'N/A')
profileEmbed.add_field(name = 'Nickname', value = member.nick, inline = False)
profileEmbed.add_field(name = 'Level', value = str(currentlvl), inline=False)
profileEmbed.add_field(name = 'XP progress', value = str(xp_progress) + '/' + str(xp_next_progress), inline=False)
profileEmbed.add_field(name = 'Moons', value = str(current_currency) + ' 🌕')
await message.channel.send(embed = profileEmbed)
profileEmbed.clear_fields()
if message.content == '$moons':
moonsEmbed.set_footer(text = 'Bot created by Rose8bit.')
moonsEmbed.set_thumbnail(url=member.avatar_url)
moonsEmbed.add_field(name = 'Username', value = str(user.name), inline=False)
moonsEmbed.add_field(name = 'Moons', value = str(current_currency) + ' 🌕')
await message.channel.send(embed = moonsEmbed)
moonsEmbed.clear_fields()
#if message.content == '$mimisun':
#Update_currency(str(user.id),'100','0')
if message.content == '$allusers':
Get_users()
global slotcost
slotcost = 2
if message.content == '$trivia' and (user.id == 461656626567577630 or user.id == 294707990303342592):
print('$trivia entered')
category = ['15','16','29','31','32','11','14']
trivia_response= requests.get("https://opentdb.com/api.php?amount=1&category=" + random.choice(category)).json()
#trivia_response = requests.get("https://opentdb.com/api.php?amount=1&category=" + random.choice(category) + "&token=" + token + "").json()
#await client.get_channel(349593648595206155)
response_code = trivia_response['response_code']
#Success - Returned results found successfully.
if response_code == 0:
global correct_answer
correct_answer = trivia_response['results'][0]['correct_answer']
decoded = html_decode(trivia_response['results'][0]["question"])
decoded_answers = []
trivia_text = '🤔 || **Trivia Time!** first correct answer wins ``'+ str(slotcost) +'`` 🌕' + '\n\n ``' + trivia_response['results'][0]['category'] + '\nDifficulty: ' + trivia_response['results'][0]['difficulty'] +'``\n' + '```' + decoded + '```\n'
choices = trivia_response['results'][0]['incorrect_answers']
#await message.channel.send('A: ' + correct_answer)
choices.append(correct_answer)
choices_num = []
i = 0
while i < len(choices):
decoded_answers.append(html_decode(choices[i]))
i += 1
choices_num.append(str(i))
counter = 1
for i in range(len(decoded_answers)):
randomWord = random.choice(decoded_answers)
#await message.channel.send(f"\n{counter}. {randomWord} \n")
trivia_text += '`' + str(counter) + '.` ' + randomWord + '\n'
if randomWord == correct_answer:
answer_option = str(counter)
counter += 1
decoded_answers.remove(randomWord)
await message.channel.send(trivia_text)
trivia_players = []
trivia_winner_id = []
trivia_winner_name = []
def check(m):
#if someone enters a guess
if m.content in choices_num:
#check if user has already submitted answer
if m.author.id in trivia_players:
print('---> ' + m.author.name + ' has already submitted a guess. Ignoring...')
#check if user has submitted the correct answer
if m.content == answer_option and m.author.id not in trivia_players:
print('---> winner found!')
#add to the list of winners...
trivia_winner_id.append(m.author.id)
trivia_winner_name.append(m.author.mention)
#user has not submitted a guess yet
else:
print('---> player added to trivia_players')
trivia_players.append(m.author.id)
#return (m.content == answer_option or m.content == '$trivia')
return (m.content == '$trivia' or m.content =='$end') and m.channel == message.channel
#return (m.content.upper().replace(" ","") == correct_answer.upper().replace(" ","") or m.content =='$trivia') and m.channel == message.channel
try:
#wait for the timer to run out, or a new trivia to start
msg = await client.wait_for('message', timeout=20, check=check)
#timer ran out or an error ocurred
except Exception as error:
print('!!!!!!' + str(error))
if not trivia_winner_id:
await message.channel.send(f"⏰ Time\'s up! No winners this round. The correct answer was `` {html_decode(correct_answer)}``. Better luck next time...\n")
#if there is one winner
else:
await message.channel.send(f"Congrats{trivia_winner_name[0]}, ``{html_decode(correct_answer)}`` is the correct answer! \n ``{str(slotcost)}``🌕 has been added to your account.\n")
Update_currency(str(client.get_user(trivia_winner_id[0]).id),str(slotcost),'0')
slotpot = 0
trivia_players.clear()
trivia_winner_id.clear()
trivia_winner_name.clear()
# $trivia was entered and a new trivia session has started
else:
if msg.content == '$end' and (user.id == 461656626567577630 or user.id == 294707990303342592):
if not trivia_winner_id:
await message.channel.send(f"No winners this round. The correct answer was `` {html_decode(correct_answer)}``. Better luck next time...")
else:
await message.channel.send(f"Congrats{trivia_winner_name[0]}, ``{html_decode(correct_answer)}`` is the correct answer! \n ``2``🌕 has been added to your account.")
Update_currency(str(client.get_user(trivia_winner_id[0]).id),'2','0')
trivia_players.clear()
#No results - Could not return results. API doesn't have enough questions
if response_code == 1:
await client.get_channel(544340495384576012).send('``Response code 1:`` **No results** Could not return results. Resetting token...')
#Invalid parameters - contains an invalid parameter. Check code
if response_code == 2:
await client.get_channel(544340495384576012).send('``Response code 2:`` **Invalid parameters** Contains an invalid parameter. Check the code 😛.')
#Token not found - Session token does not exist. Retrieving new token...
if response_code == 3:
await client.get_channel(544340495384576012).send('``Response code 3: `` **Token not found** Session token does not exist. Retrieving a new session token...')
await client.get_channel(544340495384576012).send(token_response_message)
await client.get_channel(544340495384576012).send('new token is ' + token)
#Token empty - token has returned all possible questions. Retrieving new..
if response_code == 4:
await client.get_channel(544340495384576012).send('``Response code 4:`` **Token empty** Session token has returned all possible questions. Retrieving a new session token...')
await client.get_channel(544340495384576012).send(token_response_message)
if message.content.startswith('$statsfor'):
userLookup = message.content.split(' ',1)[1]
member = discord.utils.get(message.guild.members, name = userLookup)
if member == None:
await message.channel.send('Member ``' + userLookup + '`` not found.')
else:
user = client.get_user(member.id)
await message.channel.send('Displaying stats for ' + user.mention)
myStatsEmbed.set_footer(text='Bot created by Rose8bit. ')
myStatsEmbed.set_thumbnail(url=member.avatar_url)
myStatsEmbed.add_field(name = 'Username', value = str(user.name), inline=False)
myStatsEmbed.add_field(name = 'Discriminator', value = str(user.discriminator), inline=False)
myStatsEmbed.add_field(name = 'Display Name', value = str(user.display_name), inline=False)
myStatsEmbed.add_field(name = 'Server specific nickname', value = member.nick, inline=False)
myStatsEmbed.add_field(name = 'Date joined Server', value = member.joined_at, inline=False)
myStatsEmbed.add_field(name = 'Date joined Discord', value = str(user.created_at), inline=False)
await message.channel.send(embed=myStatsEmbed)
myStatsEmbed.clear_fields()
#create a new category channel
if message.content.startswith('$newcat'):
channel = message.channel
channelName = message.content.split(' ',1)[1]
newChannel = await guild.create_category_channel(channelName)
await channel.send('Category ``'+channelName+'`` has been created.')
#create a new category channel
if message.content.startswith('$newtext'):
channel = message.channel
channelName = message.content.split(' ',1)[1]
newChannel = await guild.create_text_channel(channelName)
await channel.send('Text channel ``'+channelName+'`` has been created.')
#create new voice channel
if message.content.startswith('$newvoice'):
channel = message.channel
channelName = message.content.split(' ',1)[1]
newChannel = await guild.create_voice_channel(channelName)
await channel.send('Voice channel ``'+channelName+'`` has been created.')
#test waiting for a reaction from message author
if message.content.startswith('$thumb'):
channel = message.channel
await channel.send('Send me that 👍 reaction!')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '👍'
try:
reaction, user = await client.wait_for('reaction_add', timeout=20.0, check=check)
except:
await channel.send('👎 You took too long!')
else:
await channel.send('Awesome! Thank you :blush:')
#test waiting for a user reply
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
try:
msg = await client.wait_for('message',timeout=10.0, check=check)
except:
await channel.send('You took too long!')
else:
await channel.send('Hello {.author}!'.format(msg))
#create a poll to auto-assign roles based on emoji reactions
if message.content == '$rolepoll':
if str(message.author.id) == '461656626567577630' or str(message.author.id) == '294707990303342592':
reactEmb = discord.Embed(title='Role Menu: Games',description='React to assign yourself a role. \n\n'
+ '<:pfsmile:544288794820739152> : ``Apex Legends`` \n\n'
+ '<:fortnite:544280152507416618> : ``Fortnite`` \n\n'
+ '<:league:544349301904637954>: ``League of Legends`` \n\n'
+ '<:ow:544289940062732307> : ``Overwatch`` \n\n'
+ '<:smash:544283152886005775> : ``Smash`` \n\n'
+ '<:smite:544349358921875468> : ``Smite``',
color = 0xFC6A5B)
reactEmb.set_author(name='Assign a role',icon_url=client.user.avatar_url)
mesg = await message.channel.send(embed=reactEmb)
await mesg.add_reaction(emoji=':pfsmile:544288794820739152')
await mesg.add_reaction(emoji=':fortnite:544280152507416618')
await mesg.add_reaction(emoji=':league:544349301904637954')
await mesg.add_reaction(emoji=':ow:544289940062732307')
await mesg.add_reaction(emoji=':smash:544283152886005775')
await mesg.add_reaction(emoji=':smite:544349358921875468')
global pollMessage
global poll_message
poll_message = str(mesg.id)
else: #someone other than admins tried to use command
await message.channel.send('Sorry, you do not have permission to use that command.')
#record error-log
await client.get_channel(544340495384576012).send(message.author.mention + 'attempted to use the command $rolepoll.')
if message.content == '$raffle':
users = await reaction.users().flatten()
winner = random.choice(users)
await message.channel.send('{} has won the raffle!'.format(winner))
#slot machine
if message.content == '$slots':
emojis = []
emojis = ['🍎','🍊','🍐','🍋','🍉','🍇','🍓','🍒','🍏','🍌','🍉','🍈','🍑','🍍','🍅']
if int(current_currency)< 3:
await message.channel.send(message.author.mention + ' you do not have enough 🌕 moons to play that.')
else:
await message.channel.send('$lots 💰 || ``3`` 🌕 per play')
a = random.choice(emojis)
b = random.choice(emojis)
c = random.choice(emojis)
slotmachine = f"**[ {a} {b} {c} ]\n\n**"
if (a == b and a == c):
await message.channel.send(f"{slotmachine} 💰 || **{message.author.mention}** you rolled 3 in a row and have won ``15`` 🌕!")
Update_currency(user.id, 15, 3)
elif ((a == b) or (b == c)):
await message.channel.send(f"{slotmachine} 🎉 || **{message.author.mention}** you rolled 2 in a row and have won ``6`` 🌕!")
Update_currency(user.id, 6, 3)
else:
await message.channel.send(f"{slotmachine} 😢 || **{message.author.mention}** sorry no match, you lost. ")
Update_currency(user.id, 0, 3)
@client.event
async def on_raw_reaction_add(payload):
    """Assign a game role when a member reacts on the role-menu message.

    Dispatched every time a user adds a reaction to a message the bot can see.
    """
    if not payload.guild_id:
        # Reaction was added in a DM channel with the bot: nothing to do.
        return
    guild = client.get_guild(payload.guild_id)
    # Need the guild to get the member who reacted (and will receive the role).
    member = guild.get_member(payload.user_id)
    # Only handle reactions on the Role Menu: Games message.
    if payload.message_id != 548171468777717760:
        return
    # Map reaction emoji id -> role. Replaces the original parallel
    # role_list/reaction_list index loop.
    role_for_emoji = {
        544283152886005775: discord.utils.get(guild.roles, name="Smash Bros"),
        544280152507416618: discord.utils.get(guild.roles, name="Fortnite"),
        544289940062732307: discord.utils.get(guild.roles, name="Overwatch"),
        544288794820739152: discord.utils.get(guild.roles, name="Apex Legends"),
        544349301904637954: discord.utils.get(guild.roles, name="Smite"),
        544349358921875468: discord.utils.get(guild.roles, name="League of Legends"),
    }
    role_added = role_for_emoji.get(payload.emoji.id)
    if role_added is None:
        # Bug fix: the original left role_added unbound (NameError) when the
        # reaction emoji was not one of the six menu emojis.
        return
    # If the user does not already belong to that role, add it.
    if role_added not in member.roles:
        await member.add_roles(role_added, reason='Role Menu: Games reaction')
        # Send confirmation to the member and to the log channel.
        await member.send('✅ || ' + member.mention + ' has been given the role ' + '``' + str(role_added) + '``!')
        await client.get_channel(544340495384576012).send('✅ || ' + member.mention + ' has been given the role ' + '``' + str(role_added) + '``!')
@client.event
async def on_raw_reaction_remove(payload):
    """Remove a game role when a member un-reacts on the role-menu message.

    Dispatched every time a user removes a reaction from a message the bot
    can see.
    """
    print ('reaction removed')
    if not payload.guild_id:
        # Reaction was removed in a DM channel with the bot: nothing to do.
        return
    guild = client.get_guild(payload.guild_id)
    # Need the guild to get the member who reacted (and will lose the role).
    member = guild.get_member(payload.user_id)
    # Only handle reactions on the Role Menu: Games message.
    if payload.message_id != 548171468777717760:
        return
    # Map reaction emoji id -> role. Replaces the original parallel
    # role_list/reaction_list index loop.
    role_for_emoji = {
        544283152886005775: discord.utils.get(guild.roles, name="Smash Bros"),
        544280152507416618: discord.utils.get(guild.roles, name="Fortnite"),
        544289940062732307: discord.utils.get(guild.roles, name="Overwatch"),
        544288794820739152: discord.utils.get(guild.roles, name="Apex Legends"),
        544349301904637954: discord.utils.get(guild.roles, name="Smite"),
        544349358921875468: discord.utils.get(guild.roles, name="League of Legends"),
    }
    role_removed = role_for_emoji.get(payload.emoji.id)
    if role_removed is None:
        # Bug fix: the original left role_removed unbound (NameError) when
        # the reaction emoji was not one of the six menu emojis.
        return
    # If the user currently belongs to that role, remove it.
    if role_removed in member.roles:
        await member.remove_roles(role_removed, reason='Role Menu: Games reaction')
        # Send confirmation to the member and to the log channel.
        await member.send('🚫 || ' + member.mention + ' has been removed from the role ' + '``' + str(role_removed) + '``!')
        await client.get_channel(544340495384576012).send('🚫 || ' + member.mention + ' has been removed from the role ' + '``' + str(role_removed) + '``!')
@client.event
async def on_member_join(member):
    # Greet new members in the main chat channel.
    # NOTE(review): there is no space between 'wolfpack' and the mention in
    # the rendered message -- confirm intended.
    await client.get_channel(349593648595206155).send('🐺 || Welcome to the wolfpack' + member.mention + '!')
@client.event
async def on_guild_channel_create(channel):
    # Log every channel creation to the bot's log channel.
    await client.get_channel(544340495384576012).send('``' + channel.name + '`` channel created in server ``' + channel.guild.name + '``')
@client.event
async def on_guild_channel_delete(channel):
    # Log every channel deletion to the bot's log channel.
    await client.get_channel(544340495384576012).send('``' + channel.name + '`` channel deleted in server ``' + channel.guild.name + '``')
# Start the bot's event loop (blocks until the process exits).
client.run(bot_token)
|
#!/usr/bin/env python
import rospy
from hektar.msg import wheelVelocity
from std_msgs.msg import Float64, Int8
from dynamic_reconfigure.server import Server
from hektar.cfg import WheelControlConfig
UPPER_LIMIT = 127
LOWER_LIMIT = -127
class Callback():
    """Holds dynamically-reconfigurable wheel-control parameters and publishes
    wheel velocity commands computed from control feedback."""

    def __init__(self):
        # Base forward speed (updated via the set_speed topic or reconfigure).
        self.speed = 0
        # Gain applied to the control-effort feedback term.
        self.variation_factor = 0.0
        # Scalar / additive offsets for the left wheel to compensate for
        # hardware speed discrepancies between the two wheels.
        self.offset_multiplier = 0.0
        self.offset_addition = 0
        self.pub = rospy.Publisher("wheel_output", wheelVelocity, queue_size=1)

    def wheel_callback(self, feedback):
        """Compute left/right wheel speeds from control feedback and publish.

        The speed *difference* between the wheels is preserved: if one wheel's
        computed speed exceeds UPPER_LIMIT, the overflow is subtracted from
        the other wheel instead, then both are clamped to the valid range.
        """
        wheels = wheelVelocity()
        delta_L = 0
        delta_R = 0
        # scalar and addition offsets for left wheel to account for wheel speed
        # discrepancies
        computed_speedL = int((self.speed + (feedback.data * self.variation_factor)) \
            * self.offset_multiplier + self.offset_addition)
        if computed_speedL > UPPER_LIMIT:
            delta_L = computed_speedL - UPPER_LIMIT
        computed_speedR = int(self.speed - (feedback.data * self.variation_factor))
        if computed_speedR > UPPER_LIMIT:
            delta_R = computed_speedR - UPPER_LIMIT
        # difference between wheel speeds must be kept constant, so if one wheel goes above
        # the max allowed speed value, this difference must be subtracted from the other wheel
        computed_speedR -= delta_L
        computed_speedL -= delta_R
        # Clamp both commands into [LOWER_LIMIT, UPPER_LIMIT].
        wheels.wheelL = max(LOWER_LIMIT, min(computed_speedL, UPPER_LIMIT))
        wheels.wheelR = max(LOWER_LIMIT, min(computed_speedR, UPPER_LIMIT))
        #rospy.loginfo(rospy.get_caller_id() + " Wheels: %f, %f", wheels.wheelL, wheels.wheelR)
        self.pub.publish(wheels)

    # required to dynamically reconfigure parameters
    def callback(self, config, level):
        """dynamic_reconfigure server callback: copy new parameter values
        onto this instance and return the (unchanged) config."""
        rospy.loginfo("""Reconfigure Request: {speed}, {variation_factor}, \
            {offset_multiplier}, {offset_addition}""".format(**config))
        self.speed = config["speed"]
        self.variation_factor = config["variation_factor"]
        self.offset_multiplier = config["offset_multiplier"]
        self.offset_addition = config["offset_addition"]
        return config

    def set_speed(self, message):
        """Subscriber callback for the set_speed topic (Int8)."""
        self.speed = message.data
def control():
    """Initialize the wheel_control node and wire up server/subscribers."""
    rospy.init_node('wheel_control', anonymous=True)
    callbacker = Callback()
    # Dynamic-reconfigure server for live parameter tuning.
    srv = Server(WheelControlConfig, callbacker.callback)
    # Control effort from the PID drives the wheel speed computation.
    rospy.Subscriber('control_effort', Float64, callbacker.wheel_callback, \
        queue_size=1, tcp_nodelay=False)
    # External base-speed commands.
    rospy.Subscriber('set_speed', Int8, callbacker.set_speed, \
        queue_size=1, tcp_nodelay=False)
    rospy.spin()


if __name__ == '__main__':
    try:
        control()
    except rospy.ROSInterruptException: pass
|
yz=int(input())
mg=0
temp=yz
while(temp>0):
dig=temp%10
mg=mg+dig ** 3
temp=temp//10
if(mg==yz):
print("yes")
else:
print("no")
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Lambda
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, MaxPooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import concatenate, add
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import convert_all_kernels_in_model
from tensorflow.keras.utils import get_file
from tensorflow.keras.utils import get_source_inputs
import tensorflow.keras.backend as K
def __initial_conv_block(input, weight_decay=5e-4):
    ''' Adds an initial convolution block, with batch normalization and relu activation
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    # BatchNorm normalizes along the channel axis, whose position depends on
    # the backend image data format (channels_first -> axis 1, else last).
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # 3x3 conv without bias: the bias would be redundant before BatchNorm.
    x = Conv2D(64, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
def __initial_conv_block_imagenet(input, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    # BatchNorm axis depends on the backend image data format.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # ImageNet stem: strided 7x7 conv followed by a strided 3x3 max-pool,
    # downsampling the input by a factor of 4 overall.
    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        # Bug fix: the original conditional expression parsed as
        #   lambda z: (<channels_last slice> if cond else (lambda z: ...))
        # so in the channels_first case the Lambda returned a *function*, not
        # a tensor slice. Rewritten so both branches yield a slice. The loop
        # variable is bound via a default argument (idx=c) to avoid the
        # late-binding closure pitfall if the lambda is re-invoked later.
        x = Lambda(lambda z, idx=c:
                   z[:, :, :, idx * grouped_channels:(idx + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, idx * grouped_channels:(idx + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    # Concatenate the per-group convolutions back along the channel axis.
    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = Activation('relu')(x)
    return x
def __bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    ''' Adds a bottleneck block
    Args:
        input: input tensor
        filters: number of output filters
        cardinality: cardinality factor described number of
            grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    init = input

    # Each of the `cardinality` groups convolves this many channels.
    grouped_channels = int(filters / cardinality)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # Project the shortcut branch to 2 * filters channels (the block's output
    # width) whenever its channel count does not already match, so that the
    # residual add below is shape-compatible.
    if K.image_data_format() == 'channels_first':
        if init.shape[1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)
    else:
        if init.shape[-1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)

    # Bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand.
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    # Residual connection, then activation (post-activation ordering).
    x = add([init, x])
    x = Activation('relu')(x)
    return x
import collections
import logging
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import nltk
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
from tqdm import tqdm
from nlp.chunker import Chunker
from nlp.pattern_grammer import PatternGrammar
from nlp.pos_tagger import PosTagger
from nlp.string_cleaner import StringCleaner
MULTIPLE_WHITESPACE_REGEX = nltk.re.compile(r'\s+')
import click
TOP_PHRASE_COUNT = 1000000
def merge_two_dict(dict_x, dict_y):
    """
    Merge two dicts of lists without mutating either input.
    :param dict_x: {'a': [3, 4], 'b': [6]}
    :param dict_y: {'c': [3], 'a': [1, 2]}
    :return: {'c': [3], 'a': [3, 4, 1, 2], 'b': [6]}
    """
    # Bug fix: dict.copy() is shallow, so the original extend() mutated the
    # lists inside dict_x despite the "never modify input param" comment.
    # Copy each list so the result is fully independent of both inputs.
    dict_z = {key: list(value) for key, value in dict_x.items()}
    for key, value in dict_y.items():
        # Membership test (not truthiness) so an empty list in dict_x is
        # extended rather than silently replaced.
        if key in dict_z:
            dict_z[key].extend(value)
        else:
            dict_z[key] = list(value)
    return dict_z
def extract_chunk_dict(pos_tagged_sentence, chunker_obj):
    """Chunk one POS-tagged sentence with the given chunker.

    Kept at module level (rather than a lambda/closure) so it can be pickled
    for ProcessPoolExecutor workers via functools.partial.
    """
    return chunker_obj.chunk_pos_tagged_sentence(pos_tagged_sentence)
def get_phrase_list(grammar, pos_tagged_sentences):
    """Chunk the POS-tagged sentences with `grammar` (in a worker pool) and
    return the extracted phrases as plain strings."""
    chunker_obj = Chunker(grammar)
    merged_chunks = {}
    with ProcessPoolExecutor(max_workers=10) as pool:
        worker = partial(extract_chunk_dict, chunker_obj=chunker_obj)
        # Fold each worker's {rule: [tagged chunks]} result into one dict.
        for chunk_dict in pool.map(worker, pos_tagged_sentences):
            merged_chunks = merge_two_dict(merged_chunks, chunk_dict)
    phrases = []
    for tagged_chunks in merged_chunks.values():
        for tagged_chunk in tagged_chunks:
            # Join the chunk's tokens (dropping the POS tags) into a phrase.
            phrases.append(' '.join(token for token, _ in tagged_chunk))
    return phrases
valid_phrase_grammar_clause = ['NN_all', 'NN_CC_JJ_multi']
def get_phrases(compiled_grammar, pos_tagged_sentences, testing_clauses):
    """Collect phrases for every clause name listed in `testing_clauses`."""
    all_phrases = []
    for clause in testing_clauses:
        all_phrases.extend(
            get_phrase_list(grammar=compiled_grammar[clause],
                            pos_tagged_sentences=pos_tagged_sentences))
    return all_phrases
def frequent_phrases(text, top_k):
    """Return the top_k most common phrases in `text` as (phrase, count)
    tuples, using the noun-phrase grammar clauses below."""
    sentences = nltk.sent_tokenize(text)
    # Grammar clause names used to chunk candidate phrases.
    valid_clauses = ['NN_all', 'NN_CC_JJ_multi']
    compiled_grammar = PatternGrammar().init_all_clause()
    phrase_list = sentence_phrase_extract(compiled_grammar, sentences, valid_clauses)
    count_object = collections.Counter(phrase_list)
    return count_object.most_common(n=top_k)
def sentence_phrase_extract(compiled_grammar, sentences, valid_clauses):
    """POS-tag each sentence, then extract phrases for the given clauses."""
    pos_tagged_sentences = [PosTagger(sentence=sentence).pos_tag() for sentence in sentences]
    phrase_list = get_phrases(compiled_grammar, pos_tagged_sentences, valid_clauses)
    return phrase_list
def extract_phrases(filepath):
    """Read the file at `filepath` in 10000-line batches and return a dict of
    the most frequent phrases across the whole file ({phrase: count})."""
    with open(filepath, 'r') as file:
        file_read_iterator = file.readlines()
    logging.info('Initializing for roller coaster ride')
    overall_top_phrases_dict = dict()
    for batch_lines in split_every(size=10000, iterable=tqdm(file_read_iterator, unit='line processed', ncols=120)):
        logging.debug('Length of line being processed:{}'.format(len(batch_lines)))
        logging.debug('Length of single-line in batch being processed:{}'.format(len(batch_lines[0])))
        lines_list = [StringCleaner.clean(line).rstrip('\n') for line in batch_lines]
        text = ' '.join(lines_list)
        logging.debug('Processing text:{}..'.format(text[:100]))
        batch_top_phrases_dict = dict(frequent_phrases(text, top_k=100))
        # Bug fix: the original called update_top_phrase_dict twice per batch
        # (the first call's return value was discarded, but the function
        # mutates the overall dict in place), double-counting every phrase.
        overall_top_phrases_dict = update_top_phrase_dict(overall_top_phrases_dict, batch_top_phrases_dict)
        logging.debug('Got total {} frequent phrases.'.format(len(batch_top_phrases_dict)))
        logging.debug('Frequent phrases in batch:%s ...', list(batch_top_phrases_dict.keys())[:5])
    return overall_top_phrases_dict
def update_top_phrase_dict(overall_top_phrases_dict, batch_top_phrases_dict):
    """Fold the batch phrase counts into the overall dict (in place) and
    return the TOP_PHRASE_COUNT highest-frequency phrases, sorted."""
    for phrase, freq in batch_top_phrases_dict.items():
        if phrase in overall_top_phrases_dict:
            overall_top_phrases_dict[phrase] += freq
        else:
            overall_top_phrases_dict[phrase] = freq
    ranked = sorted(overall_top_phrases_dict.items(), key=lambda item: item[1], reverse=True)
    return dict(ranked[:TOP_PHRASE_COUNT])
def get_ngrams(text, n):
    """Return all word n-grams of `text` as space-joined strings."""
    n_grams = ngrams(word_tokenize(text), n)
    return [' '.join(grams).strip() for grams in n_grams]
from itertools import count
from itertools import groupby
def split_every(size, iterable):
    """Yield consecutive chunks of `size` items from `iterable` as lists;
    the final chunk may be shorter. Works lazily on any iterable."""
    counter = count()
    # Items with the same (index // size) belong to the same chunk.
    for _, chunk in groupby(iterable, key=lambda _item: next(counter) // size):
        yield list(chunk)
@click.command()
@click.option('--input_file', '-i', help='The input file need to be processed')
@click.option('--output_file', '-o', help='The out file need to be written after processing')
@click.option('--frequent_phrases_dict_path', '-fpd', help='frequent_phrases_dict either given else extracted from text',
              default=None)
def process_large_text_file(input_file, output_file, frequent_phrases_dict_path):
    """Rewrite input_file to output_file, joining each frequent two-word
    phrase with an underscore (e.g. 'ice cream' -> 'ice_cream')."""
    logging.info('Evaluating file: {} for extracting frequent tags'.format(input_file))
    # Phrases must occur more than this many times to be kept.
    threshold_freq = 10
    if frequent_phrases_dict_path is None:
        frequent_phrases_dict = extract_phrases(input_file)
        # NOTE(review): on this branch frequent_phrases_dict_path is None, so
        # this pickles to the literal path '/tmp/None' -- confirm intended.
        pd.to_pickle(frequent_phrases_dict, '/tmp/{}'.format(frequent_phrases_dict_path))
        logging.info('Got a frequent_phrases_dict of size:{}'.format(len(frequent_phrases_dict)))
        frequent_phrases_dict = {key: value for key, value in frequent_phrases_dict.items() if value > threshold_freq}
    else:
        # NOTE(review): a pre-built dict loaded here is not pruned by
        # threshold_freq, unlike the branch above -- confirm intended.
        frequent_phrases_dict = pd.read_pickle(frequent_phrases_dict_path)
    logging.info('Got a frequent_phrases_dict of size:{} after pruning with threshold of {threshold_freq}.'.format(
        len(frequent_phrases_dict), threshold_freq=threshold_freq))
    frequent_phrases = set(frequent_phrases_dict.keys())
    with open(input_file, "r") as review_text, open(output_file, "w") as updated_review_text:
        lines = review_text.readlines()
        total = len(lines)
        for index, line in tqdm(enumerate(lines), total=total, unit='line'):
            # Replace every frequent bigram in the line with its underscored form.
            two_grams = get_ngrams(line, 2)
            for gram in two_grams:
                if gram in frequent_phrases:
                    line = line.replace(gram, '_'.join(gram.split()))
            # NOTE(review): lines from readlines() keep their trailing '\n',
            # so '+ "\n"' writes a blank line between rows -- confirm intended.
            updated_review_text.writelines(line + '\n')
    logging.info('Output file: %s is written with most frequent phrases updated', output_file)


if __name__ == '__main__':
    logging.basicConfig(format='[%(asctime)s] %(levelname)s : %(message)s', level=logging.INFO)
    process_large_text_file()
|
import sys
import uuid
if sys.version_info[0] == 2: # noqa
from io import BytesIO as StringIO
else:
from io import StringIO
import warnings
from base64 import b64encode
import numpy as np
from matplotlib.pyplot import cm
from matplotlib.colors import Colormap
from astropy import units as u
from traitlets import HasTraits, validate, observe
from .traits import Any, Unicode, Float, Color, Bool, to_hex
__all__ = ['LayerManager', 'TableLayer']
VALID_FRAMES = ['sky', 'ecliptic', 'galactic', 'sun', 'mercury', 'venus',
'earth', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune',
'pluto', 'moon', 'io', 'europa', 'ganymede', 'callisto']
VALID_LON_UNITS = {u.deg: 'degrees',
u.hour: 'hours',
u.hourangle: 'hours'}
# NOTE: for cartesian coordinates, we can also allow custom units
VALID_ALT_UNITS = {u.m: 'meters',
u.imperial.foot: 'feet',
u.imperial.inch: 'inches',
u.imperial.mile: 'miles',
u.km: 'kilometers',
u.au: 'astronomicalUnits',
u.lyr: 'lightYears',
u.pc: 'parsecs',
u.Mpc: 'megaParsecs'}
VALID_ALT_TYPES = ['depth', 'altitude', 'distance', 'seaLevel', 'terrain']
VALID_MARKER_TYPES = ['gaussian', 'point', 'circle', 'square', 'pushpin']
VALID_MARKER_SCALES = ['screen', 'world']
# The following are columns that we add dynamically and internally, so we need
# to make sure they have unique names that won't clash with existing columns
SIZE_COLUMN_NAME = str(uuid.uuid4())
CMAP_COLUMN_NAME = str(uuid.uuid4())
def guess_lon_lat_columns(colnames):
    """
    Given column names in a table, return the columns to use for lon/lat, or
    None/None if no high confidence possibilities.
    """
    # All comparisons are case-insensitive.
    lowered = [name.lower() for name in colnames]
    for lon_key, lat_key in (('ra', 'dec'), ('lon', 'lat'), ('lng', 'lat')):
        # Exact (case-insensitive) matches take priority; note that the
        # lowercase key itself is returned in this case.
        if lowered.count(lon_key) == 1 and lowered.count(lat_key) == 1:
            return lon_key, lat_key
        # Otherwise accept a unique prefix match on each of lon and lat,
        # returning the original (case-preserved) column names.
        lon_hits = [name.startswith(lon_key) for name in lowered]
        lat_hits = [name.startswith(lat_key) for name in lowered]
        if sum(lon_hits) == 1 and sum(lat_hits) == 1:
            return colnames[lon_hits.index(True)], colnames[lat_hits.index(True)]
    # We don't check for cases where lon/lat are inside the name but not at
    # the start since that might be e.g. for proper motions (pm_ra) or
    # errors (dlat).
    return None, None
def pick_unit_if_available(unit, valid_units):
    """Return the entry of *valid_units* equal to *unit*, or *unit* itself.

    Equality (not identity) is checked so that an equivalent unit object is
    normalized onto the canonical instance stored in *valid_units*.
    """
    matches = [candidate for candidate in valid_units if candidate == unit]
    return matches[0] if matches else unit
class LayerManager(object):
    """
    A simple container for layers.
    """

    def __init__(self, parent=None):
        self._layers = []
        self._parent = parent

    def add_data_layer(self, table=None, frame='Sky', **kwargs):
        """
        Add a data layer to the current view

        Parameters
        ----------
        """
        # Reject unknown reference frames up front; the comparison is
        # case-insensitive and the canonical capitalized form is forwarded.
        if frame.lower() not in VALID_FRAMES:
            raise ValueError('frame should be one of {0}'.format('/'.join(sorted(str(x) for x in VALID_FRAMES))))
        frame = frame.capitalize()
        if table is None:
            # NOTE: in future we may allow different arguments such as e.g.
            # orbit=, hence why we haven't made this a positional argument.
            raise ValueError("The table argument is required")
        layer = TableLayer(self._parent, table=table, frame=frame, **kwargs)
        self._add_layer(layer)
        return layer

    def _add_layer(self, layer):
        # A layer may be registered at most once; the back-reference lets the
        # layer notify us when it is removed.
        if layer in self._layers:
            raise ValueError("layer already exists in layer manager")
        self._layers.append(layer)
        layer._manager = self

    def remove_layer(self, layer):
        if layer not in self._layers:
            raise ValueError("layer not in layer manager")
        layer.remove()
        # By this point, the call to remove() above may already have resulted
        # in the layer getting removed, so we check first if it's still present.
        if layer in self._layers:
            self._layers.remove(layer)

    def __len__(self):
        return len(self._layers)

    def __iter__(self):
        return iter(self._layers)

    def __getitem__(self, item):
        return self._layers[item]

    def __str__(self):
        if not self._layers:
            return 'Layer manager with no layers'
        header = 'Layer manager with {0} layers:\n\n'.format(len(self))
        body = ''.join('  [{0}]: {1}\n'.format(index, layer)
                       for index, layer in enumerate(self._layers))
        return header + body

    __repr__ = __str__
class TableLayer(HasTraits):
    """
    A layer where the data is stored in an :class:`~astropy.table.Table`
    """
    # Traits tagged with wwt='...' are mirrored to the WWT front-end setting
    # of that name by _on_trait_change whenever they change.
    lon_att = Unicode(help='The column to use for the longitude').tag(wwt='lngColumn')
    lon_unit = Any(help='The units to use for longitude').tag(wwt='raUnits')
    lat_att = Unicode(help='The column to use for the latitude').tag(wwt='latColumn')
    alt_att = Unicode(help='The column to use for the altitude').tag(wwt='altColumn')
    alt_unit = Any(help='The units to use for the altitude').tag(wwt='altUnit')
    alt_type = Unicode(help='The type of altitude').tag(wwt='altType')
    size_scale = Float(10, help='The factor by which to scale the size of the points').tag(wwt='scaleFactor')
    # NOTE: we deliberately don't link size_att to sizeColumn because we need to
    # compute the sizes ourselves based on the min/max and then use the
    # resulting column.
    size_att = Unicode(help='The column to use for the size')
    size_vmin = Float(None, allow_none=True)
    size_vmax = Float(None, allow_none=True)
    # NOTE: we deliberately don't link cmap_att to colorMapColumn because we
    # need to compute the colors ourselves based on the min/max and then use the
    # resulting column.
    cmap_att = Unicode(help='The column to use for the colormap')
    cmap_vmin = Float(None, allow_none=True)
    cmap_vmax = Float(None, allow_none=True)
    cmap = Any(cm.viridis, help='The Matplotlib colormap')
    color = Color('white', help='The color of the markers').tag(wwt='color')
    opacity = Float(1, help='The opacity of the markers').tag(wwt='opacity')
    marker_type = Unicode('gaussian', help='The type of marker').tag(wwt='plotType')
    marker_scale = Unicode('screen', help='Whether the scale is defined in '
                           'world or pixel coordinates').tag(wwt='markerScale')
    far_side_visible = Bool(False, help='Whether markers on the far side are '
                            'visible').tag(wwt='showFarSide')
    # TODO: support:
    # xAxisColumn
    # yAxisColumn
    # zAxisColumn
    # xAxisReverse
    # yAxisReverse
    # zAxisReverse
    def __init__(self, parent=None, table=None, frame=None, **kwargs):
        """Create the layer in the WWT front-end and push default settings."""
        # TODO: need to validate reference frame
        self.table = table
        self.frame = frame
        self.parent = parent
        # Unique id used in every message exchanged with the front-end.
        self.id = str(uuid.uuid4())
        # Attribute to keep track of the manager, so that we can notify the
        # manager if a layer is removed.
        self._manager = None
        self._removed = False
        self._initialize_layer()
        # Force defaults
        self._on_trait_change({'name': 'alt_type', 'new': self.alt_type})
        self._on_trait_change({'name': 'size_scale', 'new': self.size_scale})
        self._on_trait_change({'name': 'color', 'new': self.color})
        self._on_trait_change({'name': 'opacity', 'new': self.opacity})
        self._on_trait_change({'name': 'marker_type', 'new': self.marker_type})
        self._on_trait_change({'name': 'marker_scale', 'new': self.marker_scale})
        self._on_trait_change({'name': 'far_side_visible', 'new': self.far_side_visible})
        self._on_trait_change({'name': 'size_att', 'new': self.size_att})
        self._on_trait_change({'name': 'cmap_att', 'new': self.cmap_att})
        self.observe(self._on_trait_change, type='change')
        # Reject unknown keyword arguments before handing them to HasTraits.
        if any(key not in self.trait_names() for key in kwargs):
            raise KeyError('a key doesn\'t match any layer trait name')
        super(TableLayer, self).__init__(**kwargs)
        # Guess sensible lon/lat columns unless they were given explicitly.
        lon_guess, lat_guess = guess_lon_lat_columns(self.table.colnames)
        if 'lon_att' not in kwargs:
            self.lon_att = lon_guess or self.table.colnames[0]
        if 'lat_att' not in kwargs:
            self.lat_att = lat_guess or self.table.colnames[1]
    @validate('lon_unit')
    def _check_lon_unit(self, proposal):
        """Validate/normalize lon_unit; raises ValueError for invalid units."""
        # Pass the proposal to Unit - this allows us to validate the unit,
        # and allows strings to be passed.
        unit = u.Unit(proposal['value'])
        unit = pick_unit_if_available(unit, VALID_LON_UNITS)
        if unit in VALID_LON_UNITS:
            return unit
        else:
            raise ValueError('lon_unit should be one of {0}'.format('/'.join(sorted(str(x) for x in VALID_LON_UNITS))))
    @validate('alt_unit')
    def _check_alt_unit(self, proposal):
        """Validate/normalize alt_unit; raises ValueError for invalid units."""
        # Pass the proposal to Unit - this allows us to validate the unit,
        # and allows strings to be passed.
        with u.imperial.enable():
            unit = u.Unit(proposal['value'])
        unit = pick_unit_if_available(unit, VALID_ALT_UNITS)
        if unit in VALID_ALT_UNITS:
            return unit
        else:
            raise ValueError('alt_unit should be one of {0}'.format('/'.join(sorted(str(x) for x in VALID_ALT_UNITS))))
    @validate('alt_type')
    def _check_alt_type(self, proposal):
        """Accept only altitude types listed in VALID_ALT_TYPES."""
        if proposal['value'] in VALID_ALT_TYPES:
            return proposal['value']
        else:
            raise ValueError('alt_type should be one of {0}'.format('/'.join(str(x) for x in VALID_ALT_TYPES)))
    @validate('marker_type')
    def _check_marker_type(self, proposal):
        """Accept only marker types listed in VALID_MARKER_TYPES."""
        if proposal['value'] in VALID_MARKER_TYPES:
            return proposal['value']
        else:
            raise ValueError('marker_type should be one of {0}'.format('/'.join(str(x) for x in VALID_MARKER_TYPES)))
    @validate('marker_scale')
    def _check_marker_scale(self, proposal):
        """Accept only marker scales listed in VALID_MARKER_SCALES."""
        if proposal['value'] in VALID_MARKER_SCALES:
            return proposal['value']
        else:
            raise ValueError('marker_scale should be one of {0}'.format('/'.join(str(x) for x in VALID_MARKER_SCALES)))
    @validate('cmap')
    def _check_cmap(self, proposal):
        """Allow a colormap name or Colormap instance; normalize to Colormap."""
        if isinstance(proposal['value'], str):
            return cm.get_cmap(proposal['value'])
        elif not isinstance(proposal['value'], Colormap):
            raise TypeError('cmap should be set to a Matplotlib colormap')
        else:
            return proposal['value']
    @observe('alt_att')
    def _on_alt_att_change(self, *value):
        """When alt_att changes, try to infer alt_unit from the column's unit."""
        # Check if we can set the unit of the altitude automatically
        if len(self.alt_att) == 0:
            return
        column = self.table[self.alt_att]
        unit = pick_unit_if_available(column.unit, VALID_ALT_UNITS)
        if unit in VALID_ALT_UNITS:
            self.alt_unit = unit
        elif unit is not None:
            warnings.warn('Column {0} has units of {1} but this is not a valid '
                          'unit of altitude - set the unit directly with '
                          'alt_unit'.format(self.alt_att, unit), UserWarning)
    @observe('lon_att')
    def _on_lon_att_change(self, *value):
        """When lon_att changes, try to infer lon_unit from the column's unit."""
        # Check if we can set the unit of the altitude automatically
        if len(self.lon_att) == 0:
            return
        column = self.table[self.lon_att]
        unit = pick_unit_if_available(column.unit, VALID_LON_UNITS)
        if unit in VALID_LON_UNITS:
            self.lon_unit = unit
        elif unit is not None:
            warnings.warn('Column {0} has units of {1} but this is not a valid '
                          'unit of longitude - set the unit directly with '
                          'lon_unit'.format(self.lon_att, unit), UserWarning)
    @observe('size_att')
    def _on_size_att_change(self, *value):
        """Reset size_vmin/vmax from the new size column (or disable sizing)."""
        # Set the min/max levels automatically based on the min/max values
        if len(self.size_att) == 0:
            # No size column: tell the front-end to stop using one (-1 sentinel).
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='sizeColumn', value=-1)
            return
        self.size_vmin = None
        self.size_vmax = None
        column = self.table[self.size_att]
        self.size_vmin = np.nanmin(column)
        self.size_vmax = np.nanmax(column)
    @observe('size_vmin', 'size_vmax')
    def _on_size_vmin_vmax_change(self, *value):
        """Recompute the hidden size column and push it to the front-end."""
        # Update the size column in the table
        if len(self.size_att) == 0 or self.size_vmin is None or self.size_vmax is None:
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='sizeColumn', value=-1)
            return
        column = self.table[self.size_att]
        # Normalize the column to [0, 1], then scale to a 0-10 size range.
        size = (column - self.size_vmin) / (self.size_vmax - self.size_vmin) * 10
        self.table[SIZE_COLUMN_NAME] = size
        self.parent._send_msg(event='table_layer_update', id=self.id,
                              table=self._table_b64)
        self.parent._send_msg(event='table_layer_set', id=self.id,
                              setting='pointScaleType', value=0)
        self.parent._send_msg(event='table_layer_set', id=self.id,
                              setting='sizeColumn', value=SIZE_COLUMN_NAME)
    @observe('cmap_att')
    def _on_cmap_att_change(self, *value):
        """Reset cmap_vmin/vmax from the new cmap column (or disable mapping)."""
        # Set the min/max levels automatically based on the min/max values
        if len(self.cmap_att) == 0:
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='colorMapColumn', value=-1)
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='_colorMap', value=0)
            return
        self.cmap_vmin = None
        self.cmap_vmax = None
        column = self.table[self.cmap_att]
        self.cmap_vmin = np.nanmin(column)
        self.cmap_vmax = np.nanmax(column)
    @observe('cmap_vmin', 'cmap_vmax', 'cmap')
    def _on_cmap_vmin_vmax_change(self, *value):
        """Recompute the hidden per-point color column and push it to WWT."""
        # Update the cmap column in the table
        if len(self.cmap_att) == 0 or self.cmap_vmin is None or self.cmap_vmax is None:
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='colorMapColumn', value=-1)
            self.parent._send_msg(event='table_layer_set', id=self.id,
                                  setting='_colorMap', value=0)
            return
        column = self.table[self.cmap_att]
        values = (column - self.cmap_vmin) / (self.cmap_vmax - self.cmap_vmin)
        # PERF: vectorize the calculation of the hex strings
        # Drop the alpha channel before converting RGB triples to hex.
        rgb = self.cmap(values)[:, :-1]
        hex_values = [to_hex(x) for x in rgb]
        self.table[CMAP_COLUMN_NAME] = hex_values
        self.parent._send_msg(event='table_layer_update', id=self.id,
                              table=self._table_b64)
        self.parent._send_msg(event='table_layer_set', id=self.id,
                              setting='_colorMap', value=3)
        self.parent._send_msg(event='table_layer_set', id=self.id,
                              setting='colorMapColumn', value=CMAP_COLUMN_NAME)
    @property
    def _table_b64(self):
        """The table serialized to base64-encoded CSV for the front-end."""
        # TODO: We need to make sure that the table has ra/dec columns since
        # WWT absolutely needs that upon creation.
        s = StringIO()
        self.table.write(s, format='ascii.basic', delimiter=',', comment=False)
        s.seek(0)
        # Enforce Windows line endings
        # TODO: check if this needs to be different on Windows
        csv = s.read().replace('\n', '\r\n')
        return b64encode(csv.encode('ascii', errors='replace')).decode('ascii')
    def _initialize_layer(self):
        """Ask the front-end to create the layer with the serialized table."""
        self.parent._send_msg(event='table_layer_create',
                              id=self.id, table=self._table_b64, frame=self.frame)
    def update_data(self, table=None):
        """
        Update the underlying data.
        """
        # Shallow copy: column data is shared with the caller's table.
        self.table = table.copy(copy_data=False)
        self.parent._send_msg(event='table_layer_update', id=self.id, table=self._table_b64)
        # Re-validate the column attributes against the new table's columns.
        if len(self.alt_att) > 0:
            if self.alt_att in self.table.colnames:
                self._on_alt_att_change()
            else:
                self.alt_att = ''
        lon_guess, lat_guess = guess_lon_lat_columns(self.table.colnames)
        if self.lon_att in self.table.colnames:
            self._on_lon_att_change()
        else:
            self.lon_att = lon_guess or self.table.colnames[0]
        if self.lat_att not in self.table.colnames:
            self.lat_att = lat_guess or self.table.colnames[1]
    def remove(self):
        """
        Remove the layer.
        """
        # Idempotent: repeated calls are no-ops.
        if self._removed:
            return
        self.parent._send_msg(event='table_layer_remove', id=self.id)
        self._removed = True
        if self._manager is not None:
            self._manager.remove_layer(self)
    def _on_trait_change(self, changed):
        # This method gets called anytime a trait gets changed. Since this class
        # gets inherited by the Jupyter widgets class which adds some traits of
        # its own, we only want to react to changes in traits that have the wwt
        # metadata attribute (which indicates the name of the corresponding WWT
        # setting).
        wwt_name = self.trait_metadata(changed['name'], 'wwt')
        if wwt_name is not None:
            value = changed['new']
            # Units are sent as their WWT string names, not astropy objects.
            if changed['name'] == 'alt_unit':
                value = VALID_ALT_UNITS[self._check_alt_unit({'value': value})]
            elif changed['name'] == 'lon_unit':
                value = VALID_LON_UNITS[self._check_lon_unit({'value': value})]
            self.parent._send_msg(event='table_layer_set',
                                  id=self.id,
                                  setting=wwt_name,
                                  value=value)
    def __str__(self):
        return 'TableLayer with {0} markers'.format(len(self.table))
    def __repr__(self):
        return '<{0}>'.format(str(self))
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.datasets import load_sample_image
# FIX: sklearn.datasets.samples_generator was removed in scikit-learn 0.22+;
# make_blobs has always been importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
sns.set()
'''
K-Means
'''
# Generate random sample points.
X, y = make_blobs(n_samples=300, centers=4, random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], s=50)
plt.show()
# Cluster the sample data above with 4 cluster centers.
est = KMeans(4)
est.fit(X)
y_kmeans = est.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='rainbow')
plt.show()
'''
Handwritten-digit application
'''
# Load the data.
digits = load_digits()
# Build the model.
est = KMeans(n_clusters=10)
clusters = est.fit_predict(digits.data)
print(est.cluster_centers_.shape)  # (10, 64)
# Display the 10 cluster-center "digits" (each center is a 64-pixel image).
fig = plt.figure(figsize=(8, 3))
for i in range(10):
    ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])
    ax.imshow(est.cluster_centers_[i].reshape((8, 8)), cmap=plt.cm.binary)
plt.show()
'''
Image color-compression application
'''
# Load the sample image.
china = load_sample_image("china.jpg")
plt.imshow(china)
plt.grid(False)
plt.show()
# Show the image dimensions.
print(china.shape)  # (427, 640, 3)
# Reshape the image into an (n_pixels, 3) RGB array in [0, 1].
X = (china / 255.0).reshape(-1, 3)
print(X.shape)  # (273280, 3)
# Downsample the image to speed up the clustering.
image = china[::3, ::3]
n_colors = 64
X = (image / 255.0).reshape(-1, 3)
model = KMeans(n_colors)
labels = model.fit_predict(X)
colors = model.cluster_centers_
# Replace every pixel by its cluster-center color.
new_image = colors[labels].reshape(image.shape)
new_image = (255 * new_image).astype(np.uint8)
# Compare the original with the color-compressed image.
with sns.axes_style('white'):
    plt.figure()
    plt.imshow(image)
    plt.title('input')
    plt.figure()
    plt.imshow(new_image)
    plt.title('{0} colors'.format(n_colors))
    plt.show()
import redis
import time
import threading
class Listener(threading.Thread):
    """Background thread that prints every message matching a redis pattern
    subscription, until 'shutdown' is published on the '__admin__' channel."""
    def __init__(self, r, p):
        # r: a redis client instance; p: channel pattern for psubscribe.
        threading.Thread.__init__(self)
        self.redis = r
        self.pubsub = self.redis.pubsub()
        self.pubsub.psubscribe(p)
    def run(self):
        for m in self.pubsub.listen():
            # Pattern subscriptions deliver 'pmessage' items; skip the
            # subscription-confirmation entries.
            if 'pmessage' != m['type']:
                continue
            # Administrative kill switch: publish 'shutdown' on '__admin__'.
            if '__admin__' == m['channel'].decode("utf-8") and 'shutdown' == m['data'].decode("utf-8"):
                print ('Listener shutting down, bye bye.')
                break
            print ('[{}]: {}'.format(m['channel'].decode("utf-8") , m['data'].decode("utf-8") ))
            # channel + data
            if 'register' == m['channel'].decode("utf-8"):
                print ('Register :', m['data'].decode("utf-8"))
if __name__ == "__main__":
r = redis.StrictRedis(host='localhost', port=6379, db=0)
#r = redis.StrictRedis()
client = Listener(r, '*')
client.start()
|
import sc2, sys
from __init__ import run_ladder_game
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer, Human
import random
# Load bot
from Overmind import Overmind
# Instantiate the Zerg bot once; reused for both ladder and local games.
bot = Bot(Race.Zerg, Overmind())
# Start game
if __name__ == '__main__':
    if "--LadderServer" in sys.argv:
        # Ladder game started by LadderManager
        print("Starting ladder game...")
        run_ladder_game(bot)
    else:
        # Local game
        print("Starting local game...")
        # random.choice over a one-element list: placeholder for the larger
        # map pool kept in the commented lines below.
        map_name = random.choice(["CatalystLE"])
        #map_name = random.choice(["ProximaStationLE", "NewkirkPrecinctTE", "OdysseyLE", "MechDepotLE", "AscensiontoAiurLE", "BelShirVestigeLE"])
        #map_name = "(2)16-BitLE"
        sc2.run_game(sc2.maps.get(map_name), [
            #Human(Race.Terran),
            bot,
            Computer(Race.Random, Difficulty.VeryHard) # CheatInsane VeryHard
        ], realtime=False, save_replay_as="Example.SC2Replay")
|
from SupportClasses.DatasetEmbedder import DatasetEmbedder
from SupportClasses.TrainTestSplitter import TrainTestSplitter
class PreProcessor:
    """Embed a raw dataset and split it into train/test partitions.

    NOTE(review): a commented-out alternative constructor (selecting between a
    flat and a hierarchical embedder) was removed as dead code.
    """

    def preProcess(self, data, trainSplitPercentage=0.8, labelPosition='last', embeddingMode='none'):
        """Embed *data* and split it into training and test sets.

        Parameters
        ----------
        data : raw dataset in the format understood by DatasetEmbedder
        trainSplitPercentage : fraction of samples used for training
        labelPosition : where the label sits in each record (default 'last')
        embeddingMode : embedding strategy forwarded to DatasetEmbedder

        Returns
        -------
        (trainData, trainLabels, testData, testLabels, labelDictionary)
        """
        dataEmbedder = DatasetEmbedder(data, labelPosition, embeddingMode)
        # Split the padded data / embedded labels into train and test parts.
        tts = TrainTestSplitter(trainSplitPercentage,
                                dataEmbedder.getPaddedData(),
                                dataEmbedder.getEmbeddedLabels())
        (trainData, trainLabels,
         testData, testLabels) = tts.getTrainTestSplit()
        # Mapping from embedded label indices back to the original labels.
        labelDictionary = dataEmbedder.getLabelDictionary()
        return trainData, trainLabels, testData, testLabels, labelDictionary
|
"""add article
Revision ID: 36c61afe3519
Revises: 519e5b696ae4
Create Date: 2015-11-24 16:50:25.083234
"""
# revision identifiers, used by Alembic.
revision = '36c61afe3519'
down_revision = '519e5b696ae4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``article`` table with a foreign key to ``users``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('article',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('details', sa.UnicodeText(), nullable=False),
    sa.Column('digest', sa.UnicodeText(), nullable=False),
    sa.Column('disabled', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the ``article`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('article')
    ### end Alembic commands ###
|
from django.contrib import admin
from django.db.models import Q
from django.utils.translation import gettext as _
from .models.post import Post
class InputFilter(admin.SimpleListFilter):
    """Base class for admin list filters driven by a free-text input box."""
    # Custom template rendering an <input> instead of the usual choice links.
    template = 'admin/input_filter.html'
    def lookups(self, request, model_admin):
        # Dummy, required to show the filter.
        return ((),)
    def choices(self, changelist):
        # Grab only the "all" option.
        all_choice = next(super().choices(changelist))
        # Preserve every other active GET parameter so that submitting this
        # filter's input does not drop the other filters.
        all_choice['query_parts'] = (
            (k, v)
            for k, v in changelist.get_filters_params().items()
            if k != self.parameter_name
        )
        yield all_choice
class StockFilter(InputFilter):
    """Admin text filter matching posts whose ``stock`` equals the input."""
    parameter_name = 'stock'
    title = _('Stock')

    def queryset(self, request, queryset):
        """Apply an exact ``stock`` match, or leave the queryset unfiltered.

        Returning ``queryset`` unchanged when no value was entered is
        equivalent to the implicit ``None`` return Django also accepts, but
        makes the method total and explicit.
        """
        if self.value() is None:
            return queryset
        stock = self.value()
        return queryset.filter(
            Q(stock=stock)
        )
class SenderNameFilter(InputFilter):
    """Admin text filter matching posts whose ``senderName`` equals the input."""
    parameter_name = 'senderName'
    title = _('Sender Name')

    def queryset(self, request, queryset):
        """Apply an exact ``senderName`` match, or leave the queryset unfiltered.

        Returning ``queryset`` unchanged when no value was entered is
        equivalent to the implicit ``None`` return Django also accepts, but
        makes the method total and explicit.
        """
        if self.value() is None:
            return queryset
        senderName = self.value()
        return queryset.filter(
            Q(senderName=senderName)
        )
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    # Admin configuration for Post: visible columns, searchable fields, and
    # the sidebar filters (two free-text filters plus two choice filters).
    list_display = ('messageId', 'content', 'senderName', 'channelName', 'stock', 'sentiment', 'source')
    search_fields = ('stock', 'sentiment', 'senderName', 'channelName', 'source')
    list_filter = (StockFilter, SenderNameFilter, 'sentiment', 'source')
|
import tempfile
import os
import subprocess
from django.conf import settings
from django.db import connection
class DbUtil(object):
    """
    a set of utility method to access db

    NOTE(review): the class body below executes at import time, so importing
    this module requires configured Django settings and a usable database
    connection.
    """
    def __init__(self):
        raise Exception("Utility class can't be instantiated.")
    _database = settings.DATABASES["default"]
    _env = os.environ.copy()
    # pg_dump invocation producing a plain-text, schema-only dump without
    # owner/privilege/security-label/tablespace clutter.
    _table_schema_dump_cmd = ["pg_dump", "-h", _database["HOST"], "-d", _database["NAME"], "-U", _database["USER"], "-F", "p", "-w", "-x", "-O", "--no-security-labels", "--no-tablespaces", "-s"]
    if 'PASSWORD' in _database and _database['PASSWORD'].strip():
        # pg_dump runs with -w (never prompt); pass the password via the env.
        _env["PGPASSWORD"] = _database["PASSWORD"]
    if _database["PORT"]:
        _table_schema_dump_cmd += ["-p", str(_database["PORT"])]
    _cursor = connection.cursor()
    # Primary-key ('p') and unique ('u') constraints defined on a table.
    _query_index_constraint_sql = """
    SELECT s.conname ,s.contype
    FROM pg_constraint s JOIN pg_class c ON s.conrelid = c.oid JOIN pg_namespace n on c.relnamespace = n.oid
    WHERE n.nspname='{0}' and c.relname='{1}' and s.contype in ('p','u')
    """
    # All indexes on a table, with a flag telling whether each backs the PK.
    _query_index_sql = """
    SELECT ci.relname,i.indisprimary
    FROM pg_index i JOIN pg_class ci ON i.indexrelid = ci.oid JOIN pg_class ct ON i.indrelid = ct.oid JOIN pg_namespace np on ct.relnamespace = np.oid
    WHERE np.nspname='{0}' and ct.relname='{1}'
    """
    @staticmethod
    def get_create_table_sql(schema, table):
        """Return the CREATE TABLE statement(s) for schema.table via pg_dump.

        Raises Exception if pg_dump writes anything to stderr.
        """
        # pg_dump writes into a temp file that we read back and then delete.
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()
        cmd = DbUtil._table_schema_dump_cmd + ["-t", schema + "." + table, "-f", f.name]
        output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=DbUtil._env).communicate()
        if output[1].strip():
            raise Exception(output[1])
        try:
            # FIX: the original referenced an unbound 'reader' in its finally
            # clause when open() failed (NameError) and skipped the unlink in
            # that case; a context manager plus an unconditional unlink fixes
            # both problems.
            with open(f.name, 'r') as reader:
                # Keep only meaningful lines: drop SET commands, SQL comments
                # and blank lines emitted by pg_dump.
                return ''.join([s for s in reader if not (s.startswith('SET') or s.startswith('--')) and s.strip()])
        finally:
            os.unlink(f.name)
    @staticmethod
    def drop_all_indexes(schema, table, include_pk=False):
        """
        drop all indexes.
        drop primary key also if include_pk is true
        """
        # Drop the related constraints first: a PK/unique constraint owns its
        # backing index, so the constraint must go before the index can.
        sql_result = DbUtil._cursor.execute(DbUtil._query_index_constraint_sql.format(schema, table))
        # Some cursor wrappers return a result object from execute(); plain
        # DB-API cursors return None and the rows come from the cursor itself.
        rows = sql_result.fetchall() if sql_result else DbUtil._cursor.fetchall()
        drop_constraint_sql = "\r\n".join(["ALTER TABLE \"{0}\".{1} DROP CONSTRAINT IF EXISTS {2} CASCADE;".format(schema, table, r[0]) for r in rows if r[1] != 'p' or include_pk])
        if drop_constraint_sql:
            DbUtil._cursor.execute(drop_constraint_sql)
        # Then drop the remaining (non-PK unless include_pk) indexes.
        sql_result = DbUtil._cursor.execute(DbUtil._query_index_sql.format(schema, table))
        rows = sql_result.fetchall() if sql_result else DbUtil._cursor.fetchall()
        drop_index_sql = "\r\n".join(["DROP INDEX IF EXISTS \"{0}\".\"{1}\" CASCADE;".format(schema, r[0]) for r in rows if not r[1] or include_pk])
        if drop_index_sql:
            DbUtil._cursor.execute(drop_index_sql)
|
# Fantasy-football style scoring inputs.
assists = crosses = 3
chancesCreated = goals = 4
shotsOnTarget = 5
successfulDribbles = 10
# 89 is the commented-out formula below evaluated with the values above
# (4*9 + 3*6 + 4*3 + 5*2 + 3 + 10 = 89).
points = 89
# points = goals * 9 + assists * 6 + chancesCreated * 3 + \
# shotsOnTarget * 2 + crosses + successfulDribbles
|
#!/usr/bin/env python
import unittest
from asyncdnspy.dns_message_decoder import DNSMessageDecoder
from asyncdnspy.error.asyncdnspy_error import AsyncDNSPyError
from asyncdnspy.udp_client import UDPClient
from asyncdnspy.tcp_client import TCPClient
from asyncdnspy.dns_raw_message import DNSRawMessage
from asyncdnspy.dnspy_enum import RecordType, SocketType
class DNSMessageDecoderTest(unittest.TestCase):
    """End-to-end tests for DNSMessageDecoder.

    NOTE(review): all record-type tests require network access to the Google
    public DNS resolver at 8.8.8.8:53.
    """

    def _query_and_decode(self, client, record_type, socket_type=None):
        """Send a query for google.com over *client* and decode the reply.

        *socket_type* is forwarded to query()/decode() only when given, so the
        UDP tests keep exercising the original default arguments. Asserts the
        decoded message has at least one answer and one question.
        """
        client.connect()
        dns_raw_message = DNSRawMessage()
        extra = () if socket_type is None else (socket_type,)
        data = dns_raw_message.query('google.com', record_type, *extra)
        result = client.send(data)
        self.assertTrue(result != -1)
        response = client.receive()
        dns_message = DNSMessageDecoder.decode(data, response, record_type, *extra)
        self.assertTrue(len(dns_message.answers) > 0)
        self.assertTrue(len(dns_message.questions) > 0)
        return dns_message

    def test_decode_empty_buffer(self):
        """An empty query buffer decodes to None."""
        dns_message = DNSMessageDecoder.decode(b'',
                                               b'\xf6>\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xcb\x00\x04\xac\xd9\xa9.',
                                               RecordType.a)
        self.assertIsNone(dns_message)

    def test_decode_empty_response(self):
        """An empty response buffer decodes to None."""
        dns_message = DNSMessageDecoder.decode(b'\xf6>\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xcb\x00\x04\xac\xd9\xa9.',
                                               b'',
                                               RecordType.a)
        self.assertIsNone(dns_message)

    def test_decode_a_record_udp_socket(self):
        dns_message = self._query_and_decode(UDPClient('8.8.8.8', 53), RecordType.a)
        self.assertIsNotNone(dns_message.answers[0].get_ipv4_address())

    def test_decode_a_record_tcp_socket(self):
        dns_message = self._query_and_decode(TCPClient('8.8.8.8', 53), RecordType.a, SocketType.tcp)
        self.assertIsNotNone(dns_message.answers[0].get_ipv4_address())

    def test_decode_aaaa_record_udp_socket(self):
        dns_message = self._query_and_decode(UDPClient('8.8.8.8', 53), RecordType.aaaa)
        self.assertIsNotNone(dns_message.answers[0].get_ipv6_address())

    def test_decode_aaaa_record_tcp_socket(self):
        dns_message = self._query_and_decode(TCPClient('8.8.8.8', 53), RecordType.aaaa, SocketType.tcp)
        self.assertIsNotNone(dns_message.answers[0].get_ipv6_address())

    def test_decode_txt_record_udp_socket(self):
        dns_message = self._query_and_decode(UDPClient('8.8.8.8', 53), RecordType.txt)
        self.assertIsNotNone(dns_message.answers[0].get_txt())

    def test_decode_txt_record_tcp_socket(self):
        dns_message = self._query_and_decode(TCPClient('8.8.8.8', 53), RecordType.txt, SocketType.tcp)
        self.assertIsNotNone(dns_message.answers[0].get_txt())

    def main(self):
        """Run every test in sequence (used by the ad-hoc __main__ runner)."""
        self.test_decode_empty_buffer()
        self.test_decode_empty_response()
        self.test_decode_a_record_udp_socket()
        self.test_decode_a_record_tcp_socket()
        self.test_decode_aaaa_record_udp_socket()
        self.test_decode_aaaa_record_tcp_socket()
        self.test_decode_txt_record_udp_socket()
        self.test_decode_txt_record_tcp_socket()
# Ad-hoc runner: invokes each test directly via DNSMessageDecoderTest.main()
# rather than through unittest.main() discovery. (A stray extraction artifact
# was removed from the final line.)
if __name__ == '__main__':
    tests = DNSMessageDecoderTest()
    tests.main()
#coding : utf-8
#Ewan GRIGNOUX LEVERT
#Avril 2020
import sys
import csv
from tkinter import *
from PIL import Image, ImageTk # pour les images
from tkinter import ttk
from tkinter.messagebox import*
from datetime import *
from Champs import Champs
from Inventaire import Inventaire
from Magasin import Magasin
import Chargement
# Main window: a green 1920x1080 board for the gardening game; every screen
# packs its widgets into the single shared frame below.
root = Tk()
root.title('Jardinage')
root.geometry("1920x1080")
root.configure(bg = 'green')
frame = Frame(root, bg='green')
frame.pack()
def new_game_function():
    """Build the 'new game' screen: ask for a player name, create the player's
    save files, then show the main menu."""
    def retour_function():
        # Back button: tear down the name-entry widgets and show the home screen.
        Name.destroy()
        Nametxt.destroy()
        retour.destroy()
        lancer.destroy()
        acceuil()
    def menu_function():
        # Create the save files for the entered name and open the main menu.
        player = str(Name.get())
        if player != '':
            # Append the player name to the roster used by the 'Continuer' screen.
            with open ('infos.txt', 'a') as txt:
                msg = (f"{player},")
                txt.write(msg)
            # Seed the player's field file from the template spreadsheet.
            plantations_pro = Chargement.lireFichierCSV('Classeur1.csv')
            with open(f'Classeur_{player}.csv', 'w', newline='', encoding = 'utf-8')as fichier:
                titres = ['num','semis','date_semis','date_levée','date_récolte','type_avant','arroser']
                ecrivain = csv.DictWriter(fichier, fieldnames=titres)
                ecrivain.writeheader()
                plantations_pro[2]['arroser'] = "no"
                for compartiment in plantations_pro:
                    ecrivain.writerow(compartiment)
            # Seed the player's inventory with a few starting seeds.
            semis_pro = Chargement.lireFichierCSV('Modèle_inventaire.csv')
            with open(f'Inventaire_{player}.csv', 'w', newline='', encoding = 'utf-8')as fichier:
                titres = ['Num','Plante','Quantite']
                ecrivain = csv.DictWriter(fichier, fieldnames=titres)
                ecrivain.writeheader()
                semis_pro[16]['Quantite'] = '2'
                semis_pro[20]['Quantite'] = '20'
                semis_pro[18]['Quantite'] = '2'
                for compartiment in semis_pro:
                    ecrivain.writerow(compartiment)
            def Champs_function():
                # Open the field window for this player.
                Champs(player)
                showinfo('Champs',"Voici ton champs, c'est ici que tu vas pouvoir produire tes fruits et légumes.")
            def Inventaire_function():
                Inventaire(player)
            def Magasin_function():
                Magasin(player)
            def Quitter():
                # Leave the game: destroy the menu widgets and go back home.
                Jardin.destroy()
                Champs_button.destroy()
                Inventaire_button.destroy()
                Magasin_button.destroy()
                acceuil()
                Exit.destroy()
            # Replace the name-entry widgets with the main-menu widgets.
            Name.destroy()
            Nametxt.destroy()
            lancer.destroy()
            retour.destroy()
            showinfo('Histoire',f"Bienvenue jeune fermier, {player}, dans le menu prncipal. D'ici, tu peux aller voir ton champs, aller au marché pour y vendre ta production, au magasin pour acheter ce dont tu as besoin, et enfin voir ce que tu possède dans ton inventaire; ")
            Jardin = Label(frame, text='Jardinage', fg='yellow', bg='green', font=('Candara', 50))
            Jardin.pack(padx=5, pady=5)
            Champs_button = Button(frame, text='Champs', fg='yellow', bg='green', command=Champs_function, font=('Candara', 20))
            Champs_button.pack(padx=5, pady=5)
            Inventaire_button = Button(frame, text='Inventaire', fg='yellow', bg='green', command=Inventaire_function, font=('Candara', 20))
            Inventaire_button.pack(padx=5, pady=5)
            Magasin_button = Button(frame, text='Magasin', fg='yellow', bg='green', command=Magasin_function, font=('Candara', 20))
            Magasin_button.pack(padx=5, pady=5)
            Exit = Button(frame,text='Quitter la partie', fg='yellow', bg='green', command=Quitter, font=('Candara', 20))
            Exit.pack(padx=5, pady=5)
        else:
            showerror('Important', 'Vous devez nommer cette partie.')
    # Hide the home-screen buttons and show the name-entry form.
    new_game.pack_forget()
    continue_game.pack_forget()
    Nametxt = Label(frame, text='Nom du joueur:', fg='yellow', bg = 'green', font=('Candara', 20))
    Nametxt.pack(padx=5, pady=5)
    Name = Entry(frame, textvariable='Nom du joueur', width = 17, font=('Candara', 20))
    Name.pack(padx=5, pady=5)
    lancer = Button(frame, text='Jouer', fg='yellow',bg='green', command=menu_function, font=('Candara', 20))
    lancer.pack(padx=5, pady=5)
    retour = Button(frame, text='Retour', fg='yellow',bg='green', command=retour_function, font=('Candara', 20) )
    retour.pack(padx=5, pady=5)
def continue_game_function():
    """Build the 'continue' screen: pick an existing save from infos.txt and
    open the main menu for that player."""
    def retour_function():
        # Back button: remove the selection widgets and show the home screen.
        lancer.destroy()
        partytxt.destroy()
        party.destroy()
        retour.destroy()
        acceuil()
    def menu_function():
        def Champs_function():
            Champs(player)
        def Inventaire_function():
            Inventaire(player)
        def Magasin_function():
            Magasin(player)
        def Quitter():
            # Leave the game: destroy the menu widgets and go back home.
            Jardin.destroy()
            Champs_button.destroy()
            Inventaire_button.destroy()
            Magasin_button.destroy()
            acceuil()
            Exit.destroy()
        # Selected save name from the combobox.
        player = party.get()
        # Replace the selection widgets with the main-menu widgets.
        party.destroy()
        partytxt.destroy()
        lancer.destroy()
        retour.destroy()
        Jardin = Label(frame, text='Jardinage', fg='yellow', bg='green', font=('Candara', 50))
        Jardin.pack(padx=5, pady=5)
        Champs_button = Button(frame, text='Champs', fg='yellow', bg='green', command=Champs_function, font=('Candara', 20))
        Champs_button.pack(padx=5, pady=5)
        Inventaire_button = Button(frame, text='Inventaire', fg='yellow', bg='green', command=Inventaire_function, font=('Candara', 20))
        Inventaire_button.pack(padx=5, pady=5)
        Magasin_button = Button(frame, text='Magasin', fg='yellow', bg='green', command=Magasin_function, font=('Candara', 20))
        Magasin_button.pack(padx=5, pady=5)
        Exit = Button(frame,text='Quitter la partie', fg='yellow', bg='green', command=Quitter, font=('Candara', 20))
        Exit.pack(padx=5, pady=5)
    # Hide the home-screen buttons and show the save-selection form.
    new_game.pack_forget()
    continue_game.pack_forget()
    # Saved player names are stored comma-separated in infos.txt.
    liste_party=[]
    with open ('infos.txt','r') as r:
        d = r.read()
        liste_party = d.split(',')
    partytxt = Label(frame, text='Partie:', fg='yellow',bg='green', font=('Candara', 50))
    partytxt.pack(padx=5, pady=5)
    party = ttk.Combobox(frame, values=liste_party ,width=10)
    party.pack(padx=5, pady=5)
    lancer = Button(frame, text='Jouer', fg='yellow',bg='green', command=menu_function, font=('Candara', 20))
    lancer.pack(padx=5, pady=5)
    retour = Button(frame, text='Retour', fg='yellow',bg='green', command=retour_function, font=('Candara', 20) )
    retour.pack(padx=5, pady=5)
# Welcome-screen buttons: start a new game or continue a saved one.
new_game = Button(frame, text='Nouvelle Partie', bg='green', fg='yellow', command=new_game_function, font=('Candara', 20))
continue_game = Button(frame, text='Continuer', bg='green', fg='yellow', command=continue_game_function, font=('Candara', 20))
def acceuil():
    """Show (or re-show) the two welcome-screen buttons."""
    new_game.pack(padx=5, pady=5)
    continue_game.pack(padx=5, pady=5)
acceuil()
frame.pack(expand=YES)
root.mainloop()
# Reached once the Tk main loop exits (window closed).
sys.exit()
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# User Datagram Protocol
# Analyser for UDP header
from .transport import Transport
from ..utilities import Info
class UDP(Transport):
    """This class implements User Datagram Protocol.

    Properties:
        * name -- str, name of corresponding protocol
        * info -- Info, info dict of current instance
        * layer -- str, `Transport`
        * length -- int, header length of corresponding protocol
        * protocol -- str, name of next layer protocol
        * protochain -- ProtoChain, protocol chain of current instance
        * src -- int, source port
        * dst -- int, destination port

    Methods:
        * read_udp -- read User Datagram Protocol (UDP)

    Attributes:
        * _file -- BytesIO, bytes to be extracted
        * _info -- Info, info dict of current instance
        * _protos -- ProtoChain, protocol chain of current instance

    Utilities:
        * _read_protos -- read next layer protocol type
        * _read_fileng -- read file buffer
        * _read_unpack -- read bytes and unpack to integers
        * _read_binary -- read bytes and convert into binaries
        * _decode_next_layer -- decode next layer protocol type
        * _import_next_layer -- import next layer protocol extractor
    """
    ##########################################################################
    # Properties.
    ##########################################################################

    @property
    def name(self):
        # Human-readable protocol name.
        return 'User Datagram Protocol'

    @property
    def length(self):
        # UDP header is a fixed 8 bytes [RFC 768].
        return 8

    @property
    def src(self):
        # NOTE(review): read_udp stores the port under the key 'srcport';
        # confirm that Info exposes it as `src` as accessed here.
        return self._info.src

    @property
    def dst(self):
        # Same naming caveat as `src` above (stored as 'dstport').
        return self._info.dst

    ##########################################################################
    # Methods.
    ##########################################################################

    def read_udp(self, length):
        """Read User Datagram Protocol (UDP).

        Structure of UDP header [RFC 768]:

             0      7 8     15 16    23 24    31
            +--------+--------+--------+--------+
            |     Source      |   Destination   |
            |      Port       |      Port       |
            +--------+--------+--------+--------+
            |                 |                 |
            |     Length      |    Checksum     |
            +--------+--------+--------+--------+
            |
            |          data octets ...
            +---------------- ...

            Octets      Bits        Name                    Description
              0           0     udp.srcport             Source Port
              2          16     udp.dstport             Destination Port
              4          32     udp.len                 Length (header includes)
              6          48     udp.checksum            Checksum
        """
        _srcp = self._read_unpack(2)
        _dstp = self._read_unpack(2)
        _tlen = self._read_unpack(2)
        # Checksum is kept as the raw 2-byte buffer, not unpacked to int.
        _csum = self._read_fileng(2)

        udp = dict(
            srcport = _srcp,
            dstport = _dstp,
            len = _tlen,
            checksum = _csum,
        )

        # Payload length = total datagram length minus the 8-byte header.
        length = udp['len'] - 8
        return self._decode_next_layer(udp, None, length)

    ##########################################################################
    # Data models.
    ##########################################################################

    def __init__(self, _file, length=None):
        # NOTE(review): `length` is accepted but effectively unused --
        # read_udp derives the payload length from the header's own field.
        self._file = _file
        self._info = Info(self.read_udp(length))

    def __len__(self):
        # Header length, mirrors the `length` property.
        return 8

    def __length_hint__(self):
        return 8
|
#CalpiV2.py
# Monte Carlo estimation of pi: throw darts at the unit square and count
# the fraction that lands inside the quarter unit circle.
from time import perf_counter
from random import random

DARTS = 1000 * 1000


def estimate_pi(darts, rand=random):
    """Estimate pi by Monte Carlo sampling.

    darts -- number of random points to draw (must be > 0)
    rand  -- zero-argument callable returning floats in [0, 1);
             defaults to random.random, injectable for testing

    Returns the estimate 4 * hits / darts.
    """
    hits = 0
    for _ in range(darts):
        x, y = rand(), rand()
        # The point lies inside the quarter circle iff x^2 + y^2 <= 1;
        # comparing the squared distance avoids a needless square root.
        if x * x + y * y <= 1.0:
            hits += 1
    # Quarter-circle area / square area = pi / 4.
    return 4 * (hits / darts)


if __name__ == "__main__":
    # Guard keeps the million-iteration loop from running on import.
    start = perf_counter()
    pi = estimate_pi(DARTS)
    print("圆周率为:{}".format(pi))
    print("运行时间是:{:.5f}s".format(perf_counter() - start))
|
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
from threading import current_thread
import time
import random
import os
# 这就是进程池和线程池的两个类
# threading 模块 没有提供池
# multiprocessing 模块是仿照 threading 写的,Pool
# py 3.x 推出了 concurrent.futures 模块,线程池和进程池都能够用相似的方式启动/使用
# pp = ProcessPoolExecutor(5) # 创建5个进程对象,放在池中
# tp = ThreadPoolExecutor(20) # 创建20个线程对象,这就创建了池
###########
# 线程池
##########
# def func(a,b):
# print(current_thread().ident,'start',a,b)
# time.sleep(random.randint(1,4))
# print(current_thread().ident,'end',a,b)
#
#
# tp = ThreadPoolExecutor(4) # 创建线程池,个数为4
#
#
# for i in range(20):
# tp.submit(func,i,b=i+1) # 把任务 func 提交给线程池 tp,后边还可以给任务函数传参数
# 实例化 创建池
# 使用 submit 传递任务,和参数
# 进程池和线程池操作一样
###########
# 进程池
##########
# def func(a,b):
# print(os.getpid(),'start',a,b)
# time.sleep(random.randint(1,4))
# print(os.getpid(),'end',a,b)
#
# if __name__ == '__main__':
#
# tp = ProcessPoolExecutor(4) # 创建线程池,个数为4
# for i in range(20):
# tp.submit(func,i,b=i+1) # 把任务 func 提交给进程池 tp,后边还可以给任务函数传参数
#############################################
# 获取进程/线程的返回结果
#############################################
# low版
# def func(a,b):
# print(os.getpid(),'start',a,b)
# #time.sleep(random.randint(1,4))
# print(os.getpid(),'end',a,b)
# return a*b
#
# if __name__ == '__main__':
#
# tp = ProcessPoolExecutor(4) # 创建线程池,个数为4
# for i in range(20):
# ret = tp.submit(func,i,b=i+1) # ret 获得的返回结果是一个 future 对象,未来对象,为啥叫未来对象,因为我们创建的时候不用,后来才用
# print(ret.result()) # future 对象加 result() 可以获取原来的结果
#
# 但是这么写,多线程会一个一个执行,不同时执行
# 牛逼版
# def func(a,b):
# print(os.getpid(),'start',a,b)
# time.sleep(random.randint(1,4))
# print(os.getpid(),'end',a,b)
# return a*b
#
# if __name__ == '__main__':
#
# tp = ProcessPoolExecutor(4) # 创建线程池,个数为4
# future_l = {}
# for i in range(20): # 异步非阻塞
# ret = tp.submit(func,i,b=i+1) # ret 获得的返回结果是一个 future 对象
# future_l[i] = ret
# for key in future_l: # 同步阻塞
# print(key,future_l[key].result())
#############################################
# 池的 map 函数
#############################################
# def func(a):
# b = a+1
# # print(os.getpid(),'start',a,b)
# # time.sleep(random.randint(1,4))
# # print(os.getpid(),'end',a,b)
# return a*b
#
# if __name__ == '__main__':
#
# tp = ProcessPoolExecutor(4) # 创建线程池,个数为4
# ret = tp.map(func,range(20)) # map 只能传递可迭代对象作为参数
# for key in ret:
# print(key)
#############################################
# 池的 回调 函数 add_done_callback(),效率最高,只要是想要获取线程的返回值,就用这个
#############################################
# def func(a,b):
# print(current_thread().ident,'start',a,b)
# time.sleep(random.randint(1,4))
# print(current_thread().ident,'end',a,b)
# return a*b
#
# def print_func(ret):
# print(ret.result())
#
# if __name__ == '__main__':
#
# tp = ThreadPoolExecutor(4) # 创建线程池,个数为4
# for i in range(20): # 异步非阻塞
# ret = tp.submit(func,i,b=i+1) # ret 获得的返回结果是一个 future 对象
# ret.add_done_callback(print_func) # 异步阻塞
# # 异步阻塞的回调函数,返回的ret对象绑定一个回调函数,等待ret对应的任务有了结果以后立即调用 print_func 函数,并且把任务的返回值
# # 传递给print_func 作为参数
# # 就可以对返回的结果立即进行处理,而不用按照顺序接受结果处理结果
#############################################
# 池的 回调 函数 add_done_callback() 的底层实现
#############################################
# import time
# import random
# import queue
# from threading import Thread
#
# def func(q,i):
# print('start',i)
# time.sleep(random.randint(1,5))
# print('end',i)
# q.put(i*(i+1))
#
# def print_func(q):
# print(q.get())
#
# q = queue.Queue()
# for i in range(20):
# Thread(target=func,args=(q,i)).start()
# for i in range(20):
# Thread(target=print_func,args=(q,)).start()
#############################################
# 池的 回调 函数 add_done_callback() 的例子
#############################################
from concurrent.futures import ThreadPoolExecutor
import requests
import os
def get_page(url):  # fetch the page source; runs on a pool thread
    """Download *url* and return {'url': ..., 'text': ...} on HTTP 200.

    Implicitly returns None for any non-200 status code.
    """
    print('<进程%s> get %s' %(os.getpid(),url))
    respone=requests.get(url)
    if respone.status_code == 200:
        return {'url':url,'text':respone.text}
def parse_page(res):  # bound as the done-callback of each fetch future
    """Record the fetched page's size in db.txt.

    *res* is the Future returned by tp.submit; .result() yields the dict
    produced by get_page (or None if the fetch was not HTTP 200 --
    NOTE(review): that None would raise a TypeError here).
    """
    res=res.result()
    print('<进程%s> parse %s' %(os.getpid(),res['url']))
    parse_res='url:<%s> size:[%s]\n' %(res['url'],len(res['text']))
    with open('db.txt','a') as f:
        f.write(parse_res)
if __name__ == '__main__':
    urls=[
        'https://www.baidu.com',
        'https://www.python.org',
        'https://www.openstack.org',
        'https://help.github.com/',
        'http://www.sina.com.cn/'
    ]
    tp = ThreadPoolExecutor(4)   # create the thread pool
    for i in urls:
        # One future per URL = submit a get_page task for each one.
        ret = tp.submit(get_page,i)   # submit the task to the pool
        # Bind parse_page as the callback: whichever page returns first
        # gets its result written to the file first.
        ret.add_done_callback(parse_page)
# Without the callback:
#   pages are fetched in order (baidu, python, openstack, github, sina)
#   and results can only be written in that same order.
# With the callback:
#   pages are still fetched concurrently, but whichever page finishes
#   first has its parse_page (callback) executed first.
# Takeaways: know how to create a pool and submit tasks,
# how to collect return values, and how to use callbacks.
# 1. Know all of these examples by heart.
# 2. Process pools (compute-heavy work with no IO -- no file, database,
#    network or input operations): between cpu_count*1 and cpu_count*2.
#    Thread pools (sized by the IO ratio of the workload): ~cpu_count*5.
#    e.g. 5*20 = 100 concurrent tasks.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* UMSE Antivirus Agent Example
* Author: David Alvarez Perez <dalvarezperez87[at]gmail[dot]com>
* Module: UMSE Decryption Tools
* Description: This module allows to decrypt UMSE file entries.
*
* Copyright (c) 2019-2020. The UMSE Authors. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
import Crypto
#import umse
import ctypes
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import AES
# Path to the PEM file holding the RSA private key used for entry decryption.
RSA_PEM_FILE_PATH = "rsa_private_key.pem"
def get_rsa_private_key_from_pem_file(pem_file_path):
    '''
    Return the RSA private key text stored in the PEM file at *pem_file_path*.
    '''
    with open(pem_file_path, "rt") as key_file:
        key_text = key_file.read()
    return key_text
'''
@deprecated(version='0.1', reason="Crypto library not longer used for RSA encryption and decryption")
def rsa_decryption(ciphertext, pem_file_path):
ciphertext = bytearray(ciphertext)
rsa_priv_key = get_rsa_private_key_from_pem_file(pem_file_path)
dsize = SHA.digest_size
dcipher = PKCS1_OAEP.new(rsa_priv_key)
aes_key = dcipher.decrypt(ciphertext)
return aes_key
@deprecated(version='0.1', reason="Crypto library not longer used for RSA encryption and decryption")
def aes_decryption(aes_key, encrypted_message):
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(AES.block_size - len(s) % AES.block_size)
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
aes_iv = b"\0"*AES.block_size
aes_cipher = AES.new(aes_key, AES.MODE_CBC, aes_iv)
data = unpad(aes_cipher.decrypt(encrypted_message))
return data
@deprecated(version='0.1', reason="Crypto library not longer used for RSA encryption and decryption")
def get_aes_key_from_entry_to_decrypt(entry_to_decrypt, umse_sample):
if(entry_to_decrypt >= umse_sample.umse_header.num_file_entries):
return None
entry_to_decrypt_confidentiality = umse_sample.entry[entry_to_decrypt].level_of_confidentiality
for i in range(umse_sample.umse_header.num_records_dec_table):
if(umse_sample.decryption_table[i].level_of_confidentiality == entry_to_decrypt_confidentiality):
aes_key = rsa_decryption(umse_sample.decryption_table[i].aes_wrapped, RSA_PEM_FILE_PATH)
return aes_key
return None
@deprecated
def get_encrypted_message_from_entry(entry_to_decrypt, umse_sample):
if(entry_to_decrypt >= umse_sample.umse_header.num_file_entries):
return None
entry_to_decrypt_confidentiality = umse_sample.entry[entry_to_decrypt].level_of_confidentiality
for i in range(umse_sample.umse_header.num_records_dec_table):
if(umse_sample.decryption_table[i].level_of_confidentiality == entry_to_decrypt_confidentiality):
encrypted_message = bytearray(umse_sample.entry[entry_to_decrypt].encrypted_message)
return encrypted_message
return None
'''
class GeneralError(Exception):
    # Raised when libUmse reports an unspecified failure (return code -1).
    pass
class InvalidAuthenticationHeader(Exception):
    # Raised for an invalid UMSE authentication header (return code -2).
    pass
class InsufficientCredentials(Exception):
    # Raised when the caller's access level is too low (return code -3).
    pass
def decrypt_entry(entry_to_decrypt, umse_sample, access_level):
    '''
    This function decrypts an UMSE file entry.

    entry_to_decrypt -- index of the file entry to decrypt
    umse_sample      -- raw UMSE sample passed to the native library
                        (presumably a bytes object; confirm against callers)
    access_level     -- caller credential level checked by the DLL
    Returns a bytearray with the decrypted entry contents.
    Raises GeneralError / InvalidAuthenticationHeader / InsufficientCredentials
    for libUmse return codes -1 / -2 / -3 respectively.
    '''
    rsa_private_key = get_rsa_private_key_from_pem_file(RSA_PEM_FILE_PATH).encode("ascii")
    # Windows-only: libUmse.dll must be locatable on the DLL search path.
    lib_umse_dll = ctypes.cdll.LoadLibrary("libUmse.dll")
    # Out-parameters the DLL fills in with the result buffer and its length.
    decryptedEntryLength = ctypes.c_int()
    decryptedEntry = ctypes.POINTER(ctypes.c_ubyte)()
    umse_bytes = umse_sample
    umse_bytes_length = len(umse_bytes)
    lib_umse_dll.DecryptUmse.restype = ctypes.c_int
    ret_val = lib_umse_dll.DecryptUmse(ctypes.c_int(umse_bytes_length), ctypes.c_char_p(umse_bytes), ctypes.c_int(entry_to_decrypt), ctypes.c_int(access_level), ctypes.c_char_p(rsa_private_key), ctypes.byref(decryptedEntryLength), ctypes.byref(decryptedEntry))
    if(ret_val != 0):
        # -1 raises before the -2 test, so the bare `if` below is safe.
        if(ret_val == -1):
            raise GeneralError('General error')
        if(ret_val == -2):
            raise InvalidAuthenticationHeader('Invalid UMSE authentication header.')
        elif(ret_val == -3):
            raise InsufficientCredentials('Insufficient credentials.')
    # Copy the native buffer into Python-owned memory before returning.
    return bytearray(decryptedEntry[:decryptedEntryLength.value])
'''
def decrypt_entry(entry_to_decrypt, umse_sample):
aes_key = get_aes_key_from_entry_to_decrypt(entry_to_decrypt, umse_sample)
encrypted_message = get_encrypted_message_from_entry(entry_to_decrypt, umse_sample)
decrypted_entry = aes_decryption(aes_key, encrypted_message)
return decrypted_entry
''' |
def rgb_range(n):
    """Clamp *n* into the inclusive byte range 0..255."""
    if n < 0:
        return 0
    if n > 255:
        return 255
    return n


def rgb(r, g, b):
    """Return the six-digit uppercase hex color for (r, g, b), clamping each channel."""
    return ''.join('{:02X}'.format(rgb_range(channel)) for channel in (r, g, b))
|
from unittest.case import TestCase
from pythonbrasil.lista_2_estrutura_de_decisao.ex_07_mostrar_maior_e_menor_de_tres_numeros \
import obter_maior_numero, obter_menor_numero
class ObterMaiorNumeroTests(TestCase):
    """Checks obter_maior_numero for equal, tied and distinct arguments."""

    def test_todos_numeros_iguais(self):
        # When all three arguments are equal, the maximum is that value.
        for valor in (1, 5, 10):
            self.assertEqual(valor, obter_maior_numero(valor, valor, valor))

    def test_dois_numeros_iguais(self):
        for trio in ((10, 10, 1), (1, 10, 10), (10, 1, 10)):
            self.assertEqual(10, obter_maior_numero(*trio))

    def test_primeiro_numero_maior(self):
        for trio in ((10, 5, 2), (10, 2, 5), (10, 5, 5)):
            self.assertEqual(10, obter_maior_numero(*trio))

    def test_segundo_numero_maior(self):
        for trio in ((5, 10, 2), (2, 10, 5), (5, 10, 5)):
            self.assertEqual(10, obter_maior_numero(*trio))

    def test_terceiro_numero_maior(self):
        for trio in ((5, 2, 10), (2, 5, 10), (5, 5, 10)):
            self.assertEqual(10, obter_maior_numero(*trio))
class ObterMenorNumeroTests(TestCase):
    """Checks obter_menor_numero for equal, tied and distinct arguments."""

    def test_todos_numeros_iguais(self):
        # When all three arguments are equal, the minimum is that value.
        for valor in (1, 5, 10):
            self.assertEqual(valor, obter_menor_numero(valor, valor, valor))

    def test_dois_numeros_iguais(self):
        for trio in ((10, 10, 1), (1, 10, 10), (10, 1, 10)):
            self.assertEqual(1, obter_menor_numero(*trio))

    def test_primeiro_numero_menor(self):
        for trio in ((10, 5, 2), (10, 2, 5), (10, 2, 2)):
            self.assertEqual(2, obter_menor_numero(*trio))

    def test_segundo_numero_menor(self):
        for trio in ((5, 10, 2), (2, 10, 5), (2, 10, 2)):
            self.assertEqual(2, obter_menor_numero(*trio))

    def test_terceiro_numero_menor(self):
        for trio in ((5, 2, 10), (2, 5, 10), (2, 2, 10)):
            self.assertEqual(2, obter_menor_numero(*trio))
|
""" template
This code is written for COMP9021.
Author: Jack Jiang (z5129432)
Version: v01
Date: 2017
"""
import os
import sys
def template_function():
    """Placeholder kept for the assignment template.

    Arguements: none.
    Returns: None.
    """
    return None
# Test Codes
if __name__ == "__main__":
    # No self-tests yet; running this module directly is a no-op.
    pass
|
from PyQt5 import QtWidgets, QtCore, QtGui
class DrawWords(QtWidgets.QWidget):
def __init__(self, word_1, word_2, word_3):
super(DrawWords, self).__init__()
self.word_1 = word_1
self.word_2 = word_2
self.word_3 = word_3
self.move(150,50)
self.setFixedSize(900,500)
self.startA = 5
self.endA = 30
self.linewidth = 1
def paintEvent(self, event):
paint = QtGui.QPainter()
paint.begin(self)
paint.setRenderHint(QtGui.QPainter.Antialiasing)
paint.setBrush(QtCore.Qt.white)
paint.drawRect(event.rect())
first_circle = QtCore.QRect(70, 70, 250, 250)
text_coord_x = 70 + (first_circle.width() - 40)/2
text_coord_y = 70 + (first_circle.height() - 10)/2
paint.setPen(QtCore.Qt.blue)
paint.setBrush(QtCore.Qt.blue)
paint.drawEllipse(first_circle)
paint.setPen(QtCore.Qt.white)
paint.drawText(text_coord_x, text_coord_y, self.word_1)
second_circle = QtCore.QRect(390, 105, 200, 200)
text_coord_x = 390 + (second_circle.width() - 40)/2
text_coord_y = 105 + (second_circle.height() - 10)/2
paint.setPen(QtCore.Qt.darkGreen)
paint.setBrush(QtCore.Qt.darkGreen)
paint.drawEllipse(second_circle)
paint.setPen(QtCore.Qt.white)
paint.drawText(text_coord_x, text_coord_y, self.word_2)
third_circle = QtCore.QRect(660, 140, 150, 150)
text_coord_x = 660 + (third_circle.width() - 40)/2
text_coord_y = 140 + (third_circle.height() - 10)/2
paint.setPen(QtCore.Qt.darkCyan)
paint.setBrush(QtCore.Qt.darkCyan)
paint.drawEllipse(third_circle)
paint.setPen(QtCore.Qt.white)
paint.drawText(text_coord_x, text_coord_y, self.word_3)
paint.end()
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# Auto-generated snapshottest data: each entry maps a test id
# ('TestCase::test_name N') to the GraphQL response recorded for it.
# Regenerate via snapshottest rather than editing values by hand.
snapshots = Snapshot()

snapshots['MutateEventTestCase::test_create_event_with_calendar_authorized 1'] = {
    'data': {
        'createEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 13,
                    'line': 3
                }
            ],
            'message': 'Sorry, you cannot enter a past date',
            'path': [
                'createEvent'
            ]
        }
    ]
}

# Key spelling 'unauthorizd' mirrors the recorded test method name.
snapshots['MutateEventTestCase::test_create_event_with_calendar_unauthorizd 1'] = {
    'data': {
        'createEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 13,
                    'line': 3
                }
            ],
            'message': 'Sorry, you cannot enter a past date',
            'path': [
                'createEvent'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_deactivate_event_as_admin 1'] = {
    'data': {
        'deactivateEvent': {
            'actionMessage': 'Event deactivated'
        }
    }
}

snapshots['MutateEventTestCase::test_deactivate_event_as_creator 1'] = {
    'data': {
        'deactivateEvent': {
            'actionMessage': 'Event deactivated'
        }
    }
}

snapshots['MutateEventTestCase::test_deactivate_event_as_non_creator 1'] = {
    'data': {
        'deactivateEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 17,
                    'line': 3
                }
            ],
            'message': "You aren't authorised to deactivate the event",
            'path': [
                'deactivateEvent'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_event_with_an_existing_location 1'] = {
    'data': {
        'eventsList': {
            'edges': [
                {
                    'node': {
                        'id': 'RXZlbnROb2RlOjU='
                    }
                }
            ]
        }
    }
}

snapshots['MutateEventTestCase::test_event_with_non_exixting_location 1'] = {
    'data': {
        'eventsList': {
            'edges': [
            ]
        }
    }
}

snapshots['MutateEventTestCase::test_query_updated_event 1'] = {
    'data': {
        'event': {
            'description': 'test description default',
            'id': 'RXZlbnROb2RlOjU=',
            'socialEvent': {
                'id': 'Q2F0ZWdvcnlOb2RlOjQ1'
            },
            'title': 'test title default'
        }
    }
}

snapshots['MutateEventTestCase::test_send_event_invite 1'] = {
    'data': {
        'sendEventInvite': {
            'message': 'Event invite delivered'
        }
    }
}

snapshots['MutateEventTestCase::test_send_invite_for_invalid_event 1'] = {
    'data': {
        'sendEventInvite': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 17,
                    'line': 3
                }
            ],
            'message': 'Event does not exist',
            'path': [
                'sendEventInvite'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_send_invite_to_invalid_user 1'] = {
    'data': {
        'sendEventInvite': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 17,
                    'line': 3
                }
            ],
            'message': 'Recipient User does not exist',
            'path': [
                'sendEventInvite'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_send_invite_to_self 1'] = {
    'data': {
        'sendEventInvite': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 17,
                    'line': 3
                }
            ],
            'message': 'User cannot invite self',
            'path': [
                'sendEventInvite'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_update_event_as_admin 1'] = {
    'data': {
        'updateEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 17,
                    'line': 3
                }
            ],
            'message': 'An Error occurred. Please try again',
            'path': [
                'updateEvent'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_update_event_as_creator 1'] = {
    'data': {
        'updateEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 13,
                    'line': 3
                }
            ],
            'message': 'An Error occurred. Please try again',
            'path': [
                'updateEvent'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_update_event_as_non_creator 1'] = {
    'data': {
        'updateEvent': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 13,
                    'line': 3
                }
            ],
            'message': 'An Error occurred. Please try again',
            'path': [
                'updateEvent'
            ]
        }
    ]
}

snapshots['MutateEventTestCase::test_validate_invite_link 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Expired Invite: Event has ended'
        }
    }
}

snapshots['MutateEventTestCase::test_validate_invite_link_expired_event 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Expired Invite: Event has ended'
        }
    }
}

snapshots['MutateEventTestCase::test_validate_invite_link_invalid_event 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Not Found: Invalid event/user in invite'
        }
    }
}

snapshots['MutateEventTestCase::test_validate_invite_link_invalid_hash 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Bad Request: Invalid invite URL'
        }
    }
}

snapshots['MutateEventTestCase::test_validate_invite_link_invalid_sender 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Expired Invite: Event has ended'
        }
    }
}

snapshots['MutateEventTestCase::test_validate_invite_link_unauthorized_user 1'] = {
    'data': {
        'validateEventInvite': {
            'event': None,
            'isValid': False,
            'message': 'Forbidden: Unauthorized access'
        }
    }
}
|
# DET
from common import *
import tune
# Detector settings. The record constructors (mbbOut, boolOut, ...) come
# from `common import *` -- presumably an EPICS database builder DSL.
mbbOut('DET:GAIN', DESC = 'Detector gain', *dBrange(7, -12) + ['-120dB'])
boolOut('DET:MODE', 'All Bunches', 'Single Bunch',
    FLNK = tune.setting_changed, DESC = 'Detector mode')
mbbOut('DET:INPUT', 'ADC', 'FIR',
    DESC = 'Detector input selection')
boolOut('DET:AUTOGAIN', 'Fixed Gain', 'Autogain',
    DESC = 'Detector automatic gain')
# One bunch-select record per detector channel; the linked calc record
# computes 4*A+B from the select value and the channel index and then
# notifies the tune logic via FLNK.
for bunch in range(4):
    bunch_select = longOut('DET:BUNCH%d' % bunch, 0, BUNCHES_PER_TURN/4-1,
        DESC = 'Detector bunch select #%d' % bunch)
    bunch_select.FLNK = records.calc('DET:BUNCH%d' % bunch,
        CALC = '4*A+B', INPA = bunch_select, INPB = bunch,
        FLNK = tune.setting_changed,
        DESC = 'Selected bunch #%d' % bunch)
# The frequency and timebase scales will be reprocessed when necessary,
# basically when the sequencer settings change and a detector trigger occurs.
Trigger('DET:SCALE',
    aIn('DET:DELAY', PREC = 3, EGU = 'turns', DESC = 'Detector delay'),
    Waveform('DET:SCALE', TUNE_LENGTH, 'DOUBLE',
        DESC = 'Scale for frequency sweep'),
    Waveform('DET:TIMEBASE', TUNE_LENGTH, 'LONG', DESC = 'Timebase scale'))
# Three overflow detection bits are generated
overflows = [
    boolIn('DET:OVF:INP', 'Ok', 'Overflow', OSV = 'MAJOR',
        DESC = 'Detector input overflow'),
    boolIn('DET:OVF:ACC', 'Ok', 'Overflow', OSV = 'MAJOR',
        DESC = 'Detector accumulator overflow'),
    boolIn('DET:OVF:IQ', 'Ok', 'Overflow', OSV = 'MAJOR',
        DESC = 'IQ scaling overflow')]
# Aggregate severity summarising the three overflow bits above.
overflows.append(
    AggregateSeverity('DET:OVF', 'Detector overflow', overflows))
# We have five sweep channels: one for each bunch and an aggregate consisting of
# the sum of all four.
def SweepChannel(name, desc):
    """Create the I, Q and power waveform records for one sweep channel."""
    def record_name(field):
        return 'DET:%s:%s' % (field, name)
    # I/Q are raw SHORT waveforms; POWER is the derived LONG spectrum.
    return [
        Waveform(record_name(field), TUNE_LENGTH, kind,
            DESC = '%s %s' % (desc, label))
        for field, kind, label in (
            ('I', 'SHORT', 'I'),
            ('Q', 'SHORT', 'Q'),
            ('POWER', 'LONG', 'power'))
    ]
# Per-bunch sweep channels plus the 'M' (mean) aggregate channel.
bunch_channels = [SweepChannel(b, 'Bunch %s' % b) for b in '0123']
mean_channel = SweepChannel('M', 'Bunch mean')
Trigger('DET', *concat(bunch_channels) + mean_channel + overflows)
# Control over the internal detector window.
det_window = WaveformOut('DET:WINDOW', 1024, 'FLOAT', DESC = 'Detector window')
Action('DET:RESET_WIN', FLNK = det_window,
    DESC = 'Reset detector window to Hamming')
# Total loop delay in turns.
aOut('DET:LOOP:ADC',
    EGU = 'turns', PREC = 3, DESC = 'Closed loop delay in turns')
stringIn('TUNE:MODE', SCAN = '1 second', DESC = 'Tune mode')
|
class Solution:
    # @param A : list of list of integers
    # @return the same list modified
    def setZeroes(self, A):
        """Zero out, in place, every row and column of A that contains a 0.

        Fixes the original sentinel approach, whose final pass
        (`if A[i][j] != 1: A[i][j] = 0`) zeroed every entry other than 1
        (correct only for 0/1 matrices) and whose -1 marker collided with
        genuine -1 values. Uses O(rows + cols) extra space instead.
        """
        rows = len(A)
        cols = len(A[0]) if rows else 0
        zero_rows = set()
        zero_cols = set()
        # First pass: record which rows/columns contain a zero.
        for i in range(rows):
            for j in range(cols):
                if A[i][j] == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        # Second pass: clear every cell in a flagged row or column.
        for i in range(rows):
            for j in range(cols):
                if i in zero_rows or j in zero_cols:
                    A[i][j] = 0
        return A
"""Example_1"""
# try:
# number = int(input("enter a number: "))
# print(number)
# # most general type of exceptions
# except Exception as e:
# print("please enter a number!", e)
"""Example_2"""
# try:
# number = int(input("enter a number: "))
# res = 10 / number
# print(number)
# # most general type of exceptions
# except ValueError as e:
# print("please enter a number!", e)
# except ZeroDivisionError as e:
# print("division by 0 not allowed", e)
# except Exception as e:
# print("some error happened", e)
# finally:
# print("we are in finally ")
"""Example_3"""
# customised exception
class divisionby12Error(Exception):
    """Raised when the user enters 12, which this demo refuses to accept."""
    pass
try:
    number = int(input("enter a number: "))
    # May raise ZeroDivisionError before the custom check below runs.
    res = 10 / number
    if number == 12:
        raise divisionby12Error("division by 12 not allowed in our program")
    print(number)
except ValueError as e:
    # int() failed: the input was not a number.
    print("please enter a number!", e)
except ZeroDivisionError as e:
    print("division by 0 not allowed", e)
except divisionby12Error as e:
    print("error", e)
except Exception as e:
    # Catch-all must come last or it would shadow the handlers above.
    print("some error happened", e)
finally:
    # Runs whether or not an exception occurred.
    print("we are in finally ")
|
def solution(numbers):
    """Return the largest product of any two entries of *numbers*.

    Note: sorts the list in place as a side effect, like the original.
    """
    numbers.sort()
    smallest_pair = numbers[0] * numbers[1]
    largest_pair = numbers[-1] * numbers[-2]
    # Two large negatives can beat the two largest values.
    if numbers[0] < 0 and numbers[1] < 0 and smallest_pair >= largest_pair:
        return smallest_pair
    return largest_pair
from unittest.mock import Mock
from game import Game
import math
from model.skills.whirlwind import Whirlwind
from model.systems.system import ComponentSystem
import pytest
def _mock_factory(*args, fighter=None, **kwargs):
    """Build a Mock entity registered with the game's fighter system.

    The mock's distance(x, y) is wired to return the Euclidean distance
    from the mock's own (x, y) attributes to the given point.
    """
    attached_fighter = fighter or Mock()
    entity = Mock(*args, **kwargs)
    Game.instance.fighter_system.set(entity, attached_fighter)
    entity.distance.side_effect = (
        lambda px, py: math.sqrt((px - entity.x) ** 2 + (py - entity.y) ** 2)
    )
    return entity
@pytest.fixture
def whirlwind():
    # Hands out the Whirlwind class itself, not an instance.
    yield Whirlwind
def test_process(whirlwind):
    """Whirlwind with radius 2 hits nearby entities but not the player or distant ones."""
    player_fighter = Mock()
    player = _mock_factory(x=1, y=2, fighter=player_fighter)
    bushslime = _mock_factory(x=7, y=8)     # far from the player: out of range
    tigerslash = _mock_factory(x=2, y=2)    # distance 1: in range
    steelhawk = _mock_factory(x=2, y=1)     # distance sqrt(2): in range
    whirlwind.process(player, 2, Mock(entities=[player, bushslime, tigerslash, steelhawk]))
    player_fighter.attack.assert_any_call(tigerslash)
    player_fighter.attack.assert_any_call(steelhawk)
    # assert_called_with inspects only the most recent call; expecting it
    # to raise shows neither the player nor bushslime was ever attacked last.
    with pytest.raises(AssertionError):
        player_fighter.attack.assert_called_with(player)
    with pytest.raises(AssertionError):
        player_fighter.attack.assert_called_with(bushslime)
|
'''
Created on Mar 12, 2019
@author: akash18.TRN
'''
# Target database, table and run metadata for the bulk loader.
dbname="Hello"
tname="employee2"
author="Teja"
# Delimiter-separated input data file.
filename="d2.txt"
# -1 presumably means "no primary-key column" -- TODO confirm in the loader.
primarykey_col=-1
thisdict={
};
# Number of rows inserted per batch.
batch_size=20000
# Field delimiter used in the input file.
deli=','
col_name=['col1','col2','col3','col4','col5','col6','col7','col8']
# 1-based column index -> SQL column type.
datatype_dict={
    1: "varchar(30)",
    2: "varchar(30)",
    3: "bigint",
    4: "datetime(6)",
    5: "double",
    6: "double",
    7: "double",
    8: "varchar(100)",
};
# Libraries
import gym
import numpy as np
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import copy
import sys
# Install TF 2 and enable GPU
# if "2." not in tf.__version__ or not tf.test.is_gpu_available():
# !pip uninstall tensorflow
# !pip install tensorflow-gpu
# print(f"Tensorflow version: {tf.__version__}")
# print(f"Python version: {sys.version}")
# device_name = tf.test.gpu_device_name()
# if device_name != '/device:GPU:0':
# raise SystemError('GPU device not found')
# print('Found GPU at: {}'.format(device_name))
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# Hyper parameters
PROBLEM = 'BreakoutDeterministic-v4'
FRAME_SKIP = 4
MEMORY_BATCH_SIZE = 32
REPLAY_START_SIZE = 50000
REPLAY_MEMORY_SIZE = 1000000 # RMSProp train updates sampled from this number of recent frames
NUMBER_OF_EPISODES = 1000000 # TODO: save and restore model with infinite episodes
EXPLORATION_RATE = 1
MIN_EXPLORATION_RATE = 0.1
MAX_FRAMES_DECAYED = REPLAY_MEMORY_SIZE / FRAME_SKIP # TODO: correct? 1 million in paper
# Preprocessed frame geometry fed to the network.
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 84, 84, 1
IMAGE_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)
# Convolutional stack: (filters, kernel size, stride) per layer.
CONV1_NUM_FILTERS, CONV1_FILTER_SIZE, CONV1_FILTER_STRIDES = 32, 8, 4
CONV2_NUM_FILTERS, CONV2_FILTER_SIZE, CONV2_FILTER_STRIDES = 64, 4, 2
CONV3_NUM_FILTERS, CONV3_FILTER_SIZE, CONV3_FILTER_STRIDES = 64, 3, 1
DENSE_NUM_UNITS, OUTPUT_NUM_UNITS = 512, 4 # TODO: GET Action count from constructor
LEARNING_RATE, GRADIENT_MOMENTUM, MIN_SQUARED_GRADIENT = 0.00025, 0.95, 0.01
HUBER_LOSS_DELTA, DISCOUNT_FACTOR = 1.0, 0.99
RANDOM_WEIGHT_INITIALIZER = tf.initializers.RandomNormal()
HIDDEN_ACTIVATION, OUTPUT_ACTIVATION, PADDING = 'relu', 'linear', "SAME" # TODO: remove?
TARGET_MODEL_UPDATE_FREQUENCY = 10000
# Fix: the original created this optimizer twice on consecutive lines,
# silently discarding the first instance; it is created exactly once now.
optimizer = tf.optimizers.RMSprop(learning_rate=LEARNING_RATE, rho=GRADIENT_MOMENTUM, epsilon=MIN_SQUARED_GRADIENT)
# LEAKY_RELU_ALPHA, DROPOUT_RATE = 0.2, 0.5 # TODO: remove or use to improve paper
class FramePreprocessor:
    """
    FramePreprocessor re-sizes, normalizes and converts RGB atari frames to gray scale frames.
    """
    def __init__(self, state_space):
        # state_space: shape of the raw gym observation; used to declare the
        # tf.Variable wrapper in preprocess_frame.
        self.state_space = state_space
    def convert_rgb_to_grayscale(self, tf_frame):
        # Collapse the 3 RGB channels into a single luminance channel.
        return tf.image.rgb_to_grayscale(tf_frame)
    def resize_frame(self, tf_frame, frame_height, frame_width):
        # Nearest-neighbour resize keeps original pixel values (no blending).
        return tf.image.resize(tf_frame, [frame_height,frame_width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    def plot_frame_from_greyscale_values(self, image):
        # Debug helper: rebuild an RGB image by repeating the grey value on all
        # three channels, then display it.
        # NOTE(review): relies on module-level `np` and `plt`, which are not
        # imported in this section -- confirm they are imported elsewhere.
        height, width, _ = image.shape
        grey_image = np.array([[(image[i, j].numpy()[0], image[i, j].numpy()[0], image[i, j].numpy()[0])
                                for i in range(height)]
                               for j in range(width)])
        grey_image = np.transpose(grey_image, (1, 0, 2))  # Switch height and width
        plt.imshow(grey_image)
        plt.show()
    def preprocess_frame(self, frame):
        # Full pipeline: raw RGB frame -> grayscale -> IMAGE_HEIGHT x IMAGE_WIDTH.
        tf_frame = tf.Variable(frame, shape=self.state_space, dtype=tf.uint8)
        image = self.convert_rgb_to_grayscale(tf_frame)
        image = self.resize_frame(image, IMAGE_HEIGHT, IMAGE_WIDTH)
        return image
# Todo use experience: (state, action, reward, next_state, is_done)
from typing import NamedTuple, Tuple
class Experience(NamedTuple):
    """One transition (s, a, r, s', done).

    NOTE(review): currently unused -- Environment.run stores plain tuples
    (see the commented-out `Experience(experience)` there).
    """
    state: Tuple[int, int, int]  # y, x, c
    action: int
    reward: float
    next_state: Tuple[int, int, int]
    is_done: bool
class ReplayMemory:
    """
    Fixed-capacity ring buffer of experiences (s, a, r, s', d).
    Once full, the oldest entry is overwritten; sampling is uniform without
    replacement over the currently filled slots.
    Credits: https://stackoverflow.com/questions/40181284/how-to-get-random-sample-from-deque-in-python-3
    """
    def __init__(self, capacity):
        # Pre-allocate every slot so writes are O(1) index assignments.
        self.experiences = [None] * capacity
        self.capacity = capacity
        self.index = 0  # next slot to write
        self.size = 0   # number of filled slots (<= capacity)
    def add(self, experience):
        # Write into the current slot, then advance; wrap-around overwrites
        # the earliest entry once capacity is reached.
        self.experiences[self.index] = experience
        if self.size < self.capacity:
            self.size += 1
        self.index = (self.index + 1) % self.capacity
    def sample(self, size):
        # Uniform sample of distinct filled slots, resolved to experiences.
        chosen = random.sample(range(self.size), size)
        return [self.experiences[slot] for slot in chosen]
class ConvolutionalNeuralNetwork:
    """
    CNN Architecture for DQN has 4 hidden layers:
    Input: 84 X 84 X 1 image (4 in paper due to frame skipping) (PREPROCESSED image), Game-score, Life count, Actions_count (4)
    1st Hidden layer: Convolves 32 filters of 8 X 8 with stride 4 (relu)
    2nd hidden layer: Convolves 64 filters of 4 X 4 with stride 2 (relu)
    3rd hidden layer: Convolves 64 filters of 3 X 3 with stride 1 (Relu)
    4th hidden layer: Fully connected, (512 relu units)
    Output: Fully connected linear layer, Separate output unit for each action, outputs are predicted Q-values

    NOTE(review): predict() calls convolutional_2d_layer without a strides argument,
    so every convolution runs with the default stride of 1 and the
    CONV*_FILTER_STRIDES constants are never applied. With "SAME" padding the
    feature maps therefore stay 84x84, which is why 'dense_weights' is sized
    IMAGE_HEIGHT * IMAGE_WIDTH * CONV3_NUM_FILTERS. Confirm against the paper
    architecture before changing either side.

    Parameters are CLASS attributes: every instance shares the same online and
    target networks.
    """
    weights = { # 4D: Filter Width, Filter Height, In Channel, Out Channel
        # Conv Layer 1: 8x8 conv, 1 input (preprocessed image has 1 color channel), 32 output filters
        'conv1_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV1_FILTER_SIZE, CONV1_FILTER_SIZE, IMAGE_CHANNELS, CONV1_NUM_FILTERS])),
        # Conv Layer 2: 4x4 conv, 32 input filters, 64 output filters
        'conv2_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV2_FILTER_SIZE, CONV2_FILTER_SIZE, CONV1_NUM_FILTERS, CONV2_NUM_FILTERS])),
        # Conv Layer 3: 3x3 conv, 64 input filters, 64 output filters
        'conv3_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV3_FILTER_SIZE, CONV3_FILTER_SIZE, CONV2_NUM_FILTERS, CONV3_NUM_FILTERS])),
        # Fully Connected (Dense) Layer: 84*84*64 inputs (stride-1 "SAME" convs keep 84x84), 512 output units
        'dense_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([IMAGE_HEIGHT * IMAGE_WIDTH * CONV3_NUM_FILTERS, DENSE_NUM_UNITS])),
        # Output layer: 512 input units, 4 output units (actions)
        'output_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([DENSE_NUM_UNITS, OUTPUT_NUM_UNITS]))
    }
    biases = {
        'conv1_biases': tf.Variable(tf.zeros([CONV1_NUM_FILTERS])), # 32
        'conv2_biases': tf.Variable(tf.zeros([CONV2_NUM_FILTERS])), # 64
        'conv3_biases': tf.Variable(tf.zeros([CONV3_NUM_FILTERS])), # 64
        'dense_biases': tf.Variable(tf.zeros([DENSE_NUM_UNITS])),   # 512
        'output_biases': tf.Variable(tf.zeros([OUTPUT_NUM_UNITS]))  # 4
    }
    # Target-network copies of the parameters above (same shapes, synced by
    # overwrite_model_params); used to stabilize the Q-learning targets.
    target_weights = { # 4D: Filter Height, Filter Width, In Channel, Out Channel
        # Conv Layer 1: 8x8 conv, 1 input (preprocessed image has 1 color channel), 32 output filters
        'conv1_target_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV1_FILTER_SIZE, CONV1_FILTER_SIZE, IMAGE_CHANNELS, CONV1_NUM_FILTERS])), # Out Channel
        # Conv Layer 2: 4x4 conv, 32 input filters, 64 output filters
        'conv2_target_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV2_FILTER_SIZE, CONV2_FILTER_SIZE, CONV1_NUM_FILTERS, CONV2_NUM_FILTERS])),
        # Conv Layer 3: 3x3 conv, 64 input filters, 64 output filters
        'conv3_target_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([CONV3_FILTER_SIZE, CONV3_FILTER_SIZE, CONV2_NUM_FILTERS, CONV3_NUM_FILTERS])),
        # Fully Connected (Dense) Layer: 84*84*64 inputs, 512 output units
        'dense_target_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([IMAGE_HEIGHT * IMAGE_WIDTH * CONV3_NUM_FILTERS, DENSE_NUM_UNITS])),
        # Output layer: 512 input units, 4 output units (actions)
        'output_target_weights': tf.Variable(RANDOM_WEIGHT_INITIALIZER([DENSE_NUM_UNITS, OUTPUT_NUM_UNITS]))
    }
    target_biases = {
        'conv1_target_biases': tf.Variable(tf.zeros([CONV1_NUM_FILTERS])), # 32
        'conv2_target_biases': tf.Variable(tf.zeros([CONV2_NUM_FILTERS])), # 64
        'conv3_target_biases': tf.Variable(tf.zeros([CONV3_NUM_FILTERS])), # 64
        'dense_target_biases': tf.Variable(tf.zeros([DENSE_NUM_UNITS])),   # 512
        'output_target_biases': tf.Variable(tf.zeros([OUTPUT_NUM_UNITS]))  # 4
    }
    def __init__(self, number_of_states, number_of_actions): #, model=None):
        # Stored for reference only; layer sizes come from the module constants.
        self.number_of_states = number_of_states
        self.number_of_actions = number_of_actions
    def overwrite_model_params(self): # Assume same order and length
        """Copy every online weight/bias into the matching target variable.

        Relies on the two dicts being declared in the same order (insertion
        order is preserved for dicts in Python 3.7+).
        """
        for weight, target_weight_key in zip(self.weights.values(), self.target_weights.keys()):
            self.target_weights[target_weight_key].assign(tf.identity(weight))
        for bias, target_bias_key in zip(self.biases.values(), self.target_biases.keys()):
            self.target_biases[target_bias_key].assign(tf.identity(bias))
    @tf.function
    def normalize_images(self, images):
        # Scale raw pixel values (0..255) into float32 in [0, 1].
        return tf.cast(images / 255, dtype=tf.float32)
    @tf.function
    def convolutional_2d_layer(self, inputs, filter_weights, biases, strides=1):
        """Conv2D + bias + ReLU. Callers in predict() keep the default strides=1."""
        output = tf.nn.conv2d(inputs, filter_weights, strides, padding=PADDING) # TODO: padding in paper?
        output_with_bias = tf.nn.bias_add(output, biases)
        activation = tf.nn.relu(output_with_bias) # non-linearity TODO: improve paper with leaky relu?
        return activation
    @tf.function
    def flatten_layer(self, layer): # output shape: [32, 64*84*84]
        """Flatten each sample's feature maps into one vector per batch element."""
        # Shape: Minibatches: 32, Num of Filters * Img Height, Image width: 64*84*84 = 451584
        # NOTE(review): get_shape() requires a statically known batch size --
        # confirm callers never pass a batch of unknown (None) size.
        memory_batch_size, image_height, image_width, num_filters = layer.get_shape()
        flattened_layer = tf.reshape(layer, (memory_batch_size, num_filters * image_height * image_width))
        return flattened_layer
    @tf.function
    def dense_layer(self, inputs, weights, biases):
        """Fully connected layer with ReLU activation."""
        output = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
        dense_activation = tf.nn.relu(output) # non-linearity
        # dropout = tf.nn.dropout(dense_activation, rate=DROPOUT_RATE) # TODO: does paper dropout?
        return dense_activation
    @tf.function
    def output_layer(self, input, weights, biases):
        """Linear (no activation) output layer: one Q-value per action."""
        linear_output = tf.nn.bias_add(tf.matmul(input, weights), biases)
        return linear_output
    @tf.function
    def huber_error_loss(self, y_true, y_predictions, delta=1.0):
        """Element-wise Huber loss: quadratic within |error| <= delta, linear outside.

        Symmetric in (y_true - y_predictions), so swapping the two arguments
        does not change the value. train() relies on the default delta=1.0,
        which equals the HUBER_LOSS_DELTA constant.
        """
        y_predictions = tf.cast(y_predictions, dtype=tf.float32)
        errors = y_true - y_predictions
        condition = tf.abs(errors) <= delta
        l2_squared_loss = 0.5 * tf.square(errors)
        l1_absolute_loss = delta * (tf.abs(errors) - 0.5 * delta)
        loss = tf.where(condition, l2_squared_loss, l1_absolute_loss)
        return loss
    @tf.function
    def train(self, inputs, outputs): # Optimization
        """One gradient step of the online network toward `outputs` (Q-targets).

        NOTE(review): predictions are passed as `y_true` and the targets as
        `y_predictions` -- swapped relative to the parameter names. The Huber
        loss is symmetric so the value is unaffected, and the tf.cast inside
        it lands on the float64 numpy targets, which is needed anyway.
        """
        # Wrap computation inside a GradientTape for automatic differentiation
        with tf.GradientTape() as tape:
            predictions = self.predict(inputs)
            current_loss = self.huber_error_loss(predictions, outputs)
        # Trainable variables to update
        trainable_variables = list(self.weights.values()) + list(self.biases.values())
        gradients = tape.gradient(current_loss, trainable_variables)
        # Update weights and biases following gradients
        optimizer.apply_gradients(zip(gradients, trainable_variables))
        # tf.print(tf.reduce_mean(current_loss))
    @tf.function
    def predict(self, inputs, is_target = False): # 4D input for CNN: (batch_size, height, width, depth)
        """Forward pass; is_target=True routes through the target-network parameters."""
        # Input shape: [32, 84, 84, 1]. A batch of 84x84x1 (gray scale) images.
        inputs = self.normalize_images(inputs)
        # Convolution Layer 1 with output shape [32, 84, 84, 32]
        conv1_weights = self.target_weights['conv1_target_weights'] if is_target else self.weights['conv1_weights']
        conv1_biases = self.target_biases['conv1_target_biases'] if is_target else self.biases['conv1_biases']
        conv1 = self.convolutional_2d_layer(inputs,conv1_weights,conv1_biases)
        # Convolutional Layer 2 with output shape [32, 84, 84, 64]
        conv2_weights = self.target_weights['conv2_target_weights'] if is_target else self.weights['conv2_weights']
        conv2_biases = self.target_biases['conv2_target_biases'] if is_target else self.biases['conv2_biases']
        conv2 = self.convolutional_2d_layer(conv1, conv2_weights, conv2_biases)
        # Convolutional Layer 3 with output shape [1, 84, 84, 64]
        conv3_weights = self.target_weights['conv3_target_weights'] if is_target else self.weights['conv3_weights']
        conv3_biases = self.target_biases['conv3_target_biases'] if is_target else self.biases['conv3_biases']
        conv3 = self.convolutional_2d_layer(conv2, conv3_weights, conv3_biases)
        # Flatten output of 2nd conv. layer to fit dense layer input, output shape [32, 64*84*84]
        flattened_layer = self.flatten_layer(layer=conv3)
        # Dense fully connected layer with output shape [1, 512]
        dense_weights = self.target_weights['dense_target_weights'] if is_target else self.weights['dense_weights']
        dense_biases = self.target_biases['dense_target_biases'] if is_target else self.biases['dense_biases']
        dense_layer = self.dense_layer(flattened_layer, dense_weights, dense_biases)
        # Fully connected output of shape [1, 4]
        output_weights = self.target_weights['output_target_weights'] if is_target else self.weights['output_weights']
        output_biases = self.target_biases['output_target_biases'] if is_target else self.biases['output_biases']
        output_layer = self.output_layer(dense_layer, output_weights, output_biases)
        return output_layer
    @tf.function
    def predict_one(self, state, is_target = False):
        """Forward pass for a single frame: wrap it in a batch of 1 and predict."""
        state = tf.reshape(state, shape=(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)) # Reshape
        prediction = self.predict(state, is_target)
        return prediction
class Agent:
    """
    Agent takes actions and saves them to its memory, which is initialized with a given capacity.

    Follows the DQN recipe: e-greedy behaviour policy with linear annealing,
    experience replay, and a periodically synced target network.
    """
    steps = 0
    exploration_rate = EXPLORATION_RATE
    def decay_exploration_rate(self):
        """Per-frame decrement that anneals exploration_rate to the minimum
        over MAX_FRAMES_DECAYED frames."""
        decay_rate = (self.exploration_rate - MIN_EXPLORATION_RATE) / MAX_FRAMES_DECAYED
        return decay_rate
    # Initialize agent with a given memory capacity, and a state, and action space
    def __init__(self, number_of_states, number_of_actions):
        self.experiences = ReplayMemory(REPLAY_MEMORY_SIZE)
        self.model = ConvolutionalNeuralNetwork(number_of_states, number_of_actions)
        self.number_of_states = number_of_states
        self.number_of_actions = number_of_actions
        self.decay_rate = self.decay_exploration_rate()
    # The behaviour policy during training was e-greedy with e annealed linearly
    # from 1.0 to 0.1 over the first million frames, and fixed at 0.1 thereafter
    def e_greedy_policy(self, state):
        """Greedy (argmax-Q) action with probability 1 - exploration_rate,
        otherwise a uniformly random action."""
        exploration_rate_threshold = random.uniform(0, 1)
        if exploration_rate_threshold > self.exploration_rate:
            next_q_values = self.model.predict_one(state)
            best_action = np.argmax(next_q_values)  # tf.argmax fails on tie
        else:
            best_action = self.random_policy()
        return best_action
    def random_policy(self):
        """Uniformly random action index in [0, number_of_actions)."""
        return random.randint(0, self.number_of_actions - 1)
    def act(self, state):
        # Act randomly until the replay memory is warmed up, then follow e-greedy.
        return self.random_policy() if self.experiences.size <= REPLAY_START_SIZE else self.e_greedy_policy(state)
    def update_target_model(self):
        """Copy the online network's parameters into the target network."""
        self.model.overwrite_model_params()
    @tf.function
    def reshape_image(self, images, batch_size=1):
        return tf.reshape(images, shape=(batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS))
    def observe(self, experience):
        """Store one transition, anneal exploration, and periodically sync
        the target network."""
        self.experiences.add(experience)
        self.steps += 1
        self.exploration_rate = (MIN_EXPLORATION_RATE if self.exploration_rate <= MIN_EXPLORATION_RATE
                                 else self.exploration_rate - self.decay_rate)
        if self.steps % TARGET_MODEL_UPDATE_FREQUENCY == 0:
            self.update_target_model()
    def replay(self): # Experience: (state, action, reward, next_state, is_done) # Train neural net with experiences
        """Sample a minibatch and fit Q(s,a) toward
        r + gamma * Q_target(s', argmax_a Q(s', a))  (Double-DQN style target)."""
        memory_batch = self.experiences.sample(MEMORY_BATCH_SIZE)
        # Terminal transitions carry next_state=None; substitute a zero frame so batching works.
        memory_batch = [(self.reshape_image(state), action, reward, np.zeros(shape=(1, *IMAGE_SHAPE), dtype=np.uint8), done) if done
                        else (self.reshape_image(state), action, reward, self.reshape_image(next_state), done)
                        for (state, action, reward, next_state, done) in memory_batch]
        states = self.reshape_image([state for (state, *rest) in memory_batch], batch_size=MEMORY_BATCH_SIZE)
        next_states = self.reshape_image([next_state for (_, _, _, next_state, _) in memory_batch], batch_size=MEMORY_BATCH_SIZE)
        state_predictions = self.model.predict(states)
        next_state_predictions = self.model.predict(next_states)
        target_next_state_predictions = self.model.predict(next_states, is_target = True)
        inputs = np.zeros(shape=(MEMORY_BATCH_SIZE, *IMAGE_SHAPE))
        # BUG FIX: previously read the module-level global `number_of_actions`;
        # use the agent's own action count so the class stands on its own.
        outputs = np.zeros(shape=(MEMORY_BATCH_SIZE, self.number_of_actions))
        for i, (state, action, reward, next_state, is_done) in enumerate(memory_batch):
            state_target = state_predictions[i].numpy()  # Target Q(s,a) for state and action of sample i: [Q1 Q2 Q3 Q4]
            next_state_target = target_next_state_predictions[i]
            future_discounted_reward = target_next_state_predictions[i][tf.argmax(next_state_predictions[i])]  # QTarget[nextstate][action]
            state_target[action] = reward if is_done else reward + DISCOUNT_FACTOR * future_discounted_reward
            inputs[i], outputs[i] = state, state_target
        self.model.train(inputs, outputs)
class Environment:
    """
    Creates a game environment which an agent can play using certain actions.
    Run takes an agent as argument that plays the game, until the agent 'dies' (no more lives)
    """
    def __init__(self, problem):
        self.gym = gym.make(problem)
        self.state_space = self.gym.observation_space.shape
        self.frame_preprocessor = FramePreprocessor(self.state_space)
        self.best_reward = 0  # best single-episode total reward seen so far
    def clip_reward(self, reward): # Clip positive rewards to 1 and negative rewards to -1
        return np.sign(reward)
    def run(self, agent, should_print):
        """Play one episode: act, observe, and (after warm-up) train each step.

        NOTE(review): self.gym.close() runs at the end of every episode, yet
        the main loop calls run() repeatedly on the same Environment --
        confirm the installed gym version tolerates step/reset after close().
        """
        state = self.gym.reset()
        state = self.frame_preprocessor.preprocess_frame(state)
        total_reward, step = 0, 0
        while True:
            action = agent.act(state)
            next_state, reward, is_done, _ = self.gym.step(action)
            next_state = self.frame_preprocessor.preprocess_frame(next_state)
            # reward = self.clip_reward(reward) # Only for generalization to other Atari games
            # Terminal states are stored as None; replay() substitutes zeros.
            if is_done: next_state = None
            experience = (state, action, reward, next_state, is_done) # Experience(experience)
            agent.observe(experience)
            if agent.experiences.size > REPLAY_START_SIZE: # SPEED UP BY TRAINING ONLY EVERY 50th STEP and step < 50:
                agent.replay() # Train on states in mini batches
            state = next_state
            total_reward += reward
            step += 1
            if is_done: break
        self.best_reward = total_reward if total_reward > self.best_reward else self.best_reward
        self.gym.close()
        if should_print:
            print(f"Total reward: {total_reward} memory: {agent.experiences.size} exploration rate: {agent.exploration_rate} \n")
# ---- Entry point: build environment and agent, then train for a fixed number of episodes ----
environment = Environment(PROBLEM)
number_of_states = environment.gym.observation_space.shape
number_of_actions = environment.gym.action_space.n
dqn_agent = Agent(number_of_states, number_of_actions)
for episode in range(NUMBER_OF_EPISODES):
    # NOTE(review): `% 1 == 0` is always True, so every episode prints;
    # presumably the modulus was meant to be configurable.
    should_print = (episode + 1) % 1 == 0
    environment.run(dqn_agent, should_print)
    if should_print:
        print(f"Episode: {episode+1} with best reward: {environment.best_reward}")
# TODO: 3) Save and restore model parameters 2) Convert NP to Tensors 3) Run experiments!!!
# Report: What did you implement. The experiments, difficulties (local machines, scalability, less episodes and memory) and results. Last 2-3 hours with less experiments. 14 pages
# Images of architecture, Breakout, convolutions, preprocessed images, Tables of results (time, reward, exploration rate, episodes, memory, hyperparams)
# Intro: Paper 1-2 page Objective, Theory behind CNN and Reinforcement Q Learning and Deep Q Learning 3 pages, Implementation 2 pages, Experiments and Results 2 pages, Discuss Improvements/Conclusion 1 page
# Improvements: faster machine, scalable optimizations, run with more games, generalize to other games? we run for Breakout but not for generalization
# Technical: CNN architecture, experience replay, Q target network,
# 500 episodes play randomly, train 300 episodes, env.render every100th episode and repeat training after,
|
"""
From causality-treated data; construct a graph of causality
Author : Diviyan Kalainathan
Date : 28/06/2016
"""
import csv
import cPickle as pkl
import numpy
import scipy.stats as stats
import sys
inputfolder = 'output/obj8/pca_var/cluster_5/'
causal_results = inputfolder + 'results_lp_CSP+Public_thres0.12.csv' # csv with 3 cols, Avar, Bvar & target
if 'obj8' in inputfolder:
obj = True
else:
obj = False
flags = False # Taking account of flags
skeleton_construction_method = int(sys.argv[1])
""" Type of skeleton construction
#0 : Skip and load computed data
#1 : Absolute value of Pearson's correlation
#2 : Regular value of Pearson's correlation
#3 : Causation coefficient
"""
if sys.argv[1][0] == '0': # Choose which data to load w/ arg of type "01"
load_skeleton = True
else:
load_skeleton = False
deconvolution_method = int(sys.argv[2])
"""Method used for the deconvolution
#1 : Deconvolution according to Soheil Feizi
#2 : Recursive method according to Michele Sebag
#3 : Deconvolution/global silencing by B. Barzel, A.-L. Barab\'asi
"""
print('Loading data')
ordered_var_names = pkl.load(open('input/header.p'))
if not flags: # remove flag vars
ordered_var_names = [x for x in ordered_var_names if 'flag' not in x]
if obj == True:
num_axis = 8
else:
num_axis = 5
for axis in range(num_axis):
ordered_var_names.append('pca_axis_' + str(axis + 1))
if flags:
ordered_var_names.append('pca_axis_' + str(axis + 1) + '_flag')
link_mat = numpy.ones((len(ordered_var_names), len(ordered_var_names))) # Matrix of links, fully connected
# 1 is linked and 0 unlinked,
print('Done.')
#### Pearson's correlation to remove links ####
print('Creating link skeleton')
if load_skeleton:
    # Method 0: reuse a previously computed skeleton from disk.
    print('Skipping construction & loading values')
    with open(inputfolder + 'link_mat_pval_' + str(skeleton_construction_method) + '.p', 'rb') as link_mat_file:
        link_mat = pkl.load(link_mat_file)
elif skeleton_construction_method < 3:
    # Methods 1/2: Pearson correlation over the pre-extracted value pairs.
    with open(inputfolder + 'pairs_c_5.csv', 'rb') as pairs_file:
        datareader = csv.reader(pairs_file, delimiter=';')
        header = next(datareader)
        threshold_pval = 0.05
        threshold_pearsonc=0.5
        var_1 = 0
        var_2 = 0
        # Idea: go through the vars and unlink the skipped (not in the pairs file) pairs of vars.
        # Invariant: the pairs file lists (var_1, var_2) in the same order as
        # ordered_var_names, so any name we have to skip over was never measured.
        for row in datareader:
            if row == []:  # Skipping blank lines
                continue
            pair = row[0].split('-')
            if not flags and ('flag' in pair[0] or 'flag' in pair[1]):
                continue  # Skipping values w/ flags
            # Finding the pair var_1 var_2 corresponding to the line
            # and un-linking skipped values
            while pair[0] != ordered_var_names[var_1]:
                if var_2 != len(ordered_var_names):
                    link_mat[var_1, var_2 + 1:] = 0  # rest of the row was never measured
                var_1 += 1
                var_2 = 0
            skipped_value = False  # Mustn't erase checked values
            while pair[1] != ordered_var_names[var_2]:
                if skipped_value:
                    link_mat[var_1, var_2] = 0
                var_2 += 1
                skipped_value = True
            # Parsing values of table & removing artifacts
var_1_value = [float(x) for x in row[1].split(' ') if x is not '']
var_2_value = [float(x) for x in row[2].split(' ') if x is not '']
            # Both columns must describe the same samples.
            if len(var_1_value) != len(var_2_value):
                raise ValueError
            # NOTE(review): a link is KEPT only when p-value < 0.05 AND
            # |r| < 0.5 -- keeping only weakly correlated significant pairs
            # looks inverted (|r| > threshold would be usual); confirm intent.
            if abs(stats.pearsonr(var_1_value, var_2_value)[1]) < threshold_pval\
                    and abs(stats.pearsonr(var_1_value, var_2_value)[0]) < threshold_pearsonc :
                if skeleton_construction_method == 1:
                    # Method 1: store |r| as the link weight.
                    link_mat[var_1, var_2] = abs(stats.pearsonr(var_1_value, var_2_value)[0])
                elif skeleton_construction_method == 2:
                    # Method 2: store the signed correlation.
                    link_mat[var_1, var_2] = (stats.pearsonr(var_1_value, var_2_value)[0])
            else:
                link_mat[var_1, var_2] = 0
    # Symmetrize matrix (only the upper triangle was filled above)
    for col in range(0, (len(ordered_var_names) - 1)):
        for line in range(col + 1, (len(ordered_var_names))):
            link_mat[line, col] = link_mat[col, line]
    # Diagonal elts: no self-links
    for diag in range(0, (len(ordered_var_names))):
        link_mat[diag, diag] = 0
#### Causality score to remove links ####
elif skeleton_construction_method == 3:
    # Method 3: keep a link when the causation coefficient exceeds a threshold.
    with open(causal_results, 'rb') as pairs_file:
        datareader = csv.reader(pairs_file, delimiter=';')
        header = next(datareader)
        threshold = 0.12
        var_1 = 0
        var_2 = 0
        # Idea: go through the vars and unlink the skipped (not in the pairs file) pairs of vars.
        for row in datareader:
            if not flags and ('flag' in row[0] or 'flag' in row[1]):
                continue  # Skipping values w/ flags
            # Finding the pair var_1 var_2 corresponding to the line
            # and un-linking skipped values
            while row[0] != ordered_var_names[var_1]:
                if var_2 != len(ordered_var_names):
                    link_mat[var_1, var_2 + 1:] = 0
                var_1 += 1
                var_2 = 0
            skipped_value = False  # Mustn't erase checked values
            while row[1] != ordered_var_names[var_2]:
                if skipped_value:
                    link_mat[var_1, var_2] = 0
                var_2 += 1
                skipped_value = True
            if float(row[2]) > threshold:
                link_mat[var_1, var_2] = float(row[2])
    # Anti-symmetrize matrix: causation is directed, so the mirror entry is negated.
    for col in range(0, (len(ordered_var_names) - 1)):
        for line in range(col + 1, (len(ordered_var_names))):
            link_mat[line, col] = -link_mat[col, line]
    # Diagonal elts: no self-links
    for diag in range(0, (len(ordered_var_names))):
        link_mat[diag, diag] = 0
else:
    raise ValueError
# Cache the freshly built skeleton for later runs with method 0.
if skeleton_construction_method != 0:
    with open(inputfolder + 'link_mat_pval_' + str(skeleton_construction_method) + '.p', 'wb') as link_mat_file:
        pkl.dump(link_mat, link_mat_file)
print('Done.')
#### Loading causation data ####
# Go through all nodes and remove redundant links
list_var = []  # create blank list for name of vars
causality_links = []  # List of links between vars; entry i is [parents, children] of var i
# Init list var and causation links:
for name_var in ordered_var_names:
    list_var.append(name_var)
    causality_links.append([[], []])
# Import data, construction of data structure
with open(causal_results, 'rb') as inputfile:
    reader = csv.reader(inputfile, delimiter=';')
    header = next(reader)
    for row in reader:
        # Register any variable not already known from ordered_var_names.
        if (row[0]) not in list_var:
            list_var.append(row[0])
            causality_links.append([[], []])  # 0 for parents, 1 for children
        if (row[1]) not in list_var:
            list_var.append(row[1])
            causality_links.append([[], []])  # 0 for parents, 1 for children
        # Positive target: row[0] causes row[1]; negative: the reverse.
        if float(row[2]) > 0:
            causality_links[list_var.index(row[0])][1].append(list_var.index(row[1]))
            causality_links[list_var.index(row[1])][0].append(list_var.index(row[0]))
        else:
            causality_links[list_var.index(row[0])][0].append(list_var.index(row[1]))
            causality_links[list_var.index(row[1])][1].append(list_var.index(row[0]))
# Persist the parent/child structure alongside the results file.
with open(causal_results + 'causality.pkl', 'wb') as handle:
    pkl.dump(causality_links, handle)
with open(causal_results + 'list_vars.pkl', 'wb') as handle:
    pkl.dump(list_var, handle)
#### Apply deconvolution ####
print('Deconvolution')
if deconvolution_method == 1:
    """This is a python implementation/translation of network deconvolution
    AUTHORS:
    Algorithm was programmed by Soheil Feizi.
    Paper authors are S. Feizi, D. Marbach, M. Medard and M. Kellis
    REFERENCES:
    For more details, see the following paper:
    Network Deconvolution as a General Method to Distinguish
    Direct Dependencies over Networks
    By: Soheil Feizi, Daniel Marbach, Muriel Medard and Manolis Kellis
    Nature Biotechnology"""  # Credits, Ref
    # Closed form: Gdir = Gobs * (I + Gobs)^-1
    Gdir = numpy.dot(link_mat, numpy.linalg.inv(numpy.identity(len(ordered_var_names)) + link_mat))
elif deconvolution_method == 2:
    # Recursive method: not implemented yet -- passes the skeleton through unchanged.
    # Creating all possible combinations:
    Gdir = link_mat
    causality_possibilites = [[[], []] for i in causality_links]
    # ToDO
    # 1. Generate all lists
    # 2. Generate up to n parents & n children
elif deconvolution_method == 3:
    """This is a python implementation/translation of network deconvolution
    AUTHORS :
    B. Barzel, A.-L. Barab\'asi
    REFERENCES :
    Network link prediction by global silencing of indirect correlations
    By: Baruch Barzel, Albert-L\'aszl\'o Barab\'asi
    Nature Biotechnology"""  # Credits, Ref
    # Global silencing: Gdir = (G - I + diag((G - I) G)) * G^-1
    mat_diag= numpy.zeros((len(ordered_var_names),len(ordered_var_names)))
    D_temp= numpy.dot(link_mat-numpy.identity(len(ordered_var_names)),link_mat)
    for i in range(len(ordered_var_names)):
        mat_diag[i,i]=D_temp[i,i]
    Gdir = numpy.dot((link_mat-numpy.identity(len(ordered_var_names))+mat_diag),numpy.linalg.inv(link_mat))
else:
    raise ValueError
print('Done.')
#### Output values ####
# Emit one directed edge per retained link, oriented by the causation structure.
print('Writing output files')
with open(inputfolder + 'deconv_links' + str(skeleton_construction_method) + str(deconvolution_method) + '.csv',
          'wb') as outputfile:
    writer = csv.writer(outputfile, delimiter=';', lineterminator='\n')
    writer.writerow(['Source', 'Target', 'Weight'])
    for var_1 in range(len(ordered_var_names) - 1):
        for var_2 in range(var_1 + 1, len(ordered_var_names)):
            if abs(Gdir[var_1, var_2]) > 0.001:  # ignore value if it's near 0
                # Find the causal direction
                if list_var.index(ordered_var_names[var_2]) in \
                        causality_links[list_var.index(ordered_var_names[var_1])][1]:
                    # var_2 is the child
                    writer.writerow([ordered_var_names[var_1], ordered_var_names[var_2], abs(Gdir[var_1, var_2])])
                elif list_var.index(ordered_var_names[var_2]) in \
                        causality_links[list_var.index(ordered_var_names[var_1])][
                            0]:
                    # Var_2 is the parent
                    writer.writerow([ordered_var_names[var_2], ordered_var_names[var_1], abs(Gdir[var_1, var_2])])
                # NOTE(review): pairs with no recorded causal direction are
                # silently dropped -- confirm that is intended.
print('Done.')
print('End of program.')
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
import sys
from os import path
from io import BytesIO
from difflib import unified_diff
import json
from django.test import TestCase
from django.core import serializers
from django.core.management import call_command
from django_dumpdb import dumprestore
DATA_PATH = path.join(path.dirname(__file__), 'testdata')
class TestDumpRestoreBase(object):
    """Mixin with dump/restore round-trip tests.

    Concrete test cases are generated by make_tests() below, one subclass per
    data-set directory under testdata/, each setting `data_set`.
    """
    data_set = None  # directory name under testdata/; set on generated subclasses
    def open_fixture(self):
        # JSON fixture used to populate the database before dumping.
        return open(path.join(DATA_PATH, self.data_set, 'dump.json'))
    def open_dump(self):
        # Reference dump in django_dumpdb's own format.
        return open(path.join(DATA_PATH, self.data_set, 'dump'))
    def test_dump(self):
        """Dump test data and compare with the reference file."""
        with self.open_fixture() as fixture:
            objects = serializers.deserialize('json', fixture)
            for obj in objects:
                obj.save()
        output = BytesIO()
        dumprestore.dump(file=output)
        with self.open_dump() as reference_dump:
            self.assertTextEqual(reference_dump.read(), output.getvalue())
    def test_restore(self):
        """Restore the reference dump, re-serialize via dumpdata, compare as JSON."""
        with self.open_dump() as dump:
            dumprestore.load(dump)
        # Capture dumpdata's stdout.
        # NOTE(review): capturing into BytesIO assumes dumpdata writes bytes
        # (Python 2); Python 3 would need io.StringIO -- confirm target version.
        backup_stdout = sys.stdout
        try:
            sys.stdout = BytesIO()
            call_command('dumpdata', format='json', indent=4, traceback=True)
            result = sys.stdout.getvalue()
        finally:
            sys.stdout.close()
            sys.stdout = backup_stdout
        # Compare parsed JSON so formatting differences don't matter.
        with self.open_fixture() as reference_fixture:
            self.assertEqual(
                json.loads(reference_fixture.read()),
                json.loads(result)
            )
    def assertTextEqual(self, expected, got):
        # Equality assertion whose failure message is a unified diff.
        self.assertEqual(
            got,
            expected,
            '\n' + '\n'.join(unified_diff(expected.splitlines(), got.splitlines(), 'expected', 'got')),
        )
def make_tests():
    """Generate one concrete TestCase subclass per data set named in testdata/index."""
    with open(path.join(DATA_PATH, 'index')) as index:
        names = [line.strip() for line in index]
    for name in names:
        cls_name = 'TestDumpRestore_%s' % name
        # Inject the generated class into this module so the test runner finds it.
        globals()[cls_name] = type(cls_name, (TestDumpRestoreBase, TestCase), {'data_set': name})
make_tests()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 21:59:46 2020
@author: thomas
"""
import numpy as np
import pandas as pd
import os, sys
import time as t
import matplotlib as mpl
mpl.use('Agg')  # headless backend: must be selected before importing pyplot
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import (MultipleLocator,AutoMinorLocator)
import pathlib
from matplotlib.colors import Normalize
from scipy import interpolate
norm = Normalize()  # shared color normalizer used by the plotting routines
from resource import getrusage, RUSAGE_SELF  # peak-memory reporting during interpolation
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
RHO = 1000.0                 # fluid density
DX = 0.025/256.0             # grid spacing
PERIOD = 0.1                 # oscillation period
FREQUENCY = 1.0/PERIOD
OMEGA = 2.0*np.pi*FREQUENCY  # angular frequency
RADIUS_LARGE = 0.002         # large-sphere radius
AMPLITUDE = 0.8*RADIUS_LARGE
# NOTE(review): AMPLITUDE/AMPLITUDE is always 1.0 -- likely meant
# AMPLITUDE/RADIUS_LARGE (= 0.8); confirm before relying on EPSILON.
EPSILON = AMPLITUDE/AMPLITUDE
maxWin = 0.03                # plot window half-extent
minWin = -1.0*maxWin
config = sys.argv[1]                        # configuration name (CLI arg 1)
Re = sys.argv[2]                            # Reynolds number label (CLI arg 2)
perNumber = int(float(sys.argv[3])/PERIOD)  # period index derived from a time value (CLI arg 3)
csfont = {'fontname':'Times New Roman'}
# constructs a filepath for the plot images of Re=$Re, config=$config, and field=$field
def plotName(cwd,Re,config,field,idx):
    """Return the figure path for one plot frame, creating its directory if needed.

    field is one of: Vort, Pres, Vel, AvgW, AvgP, AvgU.
    """
    figure_dir = "{0}../Figures/VelDecay/{1}/".format(cwd, config)
    pathlib.Path(figure_dir).mkdir(parents=True, exist_ok=True)
    file_stem = "{0}_{1}_{2}_{3}".format(config, Re, field, idx)
    return figure_dir + file_stem
def set_size(w,h, ax=None):
    """ w, h: width, height in inches

    Resize the figure so the AXES region (not the whole figure) measures
    w x h inches, compensating for the subplot margins.
    """
    ax = ax if ax else plt.gca()
    pars = ax.figure.subplotpars
    axes_width_fraction = pars.right - pars.left
    axes_height_fraction = pars.top - pars.bottom
    ax.figure.set_size_inches(float(w) / axes_width_fraction,
                              float(h) / axes_height_fraction)
    return ax
def Rotate(xy, theta):
    """Rotate the 2-vector `xy` counter-clockwise by `theta` radians.

    See https://en.wikipedia.org/wiki/Rotation_matrix#In_two_dimensions
    """
    c, s = np.cos(theta), np.sin(theta)
    rotation = np.array([[c, -s],
                         [s,  c]])
    return rotation.dot(xy)
def CalcLabAngle(pos):
    """Return 2*pi minus the angle of swimmer a's axis, measured from the +y axis.

    The axis runs from the lower sphere (aXL, aYL) to the upper sphere
    (aXU, aYU) of row 0 of `pos`; the result is the rotation to apply so the
    axis aligns with +y.
    """
    dx = pos.loc[0,'aXU'] - pos.loc[0,'aXL']
    dy = pos.loc[0,'aYU'] - pos.loc[0,'aYL']
    axis_length = np.hypot(dx, dy)
    unit_x = dx / axis_length
    unit_y = dy / axis_length
    # Branch on the x-component so theta lands in [0, 2*pi).
    if unit_x <= 0.0:
        theta = np.arccos(unit_y)
    else:
        theta = 2.0*np.pi - np.arccos(unit_y)
    print('theta = ',theta*180.0/np.pi)
    return 2.0*np.pi - theta
def RotateVectorField(pos,mx,my,Ux,Uy,NX,NY):
    """Shift the frame to the swimmer pair's center of mass, then rotate
    coordinates, velocity field and sphere positions so swimmer a's axis
    aligns with +y.

    Mutates `pos` in place (shifted columns plus new *_rot columns) and
    returns (pos, mx_rot, my_rot, Ux_rot, Uy_rot).
    """
    #Shift field by CM
    #Calculate angle of swimmer 1 from y-axis
    #Rotate field by 2pi - theta
    #Shift x and y by the CM location
    xCM = 0.25*(pos.loc[0,'aXU'] + pos.loc[0,'bXU'] + pos.loc[0,'aXL'] + pos.loc[0,'bXL'])
    yCM = 0.25*(pos.loc[0,'aYU'] + pos.loc[0,'bYU'] + pos.loc[0,'aYL'] + pos.loc[0,'bYL'])
    #Do the same for mx and my
    mx -= xCM
    my -= yCM
    #Shift pos data by xCM and yCM
    pos['aXU'] -= xCM
    pos['aXL'] -= xCM
    pos['bXU'] -= xCM
    pos['bXL'] -= xCM
    pos['aYU'] -= yCM
    pos['aYL'] -= yCM
    pos['bYU'] -= yCM
    pos['bYL'] -= yCM
    #Rotate Reference frame by swimmer 1's axis
    #Calculate Theta (Rotate by -Theta)
    theta_rotate = CalcLabAngle(pos)
    print('theta_rotate = ',theta_rotate*180.0/np.pi)
    # Rotate every mesh point and velocity vector one at a time.
    mxy = np.array([mx.flatten(),my.flatten()])
    mxy_rot = np.zeros((2,NX*NY))
    #Do the same for the U field
    Uxy = np.array([Ux.flatten(),Uy.flatten()])
    Uxy_rot = np.zeros((2,NX*NY))
    for jdx in range(NX*NY):
        mxy_rot[:,jdx] = Rotate(mxy[:,jdx],theta_rotate)
        Uxy_rot[:,jdx] = Rotate(Uxy[:,jdx],theta_rotate)
    mx_rot = mxy_rot[0,:].reshape((NX,NY))
    my_rot = mxy_rot[1,:].reshape((NX,NY))
    Ux_rot = Uxy_rot[0,:].reshape((NX,NY))
    Uy_rot = Uxy_rot[1,:].reshape((NX,NY))
    # Rotate the four sphere centers and record them as new *_rot columns.
    aU_pos = np.array([pos.loc[0,'aXU'],pos.loc[0,'aYU']])
    aL_pos = np.array([pos.loc[0,'aXL'],pos.loc[0,'aYL']])
    bU_pos = np.array([pos.loc[0,'bXU'],pos.loc[0,'bYU']])
    bL_pos = np.array([pos.loc[0,'bXL'],pos.loc[0,'bYL']])
    aU_rot = Rotate(aU_pos,theta_rotate)
    print('aU = ',aU_pos)
    print('aU_rot = ',aU_rot)
    aL_rot = Rotate(aL_pos,theta_rotate)
    bU_rot = Rotate(bU_pos,theta_rotate)
    bL_rot = Rotate(bL_pos,theta_rotate)
    pos['aXU_rot'], pos['aYU_rot'] = aU_rot[0], aU_rot[1]
    pos['aXL_rot'], pos['aYL_rot'] = aL_rot[0], aL_rot[1]
    pos['bXU_rot'], pos['bYU_rot'] = bU_rot[0], bU_rot[1]
    pos['bXL_rot'], pos['bYL_rot'] = bL_rot[0], bL_rot[1]
    return (pos,mx_rot,my_rot,Ux_rot,Uy_rot)
def InterpolateToNewCoordinateSystem(mx,my,arrayUx,arrayUy,mx_new,my_new):
    """Linearly re-grid both velocity components from the scattered points
    (mx, my) onto the target coordinates (mx_new, my_new).

    Prints peak memory around each griddata call, since these are the
    memory-heavy steps. Returns (Ux_new, Uy_new).
    """
    source_points = (mx.flatten(), my.flatten())
    target_points = (mx_new, my_new)
    print('About to inteprolate field data')
    print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
    sys.stdout.flush()
    ux_regridded = interpolate.griddata(source_points, arrayUx.flatten(), target_points, method='linear')
    print('X transformation complete')
    print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
    sys.stdout.flush()
    uy_regridded = interpolate.griddata(source_points, arrayUy.flatten(), target_points, method='linear')
    print('Coordinate Transformation Complete!')
    print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
    sys.stdout.flush()
    return (ux_regridded, uy_regridded)
def PlotVelocityDecay(cwd,time,mx,my,w,Ux,Uy,pos,pars):
    """Plot velocity-decay lineouts along the +/-x and +/-y axes of the
    swimmer-1 frame and save the 2x2 figure to disk.

    Parameters:
        cwd    : output directory passed to plotName() for the figure path
        time   : simulation time (not used directly in this function)
        mx, my : field mesh coordinates (a 1022x1022 grid is assumed below)
        w      : vorticity field (accepted but unused here)
        Ux, Uy : averaged velocity components
        pos    : single-row DataFrame of sphere positions
        pars   : [Re, config, field-name, dump index] used for the file name

    NOTE(review): relies on module-level RADIUS_LARGE, FREQUENCY, csfont,
    plotName(), SetTickParams() and RotateVectorField().
    """
    global OMEGA, EPSILON, RADIUS_LARGE
    # Unpack plotting parameters.
    Re = pars[0]
    config = pars[1]
    field = pars[2]
    idx = pars[3]
    fig, ax = plt.subplots(nrows=2, ncols=2,figsize=(12,12),dpi=200,num=10)
    ax[0,0].set_ylabel(r'$U_x$ (U/fR)',fontsize=25,**csfont)
    ax[1,0].set_ylabel(r'$U_y$ (U/fR)',fontsize=25,**csfont)
    ax[1,0].set_xlabel(r'$r$ (R)',fontsize=25,**csfont)
    ax[1,1].set_xlabel(r'$r$ (R)',fontsize=25,**csfont)
    #ax.set_title(r'time = %.2fs'%(time),fontsize=16)
    #Rotate by Swimmer 1 axis around CM of pair
    pos,mx_rot,my_rot,Ux_rot,Uy_rot = RotateVectorField(pos,mx,my,Ux,Uy,1022,1022)
    #Interpolate onto a new coordinate system (regular 512x512 grid)
    x = np.linspace(-0.05,0.05,512)
    y = np.linspace(-0.05,0.05,512)
    mx_stream, my_stream = np.meshgrid(x,y)
    interpUx, interpUy = InterpolateToNewCoordinateSystem(mx_rot,my_rot,Ux_rot,Uy_rot,mx_stream,my_stream)
    #Now that we have the interpolated (Rotated) velocity field
    #We can calculate the lineouts for each dir (+-x, +-y)
    #First, get lineouts from mesh
    #For neg, loop over 0:256, get avg of 255,256 values for Ux or Uy
    #For pos, loop over 256:512, get avg of 255,256 values for Ux or Uy
    # Axis coordinates normalized by the large-sphere radius.
    line_x_neg = mx_stream[0,0:256]/RADIUS_LARGE
    line_x_pos = mx_stream[0,256:512]/RADIUS_LARGE
    print(line_x_pos)
    sys.stdout.flush()
    line_y_neg = my_stream[0:256,0]/RADIUS_LARGE
    line_y_pos = my_stream[256:512,0]/RADIUS_LARGE
    # Velocities averaged over the two central rows/columns and
    # nondimensionalized by f*R.
    U_x_neg = 0.5*(interpUx[255,0:256] + interpUx[256,0:256])/(FREQUENCY*RADIUS_LARGE)
    U_y_neg = 0.5*(interpUy[0:256,255] + interpUy[0:256,256])/(FREQUENCY*RADIUS_LARGE)
    U_x_pos = 0.5*(interpUx[255,256:512] + interpUx[256,256:512])/(FREQUENCY*RADIUS_LARGE)
    U_y_pos = 0.5*(interpUy[256:512,255] + interpUy[256:512,256])/(FREQUENCY*RADIUS_LARGE)
    #Plot lineout for each axis direction
    #Plot y = 0 reference lines
    ax[0,0].plot([np.amin(line_x_neg),np.amax(line_x_neg)],[0.0,0.0],c='k')
    ax[0,1].plot([np.amin(line_x_pos),np.amax(line_x_pos)],[0.0,0.0],c='k')
    ax[1,0].plot([np.amin(line_y_neg),np.amax(line_y_neg)],[0.0,0.0],c='k')
    ax[1,1].plot([np.amin(line_y_pos),np.amax(line_y_pos)],[0.0,0.0],c='k')
    #Plot Velocities
    ax[0,0].plot(line_x_neg,U_x_neg,c='k')
    ax[0,1].plot(line_x_pos,U_x_pos,c='k')
    ax[1,0].plot(line_y_neg,U_y_neg,c='k')
    ax[1,1].plot(line_y_pos,U_y_pos,c='k')
    #axis = [- 0.025,0.025,-0.025,0.025]
    #ax.axis(axis)
    #ax.set_aspect('equal')
    ax[0,0] = SetTickParams(ax[0,0])
    ax[0,1] = SetTickParams(ax[0,1])
    ax[1,0] = SetTickParams(ax[1,0])
    ax[1,1] = SetTickParams(ax[1,1])
    #ax[0,0].yaxis.set_minor_locator(MultipleLocator(0.0025))
    #ax[0,0].yaxis.set_major_locator(MultipleLocator(0.005))
    #ax[0,1].yaxis.set_minor_locator(MultipleLocator(0.0025))
    #ax[0,1].yaxis.set_major_locator(MultipleLocator(0.005))
    #ax[1,0].yaxis.set_minor_locator(MultipleLocator(0.04))
    #ax[1,0].yaxis.set_major_locator(MultipleLocator(0.02))
    #ax[1,1].yaxis.set_minor_locator(MultipleLocator(0.005))
    #ax[1,1].yaxis.set_major_locator(MultipleLocator(0.0025))
    # Enlarge every tick label on all four panels.
    for jdx in range(2):
        for kdx in range(2):
            for label in (ax[jdx,kdx].get_xticklabels() + ax[jdx,kdx].get_yticklabels()):
                label.set_fontsize(20)
    #ax[0,0] = set_size(6,6,ax[0,0])
    #ax[0,1] = set_size(6,6,ax[0,1])
    #ax[1,0] = set_size(6,6,ax[1,0])
    #ax[1,1] = set_size(6,6,ax[1,1])
    fig.tight_layout()
    fig.savefig(plotName(cwd,Re,config,field,idx)+'.png')
    fig.clf()
    plt.close()
    return
def SetTickParams(ax):
    """Apply the shared tick style to a matplotlib Axes and return it.

    Major ticks: inward, long; minor ticks: inward, shorter.  X-axis tick
    spacing is fixed (major every 5, minor every 2.5); y-axis minors are
    placed automatically at the midpoint between majors.
    """
    ax.tick_params(which='major',axis='both',direction='in',length=14,width=1,zorder=10)
    ax.tick_params(which='minor',axis='both',direction='in',length=8,width=0.75)
    ax.xaxis.set_major_locator(MultipleLocator(5.0))
    ax.xaxis.set_minor_locator(MultipleLocator(2.5))
    ax.yaxis.set_minor_locator(AutoMinorLocator(n=2))
    return ax
# constructs a filepath for the pos data of Re = $Re
def pname(cwd):
    """Return the path of the position-data file (pd.txt) inside *cwd*."""
    return "{}/pd.txt".format(cwd)
def GetPosData(cwd, time, config):
    """Load the position data from *cwd* and return the rows at *time*.

    Configurations 'V' and 'O' store timestamps at double rate, so for
    those the lookup time is scaled by two.  The matching rows are
    re-indexed from zero before being returned.
    """
    frame = pd.read_csv(pname(cwd), delimiter=' ')
    lookup = time * 2.0 if config in ('V', 'O') else time
    return frame[frame['time'] == lookup].reset_index(drop=True)
def GetPosDataLength(cwd):
    """Return the number of records (rows) in the position-data file."""
    frame = pd.read_csv(pname(cwd), delimiter=' ')
    return frame['time'].size
def GetAvgFieldData(cwd, idx, Nx=1024, Ny=1024):
    """Load time-averaged field data for dump *idx* from directory *cwd*.

    The CSV file ('AVG_%04d.csv', space-delimited) holds flattened fields
    in columns mx, my, avgW, avgP, avgUx, avgUy; each column is reshaped
    to an (Nx, Ny) array.

    Parameters:
        cwd : directory prefix (callers pass a trailing separator)
        idx : dump index used to build the file name
        Nx, Ny : grid shape; default 1024x1024 (the previously hard-coded
                 size), now overridable for other resolutions

    Returns:
        (mxArr, myArr, WArr, PArr, UxArr, UyArr) as (Nx, Ny) numpy arrays.
    """
    fieldData = pd.read_csv(cwd + 'AVG_%04d.csv' % idx, delimiter=' ')
    print(fieldData.head())

    def _grid(col):
        # Straight column -> array -> reshape; the previous implementation
        # round-tripped through a Python list (tolist then np.array),
        # copying every field twice for no benefit.
        return fieldData[col].to_numpy().reshape((Nx, Ny))

    return (_grid('mx'), _grid('my'), _grid('avgW'),
            _grid('avgP'), _grid('avgUx'), _grid('avgUy'))
if __name__ == '__main__':
    # READ ALL AVG FILES IN A SIMULATION DIRECTORY
    # EXTRACT AVERAGE FIELD DATA INTO NUMPY ARRAYS
    # PLOT AVERAGED FIELD DATA
    # NOTE(review): cwd_PYTHON, config, Re, perNumber, PERIOD and `t`
    # are expected to be defined earlier in this file (not visible here).
    cwd_Re = cwd_PYTHON + '../' + config + '/Re' + Re + '/'
    # Extract Position Data (fixed: this line was a duplicated
    # `cwd_POS = cwd_POS = ...` chained assignment).
    cwd_POS = cwd_PYTHON + '../PosData/' + config + '/Re' + Re + '/'
    # Calculate the number of averaging periods from the dump interval.
    DUMP_INT = 20.0
    nTime = GetPosDataLength(cwd_POS)
    nPer = int(np.trunc(1.0 * nTime / DUMP_INT))
    # Paths to data and plots
    cwd_DATA = cwd_Re + '/VTK/AVG/'
    for countPer in range(nPer):
        # Only the single requested period is processed.
        if countPer == perNumber:
            AVGPlot = pathlib.Path(cwd_DATA + 'AVG_%04d.csv' % countPer)
            if AVGPlot.exists():
                # NOTE(review): time.clock() was removed in Python 3.8;
                # if `t` is the time module this should become
                # t.perf_counter().
                start = t.clock()
                # Get Avg Field Data
                mx, my, avgW, avgP, avgUx, avgUy = GetAvgFieldData(cwd_DATA, countPer)
                # Extract Position and Time Data
                time = np.round(0.05 + countPer * PERIOD, 2)
                posData = GetPosData(cwd_POS, time, config)
                # Plot Averaged Field Data: velocity-decay lineouts
                pars = [Re, config, 'VelDecay', countPer]
                PlotVelocityDecay(cwd_PYTHON, time, mx, my, avgW, avgUx, avgUy, posData, pars)
                stend = t.clock()
                diff = stend - start
                print('Time to run for 1 period = %.5fs' % diff)
                sys.stdout.flush()
|
# from sololearn
# class methods, static methods, properties
# classmethods
# class methods are called on the class, and the class itself is passed to the cls parameter
# Methods of objects we've looked at so far are called by an instance of a class, which is then passed to the self parameter of the method.
# Class methods are different - they are called by a class, which is passed to the cls parameter of the method.
# A common use of these are factory methods, which instantiate an instance of a class, using different parameters than those usually passed to the class constructor.
class Rectangle:
    """Tutorial class showing a classmethod used as a factory."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def calculate_area(self):
        """Area of the rectangle: width times height."""
        return self.height * self.width

    @classmethod
    def new_square(cls, side_length):
        """Factory method: a square is a rectangle with equal sides."""
        return cls(side_length, side_length)


square = Rectangle.new_square(5)
print(square.calculate_area())
# static method
# identical to normal functions of a class
# like classmethods but no additional arguments
class Calculator:
    """Tutorial class showing a staticmethod: it gets neither self nor cls."""

    def __init__(self, value1, value2):
        self.value1 = value1
        self.value2 = value2

    @staticmethod
    def add(value1, value2):
        """Plain addition, simply namespaced under the class."""
        total = value1 + value2
        return total


n1 = int(1)
n2 = int(1)
print(Calculator.add(n1, n2))
# properties
# customizing access to instance attributes
# one common use of a property is to make an attribute read-only
class Pizza:
    """Tutorial class with a read-only property."""

    def __init__(self, toppings):
        self.toppings = toppings

    @property
    def pineapple_allowed(self):
        # @property lets callers read this like an attribute; it is
        # effectively read-only and always False here.
        return False


pizza = Pizza(["cheese", "tomato"])
print(pizza.pineapple_allowed)
# works without @property, if typed print(pizza.pineapple_allowed()) > normal function
# properties can also be set by defining setter/getter functions
# the setter function sets the corresponding property's value, the getter gets the value
# @propertyname.setter @propertyname.getter
class Pizza:
    """Tutorial class: a property with a password-guarded setter."""

    def __init__(self, toppings):
        self.toppings = toppings
        # Backing field for the property; the underscore marks it non-public.
        self._pineapple_allowed = False

    @property
    def pineapple_allowed(self):
        """Getter: report whether pineapple is currently allowed."""
        return self._pineapple_allowed

    @pineapple_allowed.setter
    def pineapple_allowed(self, value):
        # Only enabling (a truthy value) triggers the password check;
        # assigning a falsy value is silently ignored by this branch.
        if value:
            # Prompts on stdin; raises ValueError on a wrong password.
            password = input("Enter the password: ")
            if password == "Sw0rdf1sh!":
                self._pineapple_allowed = value
            else:
                raise ValueError("Alert! Intruder!")


pizza = Pizza(["cheese", "tomato"])
print(pizza.pineapple_allowed)
# This assignment goes through the setter and prompts for the password.
pizza.pineapple_allowed = True
print(pizza.pineapple_allowed)
# from realpython
class MyClass:
    """Tutorial class placing the three method kinds side by side."""

    def method(self):
        """Instance method: receives the calling instance as `self`."""
        return 'instance method called', self

    @classmethod
    def classmethod(cls):
        """Class method: receives the class itself as `cls`."""
        return 'class method called', cls

    @staticmethod
    def staticmethod():
        """Static method: receives neither instance nor class."""
        return 'static method called'
# Instance methods need a class instance and can access the instance through self.
# Class methods don’t need a class instance. They can’t access the instance (self) but they have access to the class itself via cls.
# Static methods don’t have access to cls or self. They work like regular functions but belong to the class’s namespace.
# Static and class methods communicate and (to a certain degree) enforce developer intent about class design. This can have maintenance benefits. |
#encoding: utf-8
# Pattern 1: `n` (first CLI argument) sets the height of the figure and
# must be at least 7.  (Python 2 script: uses print statements.)
import sys
if len(sys.argv) != 2 :
    print 'Args: número'
    sys.exit(2)
n = int(sys.argv[1])
if n < 7 :
    print 'El primer argumento debe ser mayor o igual a 7'
    sys.exit(1)
# First half: rows of stars of increasing length.
# NOTE(review): `i` is advanced inside the inner loop as well, so the row
# lengths grow geometrically (1, 2, 4, ...) rather than by one per row.
i = 0
j = n
c = 0
while i < n :
    # Print c+1 stars on one line (the trailing comma keeps them on a row).
    while c >= 0:
        print '*',
        c-=1
        i+=1
    c = i
    print
# Second half: rows of stars of decreasing length, driven by the value of
# `i` left over from the first half.
c = i - 1
while i >= 0 :
    while c > 0:
        print '*',
        c-=1
        i-=1
    c = i - 1
    print
|
def find_default_player():
    """Stub: not implemented yet (always returns None).

    NOTE(review): presumably meant to locate the system's default media
    player, judging by the name — confirm the intended behavior before
    implementing.
    """
    pass


# Explicit public API of this module.
__all__ = [
    "find_default_player"
]
|
# Tutorial script: arithmetic and formatted printing exercises built on a
# table of language statistics.  Each '####' ruler starts an independent
# exercise (variables are deliberately re-assigned between sections).
print ('Research on language popularity.')
#LANGUAGE	SHARE OF WEBSITES IN THE TOP 10 MIL.	NATIVE SPEAKERS, MIL.	TOTAL SPEAKERS OF LANGUAGE, MIL.
#English	0.539	378.2	1121
#Russian	0.061	153.9	264.3
#German	0.06	76	132
#Spanish	0.049	442.3	512.9
#French	0.04	76.7	284.9
#Japanese	0.034	128.2	128.3
#Portuguese	0.029	222.7	236.5
# Italian	0.024	64.8	67.8
# Persian	0.02	60	110
# Polish	0.018	39.6	40.3
# Chinese	0.017	908.7	1107
# Danish	0.012	22	28
# Turkish	0.012	78.5	78.9
# Czech	0.01	10.4	10.6
############################################################################################
# Simple ratios computed straight from the table above.
print(1121 / 7539)
print ((128.3 - 128.2) / 128.3)
print(908.7 / 378.2)
#########################################################################################
# Chinese native speakers vs. the top-3 other languages combined.
english_native = 378.2
russian_native = 153.9
german_native = 76.0
chinese_native = 908.7
top3_total = english_native + russian_native + german_native
print (chinese_native - top3_total )
############################################################################################
# Japanese/Chinese web-share ratio in 2013 vs. 2018.
japanese_2013 = 0.045
chinese_2013 = 0.043
japanese_2018 = 0.034
chinese_2018 = 0.017
japanese = japanese_2013
chinese = chinese_2013
print(japanese/chinese)
japanese = japanese_2018
chinese = chinese_2018
print(japanese/chinese)
##########################################################################################
# Type demonstration: float result of a product, explicit int() conversion.
russian_web_share = 0.061
total_web = 10000000
russian_websites = russian_web_share * total_web
print(russian_websites)
print(type(russian_websites))
russian_native_millions = 153.9
russian_native = russian_native_millions * 1000000
russian_native_int = int(russian_native)
print(russian_native_int)
print(type(russian_native_int))
############################################################################################
# Speaker shares printed as raw fractions.
total_web = 10
total_speakers = 7539
chinese_speakers = 1107
chinese_web_share = 0.017
english_speakers = 1121
english_web_share = 0.539
russian_speakers = 264.3
russian_web_share = 0.061
total_speakers = 7539
chinese_speakers = 1107
english_speakers = 1121
russian_speakers = 264.3
chinese_speakers_share = chinese_speakers / total_speakers
print("Share of people who speak Chinese:", chinese_speakers_share)
english_speakers_share = english_speakers / total_speakers
print("Share of people who speak English:", english_speakers_share)
russian_speakers_share = russian_speakers / total_speakers
print("Share of people who speak Russian:", russian_speakers_share)
##########################################################################################
# Same shares formatted as percentages with str.format and the '%' spec.
total_speakers = 7539
chinese_speakers = 1107
english_speakers = 1121
russian_speakers = 264.3
chinese_speakers_share = chinese_speakers / total_speakers
english_speakers_share = english_speakers / total_speakers
russian_speakers_share = russian_speakers / total_speakers
print('Percentage of people who speak Chinese: {:.1%}'.format(chinese_speakers_share))
print('Percentage of people who speak English: {:.1%}'.format(english_speakers_share))
print('Percentage of people who speak Russian: {:.1%}'.format(russian_speakers_share))
#############################################################################################
# Same output again, grouped per language with headers and blank lines.
total_speakers = 7539
chinese_speakers = 1107
english_speakers = 1121
russian_speakers = 264.3
chinese_speakers_share = chinese_speakers / total_speakers
print('--- Chinese ---')
print('Percentage of speakers: {:.1%}'.format(chinese_speakers_share))
print()
english_speakers_share = english_speakers / total_speakers
print('--- English ---')
print('Percentage of speakers: {:.1%}'.format(english_speakers_share))
print()
russian_speakers_share = russian_speakers / total_speakers
print('--- Russian ---')
print('Percentage of speakers: {:.1%}'.format(russian_speakers_share))
###############################################################################################
# Final exercise: a "web popularity index" = 1000 * websites / speakers.
total_web = 10
total_speakers = 7539
chinese_speakers = 1107
chinese_web_share = 0.017
english_speakers = 1121
english_web_share = 0.539
russian_speakers = 264.3
russian_web_share = 0.061
chinese_speakers_share = chinese_speakers / total_speakers
chinese_websites = chinese_web_share * total_web
chinese_index = 1000 * chinese_websites / chinese_speakers
print('--- Chinese ---')
print('Percentage of speakers: {:.1%}'.format(chinese_speakers_share))
print('Percentage of websites in the language: {:.1%}'.format(chinese_web_share))
print('Web popularity index: {:.2f}'.format(chinese_index))
print()
english_speakers_share = english_speakers / total_speakers
english_websites = english_web_share * total_web
english_index = 1000 * english_websites / english_speakers
print('--- English ---')
print('Percentage of speakers: {:.1%}'.format(english_speakers_share))
print('Percentage of websites in the language: {:.1%}'.format(english_web_share))
print('Web popularity index: {:.2f}'.format(english_index))
print()
russian_speakers_share = russian_speakers / total_speakers
russian_websites = russian_web_share * total_web
russian_index = 1000 * russian_websites / russian_speakers
print('--- Russian ---')
print('Percentage of speakers: {:.1%}'.format(russian_speakers_share))
print('Percentage of websites in the language: {:.1%}'.format(russian_web_share))
print('Web popularity index: {:.2f}'.format(russian_index))
print()
# Expected output:
# --- Chinese ---
# Percentage of speakers: 14.6%
# Percentage of websites in the language: 1.7%
# Web popularity index: 0.15
#
# --- English ---
# Percentage of speakers: 14.8%
# Percentage of websites in the language: 53.9%
# Web popularity index: 4.81
#
# --- Russian ---
# Percentage of speakers: 3.5%
# Percentage of websites in the language: 6.1%
# Web popularity index: 2.31
|
"""
Generates a sequence of trials that satisfy
a mixed-block/event related design (Visscher et al 2003, NeuroImage).
Currently: no M-sequence. I'll look into this as the time for scanning
gets closer. For now, it's pseudo-random
"""
import random
# How long to wait before the first block of trials (seconds)
INITIAL_WAIT_TIME = 5
# Number of blocks that will contain trials
N_TRIAL_BLOCKS = 2
# How long should each block last? (seconds)
BLOCK_DURATION = 40
# How many trials per block?
N_TRIALS_PER_BLOCK = 10
# What is the minimum time a trial might take? (seconds)
MIN_TRIAL_DURATION = 1
# How long is the rest block? (seconds)
REST_DURATION = 20
# Number of inter-trial intervals in a block (one fewer than trials)
N_ITIs_PER_BLOCK = N_TRIALS_PER_BLOCK - 1
# Minimum ITI length (seconds)
MIN_ITI_DURATION = 2
# What is the default RT deadline if none is specified? (seconds)
DEFAULT_DEADLINE = 1.5
# ----- 11/25/12 -- Decided we will use varying deadlines each block
MAX_DEADLINE = 0.9 # Seconds
MIN_DEADLINE = 0.25 # Seconds
# Evenly spaced deadlines from MIN_DEADLINE to MAX_DEADLINE, one per block.
# NOTE(review): divides by N_TRIAL_BLOCKS - 1, so this raises
# ZeroDivisionError if N_TRIAL_BLOCKS is ever set to 1.
stepsize = (MAX_DEADLINE-MIN_DEADLINE)/(N_TRIAL_BLOCKS-1)
DEFAULT_DEADLINES = [(n*stepsize+ MIN_DEADLINE) for n in range(N_TRIAL_BLOCKS)]
random.shuffle(DEFAULT_DEADLINES) # Shuffle the order
def random_split(time_to_fill, ntimes):
    """Split `time_to_fill` into `ntimes` random non-negative pieces.

    Draws ntimes-1 uniform cut points in [0, time_to_fill], sorts them,
    and returns the gaps between consecutive fenceposts (the interval
    ends included), so the pieces sum to `time_to_fill` up to float
    rounding.

    Parameters:
    ===========
    time_to_fill:float
        Amount of time that needs to be divided into `ntimes` pieces
    ntimes:int
        Number of pieces `time_to_fill` is divided into

    Example:
    ========
    >>> len(random_split(30,5))
    5
    """
    cuts = sorted(random.uniform(0, time_to_fill) for _ in range(ntimes - 1))
    fenceposts = [0] + cuts + [time_to_fill]
    return [hi - lo for lo, hi in zip(fenceposts, fenceposts[1:])]
def create_block(block_onset,rt_deadline):
    """
    Creates a block of trials with a designated RT deadline.

    Parameters:
    ===========
    block_onset:float
        Time since the experiment starts when this block of trials begins
    rt_deadline:float
        Subject must respond faster than this time for a trial to be correct

    Returns:
    ========
    block_trials:list
        A list of lists that contain details of a trial. It will look like this:
        [
         [ onset_time, trial_duration, reaction_time_deadline, disappearing_part ],
         .....
        ]
    """
    # Check that it is possible to include at least the minimum duration of
    # trials and ITI's
    min_duration = N_ITIs_PER_BLOCK*MIN_ITI_DURATION + \
                   N_TRIALS_PER_BLOCK*MIN_TRIAL_DURATION
    # Number of total events per block
    n_events = N_ITIs_PER_BLOCK + N_TRIALS_PER_BLOCK
    if min_duration > BLOCK_DURATION:
        raise ValueError(
            """User has requested impossible parameters!\n
            Check that N_TRIALS_PER_BLOCK, N_ITIs_PER_BLOCK
            and their respective MIN DURATIONS will fit into
            BLOCK_DURATION
            """)
    # Calculate how much time we have to mess with durations
    residual_block_time = BLOCK_DURATION - min_duration
    # Begin with each trial at its minimum duration
    iti_durations = [ MIN_ITI_DURATION ] * N_ITIs_PER_BLOCK
    trial_durations = [ MIN_TRIAL_DURATION ] * N_TRIALS_PER_BLOCK
    # Algorithm: generate n_events random numbers
    # between 0 and `residual_block_times`, sorts them,
    # then adds the diffs to the ITI's and trial durations
    adjustments = random_split(residual_block_time,n_events)
    for n in range(N_TRIALS_PER_BLOCK):
        trial_durations[n] += adjustments.pop()
    for n in range(N_ITIs_PER_BLOCK):
        iti_durations[n] += adjustments.pop()
    # Now that they're adjusted to be unpredictable, get their onset
    # times in the actual experiment
    onsets = []
    _current_time = block_onset
    for duration in trial_durations:
        onsets.append(_current_time)
        # Each onset is preceded by one ITI (except the first trial).
        if len(iti_durations): _current_time += iti_durations.pop()
        _current_time += duration
    # What's going to disappear?
    # NOTE: This is ***random***, not counterbalanced
    trial_options = ("hbar","vbar")
    disappearing_components = [ trial_options[random.randint(0,1)] for n \
                                in range(N_TRIALS_PER_BLOCK) ]
    # Make the list of trial lists: one row per trial, as documented above.
    block_trials = []
    for onset, trial_duration, component in zip(
            onsets,trial_durations,disappearing_components):
        block_trials.append([onset,trial_duration,rt_deadline,component])
    return block_trials
def create_full_experiment(rt_deadlines):
    """
    Reads the parameters from the beginning of this file and creates
    a big list of trials.

    Parameters:
    ===========
    rt_deadlines:list of float
        A list of length N_TRIAL_BLOCKS that contains the reaction time
        deadline percentage for the subject.

    Returns:
    ========
    experiment:list
        A list containing all blocks for an experiment.
    """
    # Each block's onset is the initial wait plus a whole number of
    # (trial block + rest block) periods — same schedule the original
    # computed by accumulating _time in a loop.
    block_period = BLOCK_DURATION + REST_DURATION
    experiment = [
        create_block(INITIAL_WAIT_TIME + n_block * block_period,
                     rt_deadlines[n_block])
        for n_block in range(N_TRIAL_BLOCKS)
    ]
    # Check that all blocks are accounted for
    assert len(experiment) == N_TRIAL_BLOCKS
    return experiment
if __name__=="__main__":
    # NOTE(review): `viz` is never imported in this module, so this line
    # raises NameError as written — presumably the Vizard `viz` package
    # is expected; confirm and add the import.
    viz.go()
    # What deadline will be used each block?
    # NOTE(review): four deadlines are listed but create_full_experiment
    # only consumes the first N_TRIAL_BLOCKS (2) of them.
    rt_deadlines = [ 0.2, 0.5, 0.3, 0.4 ]
    experiment = create_full_experiment(rt_deadlines)
import jpype
from jpype import *
import subprocess
class Farasa:
    """Thin wrapper around the Farasa Arabic NLP tools via JPype.

    Constructing the class boots a JVM whose classpath points at the
    Farasa jars; segmentation and lemmatization call the Java API
    directly, while POS tagging shells out to the tagger jar through
    temporary files (tmp.in / tmp.out in the working directory).
    """

    def __init__(self, path_to_jars):
        # Boot the default JVM with the Farasa jars on the classpath.
        # NOTE(review): jpype.startJVM may only be called once per process.
        jvmPath = jpype.getDefaultJVMPath()
        jpype.startJVM(jvmPath,
                       "-Djava.class.path=" + path_to_jars)

    def _java_farasa(self):
        # Instantiate a fresh com.qcri.farasa.segmenter.Farasa object.
        return JPackage("com").qcri.farasa.segmenter.Farasa()

    def segment(self, text):
        """Segment *text* with Farasa; returns the segmenter's output."""
        return self._java_farasa().segmentLine(text)

    def lemmetize(self, text):
        """Lemmatize *text* with Farasa.

        (Method name kept as 'lemmetize' for backward compatibility.)
        """
        return self._java_farasa().lemmatizeLine(text)

    @staticmethod
    def _read_tagged_output(out_path="tmp.out"):
        """Parse the tagger's word/TAG output file into (word, tag) pairs.

        Fixes the previous error path, which opened the file in 'w' mode
        on IOError and then tried to read it (raising again); a missing
        output file now yields an empty list.  Files are closed via
        context managers instead of being leaked.
        """
        try:
            with open(out_path, 'r') as tmp_out:
                tagged = tmp_out.read()
        except IOError:
            return []
        res = []
        for token in tagged.split():
            parts = token.split("/")
            res.append((parts[0], parts[1]))
        return res

    def tag_file(self, file_path, path_to_postagger):
        """POS-tag the file at *file_path*; return a list of (word, tag)."""
        subprocess.call(['java', '-jar', path_to_postagger,
                         '-i', file_path, '-o', 'tmp.out'])
        return self._read_tagged_output()

    def tag(self, text, path_to_postagger):
        """POS-tag *text*; return a list of (word, tag) tuples.

        The text is written to tmp.in and handed to the tagger jar;
        shares the output-parsing helper with tag_file (the two methods
        previously duplicated the parsing code).
        """
        with open('tmp.in', 'w') as tmp_in:
            tmp_in.write(text)
        subprocess.call(['java', '-jar', path_to_postagger,
                         '-i', 'tmp.in', '-o', 'tmp.out'])
        return self._read_tagged_output()
|
#!/usr/bin/env python2
import sys
import struct
import time
# You can use this method to exit on failure conditions.
def bork(msg):
    """Abort the parser, reporting *msg* via sys.exit."""
    sys.exit(msg)

# Some constants. You shouldn't need to change these.
MAGIC = 0x8BADF00D   # FPFF file signature
VERSION = 1          # only format version 1 is accepted

if len(sys.argv) < 2:
    sys.exit("Usage: python stub.py input_file.fpff")

# Normally we'd parse a stream to save memory, but the FPFF files in this
# assignment are relatively small.
with open(sys.argv[1], 'rb') as fpff:
    data = fpff.read()

# Hint: struct.unpack will be VERY useful.
# Hint: you might find it easier to use an index/offset variable than
# hardcoding ranges like 0:8
magic, version = struct.unpack("<LL", data[0:8])
if magic != MAGIC:
    bork("Bad magic! Got %s, expected %s" % (hex(magic), hex(MAGIC)))
if version != VERSION:
    bork("Bad version! Got %d, expected %d" % (int(version), int(VERSION)))
print("------- HEADER -------")
print("MAGIC: %s" % hex(magic))
print("VERSION: %d" % int(version))
# Running offset into `data`, advanced as each field/section is consumed.
datactr = 8
# Header continues: signed 32-bit timestamp, 8 raw author bytes,
# unsigned section count.
timestamp, author, sectionc = struct.unpack("<l8sL", data[datactr:datactr+16])
datactr += 16
if sectionc == 0:
    bork("Not enough sections!")
print("TIME: %s" % time.strftime("%m/%d/%y %H:%M:%S", time.localtime(timestamp)))
print("AUTHOR: %s" % author)
print("SECTIONS: %d" % sectionc)
print("------- BODY -------")
# Counter used to name carved output files (carve/<N>.<ext>).
filectr = 0
filectr = 0
def parse_ascii(slen):
    """Carve an ASCII section (slen bytes at the current offset) into carve/N.txt."""
    global datactr
    global filectr
    if slen == 0:
        print("No text to save")
        return
    # Slicing yields exactly the bytes struct.unpack("%ds") would return.
    text = data[datactr:datactr + slen]
    datactr += slen
    out_name = "carve/%d.txt" % filectr
    with open(out_name, "w+") as f:
        f.write(text)
    print("Wrote %d bytes ascii to %d.txt" % (slen, filectr))
    filectr += 1
def parse_utf8(slen):
    """Carve a UTF-8 section (slen bytes at the current offset) into carve/N.txt."""
    global datactr
    global filectr
    if slen == 0:
        print("No text to save")
        return
    # Slicing yields exactly the bytes struct.unpack("%ds") would return.
    text = data[datactr:datactr + slen]
    datactr += slen
    out_name = "carve/%d.txt" % filectr
    with open(out_name, "w+") as f:
        f.write(text)
    print("Wrote %d bytes utf-8 to %d.txt" % (slen, filectr))
    filectr += 1
def parse_words(slen):
    """Carve a section of 32-bit words (slen bytes = slen/4 words) into carve/N.

    Fixes two bugs in the original:
      * '"=%dL" % slen / 4' divided the *formatted string* by 4 (TypeError
        as soon as this section type appeared) — the count, not the
        string, must be divided;
      * f.write(word) passed an int to a binary file; the words are now
        re-packed with struct before writing.
    """
    global datactr
    global filectr
    if slen == 0:
        print("No words to print")
        return
    count = slen // 4
    words = struct.unpack("=%dL" % count, data[datactr:datactr+slen])
    datactr += slen
    with open("carve/%d" % filectr, "wb+") as f:
        f.write(struct.pack("=%dL" % count, *words))
    print("Wrote %d words to %d" % (count, filectr))
    filectr += 1
def parse_dwords(slen):
    """Carve a section of 64-bit dwords (slen bytes = slen/8 values) into carve/N.

    Fixes two bugs in the original:
      * '"=%dQ" % slen / 8' divided the *formatted string* by 8 (TypeError)
        — the count, not the string, must be divided;
      * f.write(word) passed an int to a binary file; the values are now
        re-packed with struct before writing.
    """
    global datactr
    global filectr
    if slen == 0:
        print("No dwords to print")
        return
    count = slen // 8
    dwords = struct.unpack("=%dQ" % count, data[datactr:datactr+slen])
    datactr += slen
    with open("carve/%d" % filectr, "wb+") as f:
        f.write(struct.pack("=%dQ" % count, *dwords))
    print("Wrote %d dwords to %d" % (count, filectr))
    filectr += 1
def parse_doubles(slen):
    """Print every 64-bit double in the section (slen bytes = slen/8 doubles).

    Fixes '"=%dd" % slen / 8': operator precedence divided the formatted
    string by 8 (TypeError); the element count must be divided instead.
    """
    global datactr
    if slen == 0:
        print("No doubles to print")
        return
    doubles = struct.unpack("=%dd" % (slen // 8), data[datactr:datactr+slen])
    datactr += slen
    for double in doubles:
        print(double)
def parse_coord(slen):
    """Print a coordinate section: exactly two native-endian doubles (16 bytes)."""
    global datactr
    if slen != 16:
        bork("Expected doubles section of length 16, got %d" % slen)
    x, y = struct.unpack("=2d", data[datactr:datactr+16])
    datactr += 16
    print("(%f, %f)" % (x,y))
def parse_ref(slen):
    """Validate and print a 4-byte little-endian section reference.

    Fixes the struct format string: '"%<L"' is not a valid format (raises
    struct.error); a little-endian unsigned 32-bit int is '<L'.
    """
    global datactr
    if slen != 4:
        bork("Expected reference section of length 4, got %d" % slen)
    ref = struct.unpack("<L", data[datactr:datactr+4])
    # References index other sections, so they must be within range.
    if ref[0] > sectionc - 1:
        bork("Reference outside of acceptable range 0-%d: %d" % (sectionc,ref[0]))
    datactr += 4
    print("REF: %d" % ref[0])
def parse_png(slen):
    """Carve a PNG body into carve/N.png, prepending the 8-byte PNG signature.

    The original wrote the payload one byte at a time in a Python loop;
    a single slice write produces identical output with one write call
    instead of slen.
    """
    global datactr
    global filectr
    if slen == 0:
        bork("Empty PNG?")
    # Standard PNG signature; the FPFF body stores the file without it.
    header = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
    with open("carve/%d.png" % filectr, "wb+") as f:
        f.write(header)
        f.write(data[datactr:datactr+slen])
    print("Wrote %d bytes to %d.png" % (slen, filectr))
    filectr += 1
    datactr += slen
def parse_gif87(slen):
    """Carve a GIF87a body into carve/N.gif, prepending the 'GIF87a' signature.

    Replaces the byte-at-a-time write loop with a single slice write
    (identical output, far fewer write calls).
    """
    global datactr
    global filectr
    if slen == 0:
        bork("Empty GIF?")
    header = "GIF87a"
    with open("carve/%d.gif" % filectr, "wb+") as f:
        f.write(header)
        f.write(data[datactr:datactr+slen])
    print("Wrote %d bytes to %d.gif" % (slen, filectr))
    filectr += 1
    datactr += slen
def parse_gif89(slen):
    """Carve a GIF89a body into carve/N.gif, prepending the 'GIF89a' signature.

    Replaces the byte-at-a-time write loop with a single slice write
    (identical output, far fewer write calls).
    """
    global datactr
    global filectr
    if slen == 0:
        bork("Empty GIF?")
    header = "GIF89a"
    with open("carve/%d.gif" % filectr, "wb+") as f:
        f.write(header)
        f.write(data[datactr:datactr+slen])
    print("Wrote %d bytes to %d.gif" % (slen, filectr))
    filectr += 1
    datactr += slen
# Walk every section: each starts with an 8-byte header (type, byte length)
# followed by its payload, which the matching parse_* handler consumes.
for i in range(0, sectionc):
    stype, slen = struct.unpack("<LL", data[datactr:datactr+8])
    datactr += 8
    stypes = ""
    if stype < 1 or stype > 10:
        # Fixed precedence bug: '"...%d..." % i + 1' formatted i and then
        # tried to add 1 to the resulting *string* (TypeError).
        bork("Section %d: bad section type!" % (i + 1))
    # Dispatch on the section type; elif keeps the chain mutually
    # exclusive (a section has exactly one type).
    if stype == 1:
        parse_ascii(slen)
        stypes = "ASCII"
    elif stype == 2:
        parse_utf8(slen)
        stypes = "UTF8"
    elif stype == 3:
        parse_words(slen)
        stypes = "WORDS"
    elif stype == 4:
        parse_dwords(slen)
        stypes = "DWORDS"
    elif stype == 5:
        parse_doubles(slen)
        stypes = "DOUBLES"
    elif stype == 6:
        parse_coord(slen)
        stypes = "COORDINATES"
    elif stype == 7:
        parse_ref(slen)
        stypes = "REFERENCE"
    elif stype == 8:
        parse_png(slen)
        stypes = "PNG"
    elif stype == 9:
        parse_gif87(slen)
        stypes = "GIF87"
    elif stype == 10:
        parse_gif89(slen)
        stypes = "GIF89"
    print("SECTION %d: %s" % (i, stypes))
|
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, BatchNormalization
# Toy sequence 1..100 used to build sliding-window training data.
a = np.array(range(1,101))
# Stateful LSTM below requires a fixed batch size of 1.
batch_size = 1
# Window length: 4 inputs + 1 target per sample.
size = 5
def split_5(seq, size):
    """Return all length-*size* sliding windows of *seq* as a 2-D array.

    Bug fix: the body previously iterated the module-level global `a`
    instead of the `seq` parameter, so any other argument was silently
    ignored.  For the original script's call split_5(a, size) the output
    is unchanged.

    Parameters:
        seq  : 1-D sequence (numpy array or list)
        size : window length

    Returns:
        numpy array of shape (len(seq) - size + 1, size).
    """
    windows = []
    for start in range(len(seq) - size + 1):
        windows.append(seq[start:(start + size)])
    return np.array(windows)
dataset = split_5(a, size)
print('=================')
# print(dataset)
# print(dataset.shape)
# Inputs: first four columns of each window; target: the fifth.
x_train = dataset[:,0:4]
y_train = dataset[:, 4]
# LSTM expects (samples, timesteps, features).
x_train = np.reshape(x_train,(len(x_train), size-1, 1))
# Test data is the same series shifted by +100.
x_test = x_train + 100
y_test = y_train + 100
# print(x_train.shape)
# print(y_train.shape)
# print(x_test)
# print(y_test)
model = Sequential()
# Stateful LSTM: batch_input_shape pins (batch=1, timesteps=4, features=1).
model.add(LSTM(128, batch_input_shape=(1,4,1), stateful=True))
# model.add(Dropout(0.5))
# model.add(Dense(512))
model.add(Dense(50, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(15, activation='relu'))
# model.add(Dense(6, activation='relu'))
model.add(Dense(1))
model.summary()
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
from keras.callbacks import EarlyStopping
early = EarlyStopping(monitor='mse', patience=30)
num_epochs = 200
loss = []
valloss = []
# Manual epoch loop: the LSTM state is reset between epochs, which a
# single fit(epochs=200) call would not do.
for epochs_idx in range(num_epochs):
    print('epochs:' + str(epochs_idx))
    history = model.fit(x_train, y_train, epochs = 1, batch_size=batch_size, verbose=2, shuffle= False, validation_data=(x_test, y_test), callbacks=[early])
    loss.append(history.history['mean_squared_error'])
    valloss.append(history.history['val_mean_squared_error'])
    model.reset_states()
mse, _ = model.evaluate(x_train, y_train, batch_size)
print('mse: ', mse)
model.reset_states()
y_predict = model.predict(x_test, batch_size=1)
print(y_predict[0:5])
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    """Root-mean-squared error between targets and predictions."""
    mse = mean_squared_error(y_test, y_predict)
    return np.sqrt(mse)
print("RMSE: ", RMSE(y_test, y_predict))
# Compute R2 (coefficient of determination).
from sklearn.metrics import r2_score
r2_y_predict = r2_score(y_test, y_predict)
print("R2: ", r2_y_predict)
import matplotlib.pyplot as plt
# Plot the per-epoch training and validation MSE collected above.
plt.plot(loss)
plt.plot(valloss)
plt.title('model loss')
plt.ylabel('mse')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
import filecmp
import os
import tempfile
import unittest
import sbol3
import tyto
import labop
import labop_time as labopt
import uml
# from labop_check.labop_check import check_doc, get_minimum_duration
class TestTime(unittest.TestCase):
    def test_single_behavior(self):
        """Build one Primitive with start/end/duration constraints and
        check the document validates with no errors or warnings."""
        #############################################
        # set up the document
        print("Setting up document")
        doc = sbol3.Document()
        sbol3.set_namespace("https://bbn.com/scratch/")
        #############################################
        # Create the behavior and constraints
        print("Creating Constraints")
        a = labop.Primitive("a")
        # Constrain start time of a to [0, 10]
        start_a = labopt.startTime(a, [0, 10], units=tyto.OM.hour)
        # Constrain end time of a to [10, 15]
        end_a = labopt.endTime(a, [10, 15], units=tyto.OM.hour)
        # Constrain duration of a to [1, 5]
        duration_a = labopt.duration(a, [1, 5], units=tyto.OM.hour)
        # All three constraints must hold simultaneously.
        constraint = labopt.And([start_a, end_a, duration_a])
        time_constraints = labopt.TimeConstraints(
            "small_protocol_constraints", constraints=[constraint]
        )
        doc.add(a)
        doc.add(time_constraints)
        ########################################
        # Validate and write the document
        print("Validating and writing time")
        v = doc.validate()
        assert not v.errors and not v.warnings, "".join(
            str(e) for e in doc.validate().errors
        )
        # doc.write('difference.nt', 'sorted nt')
        # doc.write('difference.ttl', 'turtle')
    def test_two_behaviors(self):
        """Create two Primitives linked by a precedes constraint and
        check the document validates.

        NOTE(review): the precedes constraint itself is never added to
        the document (the doc.add call is commented out), so only the
        first primitive is actually validated here.
        """
        #############################################
        # set up the document
        print("Setting up document")
        doc = sbol3.Document()
        sbol3.set_namespace("https://bbn.com/scratch/")
        #############################################
        # Create the behavior and constraints
        print("Creating Constraints")
        a = labop.Primitive("a")
        b = labop.Primitive("b")
        # Constrain start of b to follow end of a by [10, 15]
        follows_constraint = labopt.precedes(a, [10, 15], b, units=tyto.OM.hour)
        doc.add(a)
        # doc.add(follows_constraint)
        ########################################
        # Validate and write the document
        print("Validating and writing time")
        v = doc.validate()
        assert not v.errors and not v.warnings, "".join(
            str(e) for e in doc.validate().errors
        )
        # doc.write('timed_protocol.nt', 'sorted nt')
        # doc.write('timed_protocol.ttl', 'turtle')
def test_timed_small_protocol(self):
#############################################
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
#############################################
# Create the Protocol
print("Creating Protocol")
protocol = labop.Protocol("test_protocol")
# Protocol starts at time zero
start = labopt.startTime(protocol, 0, units=tyto.OM.hour)
# Protocol lasts 10 - 15 hours
duration = labopt.duration(protocol, [10, 15], units=tyto.OM.hour)
time_constraints = labopt.TimeConstraints(
"small_protocol_constraints",
constraints=labopt.And([start, duration]),
protocols=[protocol],
)
doc.add(protocol)
doc.add(time_constraints)
########################################
# Validate and write the document
print("Validating and writing time")
v = doc.validate()
assert not v.errors and not v.warnings, "".join(
str(e) for e in doc.validate().errors
)
# doc.write('timed_protocol.nt', 'sorted nt')
# doc.write('timed_protocol.ttl', 'turtle')
@unittest.skip("need to fix non-determinism in nt file comparison")
def test_create_timed_protocol(self):
#############################################
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
#############################################
# Import the primitive libraries
print("Importing libraries")
labop.import_library("liquid_handling")
print("... Imported liquid handling")
labop.import_library("plate_handling")
print("... Imported plate handling")
labop.import_library("spectrophotometry")
print("... Imported spectrophotometry")
labop.import_library("sample_arrays")
print("... Imported sample arrays")
#############################################
# Create the protocol
print("Creating protocol")
protocol = labop.Protocol("iGEM_LUDOX_OD_calibration_2018")
protocol.name = "iGEM 2018 LUDOX OD calibration protocol"
protocol.description = """
With this protocol you will use LUDOX CL-X (a 45% colloidal silica suspension) as a single point reference to
obtain a conversion factor to transform absorbance (OD600) data from your plate reader into a comparable
OD600 measurement as would be obtained in a spectrophotometer. This conversion is necessary because plate
reader measurements of absorbance are volume dependent; the depth of the fluid in the well defines the path
length of the light passing through the sample, which can vary slightly from well to well. In a standard
spectrophotometer, the path length is fixed and is defined by the width of the cuvette, which is constant.
Therefore this conversion calculation can transform OD600 measurements from a plate reader (i.e. absorbance
at 600 nm, the basic output of most instruments) into comparable OD600 measurements. The LUDOX solution
is only weakly scattering and so will give a low absorbance value.
"""
doc.add(protocol)
# create the materials to be provisioned
ddh2o = sbol3.Component(
"ddH2O", "https://identifiers.org/pubchem.substance:24901740"
)
ddh2o.name = "Water, sterile-filtered, BioReagent, suitable for cell culture" # TODO get via tyto
doc.add(ddh2o)
ludox = sbol3.Component(
"LUDOX", "https://identifiers.org/pubchem.substance:24866361"
)
ludox.name = "LUDOX(R) CL-X colloidal silica, 45 wt. % suspension in H2O"
doc.add(ludox)
# actual steps of the protocol
# get a plate
plate = protocol.primitive_step(
"EmptyContainer", specification=tyto.NCIT.get_uri_by_term("Microplate")
) # replace with container ontology
# put ludox and water in selected wells
c_ddh2o = protocol.primitive_step(
"PlateCoordinates", source=plate, coordinates="A1:D1"
)
provision_ludox = protocol.primitive_step(
"Provision",
resource=ludox,
destination=c_ddh2o.output_pin("samples"),
amount=sbol3.Measure(100, tyto.OM.microliter),
)
c_ludox = protocol.primitive_step(
"PlateCoordinates", source=plate, coordinates="A2:D2"
)
provision_ddh2o = protocol.primitive_step(
"Provision",
resource=ddh2o,
destination=c_ludox.output_pin("samples"),
amount=sbol3.Measure(100, tyto.OM.microliter),
)
# measure the absorbance
c_measure = protocol.primitive_step(
"PlateCoordinates", source=plate, coordinates="A1:D2"
)
measure = protocol.primitive_step(
"MeasureAbsorbance",
samples=c_measure,
wavelength=sbol3.Measure(600, tyto.OM.nanometer),
)
protocol.add_output("absorbance", measure.output_pin("measurements"))
# Set protocol timepoints
# protocol starts at time 0
protocol_start_time = labopt.startTime(protocol, 0, units=tyto.OM.hour)
provision_ludox_duration = labopt.duration(
provision_ludox, 60, units=tyto.OM.second
)
provision_ddh2o_duration = labopt.duration(
provision_ddh2o, 60, units=tyto.OM.second
)
execute_measurement_duration = labopt.duration(
measure, 60, units=tyto.OM.minute
)
ludox_before_ddh2o_constraint = labopt.precedes(
provision_ludox, [10, 15], provision_ddh2o, units=tyto.OM.hour
)
time_constraints = labopt.TimeConstraints(
"ludox_protocol_constraints",
constraints=[
labopt.And(
[
protocol_start_time,
provision_ludox_duration,
provision_ddh2o_duration,
execute_measurement_duration,
ludox_before_ddh2o_constraint,
]
)
],
protocols=[protocol],
)
doc.add(time_constraints)
########################################
# Validate and write the document
print("Validating and writing protocol")
v = doc.validate()
assert not v.errors and not v.warnings, "".join(
str(e) for e in doc.validate().errors
)
# assert check_doc(doc) # Is the protocol consistent?
# assert get_minimum_duration(doc) # What is the minimum duration for each protocol in doc
temp_name = os.path.join(tempfile.gettempdir(), "igem_ludox_time_test.nt")
doc.write(temp_name, sbol3.SORTED_NTRIPLES)
print(f"Wrote file as {temp_name}")
comparison_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"testfiles",
"igem_ludox_time_test.nt",
)
# doc.write(comparison_file, sbol3.SORTED_NTRIPLES)
print(f"Comparing against {comparison_file}")
assert filecmp.cmp(temp_name, comparison_file), "Files are not identical"
print("File identical with test file")
def test_expressions(self):
#############################################
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
#############################################
# Create the Expressions
print("Creating Protocol")
# expression e1: 60s * duration(a1)
a1 = labop.Primitive("a1")
d1 = uml.Duration(observation=uml.DurationObservation(event=[a1]))
m1 = labopt.TimeMeasure(expr=sbol3.Measure(60, tyto.OM.second))
e1 = uml.Expression(symbol="*", is_ordered=False, operand=[m1, d1])
# doc.add(e1)
# expression lt1: e1 < e2
e2 = labopt.TimeMeasure(expr=sbol3.Measure(120, tyto.OM.second))
lt1 = uml.Expression(symbol="<", is_ordered=True, operand=[e1, e2])
# doc.add(lt1)
# c1: Not(lt1)
c1 = labopt.Not(constrained_elements=lt1)
# doc.add(c1)
########################################
# Validate and write the document
print("Validating and writing time")
v = doc.validate()
assert not v.errors and not v.warnings, "".join(
str(e) for e in doc.validate().errors
)
# doc.write('timed_protocol.nt', 'sorted nt')
# doc.write('timed_protocol.ttl', 'turtle')
if __name__ == "__main__":
    # Run the full test suite when this file is executed directly.
    unittest.main()
|
"""Resolve www.google.com and open a TCP connection to port 80."""
import socket
import sys

# Create a TCP/IP socket; abort if the OS refuses to allocate one.
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("socket successfully created")
except socket.error as err:
    print("socket creation failed with error %s" % (err))
    sys.exit()

# let's open a connection to google with socket
PORT = 80

try:
    # first we need to find google ip
    host = socket.gethostbyname('www.google.com')
except socket.gaierror as err:
    print("cannot find google ip %s" % (err))
    sys.exit()

try:
    s.connect((host, PORT))
    print("the socket has successfully connected to google")
finally:
    # Bug fix: the original script leaked the socket — always release it,
    # whether or not connect() succeeded.
    s.close()
"""Create an hourly SageMaker model-monitoring schedule for an endpoint.

Command-line arguments (positional):
    1: S3 bucket name
    2: S3 key prefix under the bucket
    3: SageMaker execution role ARN
    4: name of the baselining processing job (reused as the schedule name)
    5: name of the endpoint to monitor
"""
import boto3
import sys
import time
import json
import os
import sagemaker
from sagemaker.processing import ProcessingJob
from sagemaker.model_monitor import DefaultModelMonitor, BaseliningJob, CronExpressionGenerator

# Load arguments
bucket_name = sys.argv[1]
prefix = sys.argv[2]
execution_role = sys.argv[3]
processing_job_name = sys.argv[4]
endpoint_name = sys.argv[5]

# Build the S3 URIs for the pre/post-processor scripts and the report output
start = time.time()
print('Loading monitor baseline for job: {}'.format(processing_job_name))
code_prefix = '{}/code'.format(prefix)
s3_code_preprocessor_uri = 's3://{}/{}/{}'.format(bucket_name, code_prefix, 'preprocessor.py')
s3_code_postprocessor_uri = 's3://{}/{}/{}'.format(bucket_name, code_prefix, 'postprocessor.py')
reports_prefix = '{}/reports'.format(prefix)
s3_report_path = 's3://{}/{}'.format(bucket_name, reports_prefix)
print("Report path: {}".format(s3_report_path))
print("Preproc Code path: {}".format(s3_code_preprocessor_uri))
print("Postproc Code path: {}".format(s3_code_postprocessor_uri))

# Load the processing job and reinterpret it as a baselining job.
# Fix: the redundant mid-script `from sagemaker.model_monitor import
# BaseliningJob` was dropped — the name is already imported at the top.
processing_job = ProcessingJob.from_processing_name(
    processing_job_name=processing_job_name,
    sagemaker_session=sagemaker.Session())
baseline_job = BaseliningJob.from_processing_job(processing_job)

my_default_monitor = DefaultModelMonitor(
    role=execution_role,
    instance_count=1,
    instance_type='ml.m5.xlarge',
    volume_size_in_gb=20,
    max_runtime_in_seconds=3600,
)

print('Starting monitor schedule for endpoint: {}'.format(endpoint_name))

# First, copy over some test scripts to the S3 bucket so that they can be used for pre and post processing
s3 = boto3.Session().resource('s3')
s3.Bucket(bucket_name).Object(code_prefix+"/preprocessor.py").upload_file('workflow/schedule/preprocessor.py')
s3.Bucket(bucket_name).Object(code_prefix+"/postprocessor.py").upload_file('workflow/schedule/postprocessor.py')

# Schedule hourly monitoring against the baseline statistics/constraints
my_default_monitor.create_monitoring_schedule(
    monitor_schedule_name=processing_job_name,
    endpoint_input=endpoint_name,
    #record_preprocessor_script=pre_processor_script,
    post_analytics_processor_script=s3_code_postprocessor_uri,
    output_s3_uri=s3_report_path,
    statistics=baseline_job.baseline_statistics(),
    constraints=baseline_job.suggested_constraints(),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
    enable_cloudwatch_metrics=True
)

# TODO: Save constraints and statistics to output directory?
end = time.time()
print('Monitor schedule complete in: {}'.format(end - start))
# Practice log (bitácora de prácticas)
# Edoardo Martín Ricalde Ché
# =============================
# Local optimization
# =============================

# Exercise 1
# Original
Edo = 3
Victor = 5
Sobra = 2
# Bug fix: the original referenced undefined names `a` and `b`, which
# raises NameError; the exercise clearly intends the two operands above.
total = Edo + Victor
diferencia = Edo - Victor
print(total)

# Optimized
# Variables and operations that are never needed or used are removed
Edo = 3
Victor = 5
print(Edo + Victor)
# Exercise 2
# Original
x = 4
y = 1
z = 3
op1 = x-1
op2 = y * z
final = op1 + op2
print(final)

# Optimized
# Single-line assignment of the variables and simplification of the
# operations directly in `final`
x,y,z = 4,1,3
final = (x-1) + (y * z)
print(final)

# Exercise 3
# Original
x = 1
y = 2
z = x + x
z2 = x + x
a = y + y
b = x + y
print(b)

# Optimized
# Redundancy elimination: z, z2 and a were computed but never used
x,y = 1,2
b = x + y
print (b)
#============================
# Loop optimization
#============================

# Exercise 4
# Original
for j in [0,1,2]:
    print("Hola")

# Optimized
# A variable is created to hold the array instead of instantiating it
# inside the for statement
g = [0,1,2]
for j in g:
    print("Hola ", end='')
print("\n")

# Exercise 5
# Original
x2 = 0
y2 = "England"
while x2 < 5:
    x2 += 1
    print(y2)
print("\n")

# Optimized
# The text to print does not need to be stored in a variable; it is
# passed directly to print()
x1 = 0
while x1 < 5:
    x1 += 1
    print("England")

# Exercise 6
# Original
a1 = [1,2,3]
for b1 in a1:
    c1 = b1 + 1
    res = "Suma de 1 + " + str(b1) + " = " + str(c1)
    print(res)

# Optimized
# String concatenation and the surplus variables (c1, res) are removed
a1 = [1,2,3]
for b1 in a1:
    print(f"Suma de 1 + {b1} = {b1+1}")
#*****************************
# Peephole exercises
#*****************************

# Exercise 7
# Original
z1 = 0
while z1 < 4:
    z1 += 1
    if z1 == 3:
        print(z1)
    else:
        pass

# OPTIMIZED CODE
# We finish deterministically with a break, removing the unnecessary
# `pass` branch and interrupting the flow once the value is printed
z1 = 0
while z1 < 4:
    z1 += 1
    if z1 == 3:
        print(z1)
        break

# Exercise 8
# Original
for z in "itsva":
    v = "v"
    if z == v:
        pass
    else:
        print(z)

# Optimized
# The helper variable is removed and a break ends the loop early,
# interrupting the flow
# NOTE(review): output differs from the "original" above — the original
# skips 'v' and prints i, t, s, a, while this version stops at 'v' and
# prints only i, t, s.
for z in "itsva":
    if z == "v":
        break
    print(z)

# Exercise 9
# Original
i = 0
j = "Linea 1"
while i < 3:
    i += 1
    print(j)

# Optimized
# The unnecessary variable is removed
# NOTE(review): output differs from the original — this prints "Hola"
# once (break), while the original prints "Linea 1" three times.
i = 0
while i < 3:
    i += 1
    print("Hola")
    break
#==============================
# Global exercises
#==============================

# Exercise 10
# Original
contenido = "Después de todo este tiempo? Siempre...!"
file = "archivo.txt"
archivo = open(file, 'w')
archivo.write(contenido)
archivo.close()
archivo2 = open(file, "r")
for linea in archivo2.readlines():
    print(linea)
archivo2.close()

# Optimized
# Instead of re-writing the content we want to show, we seek to the
# start of the file and print it directly; the content we want is there
# NOTE(review): this relies on `file` assigned by the "original" block
# above, and `with open(...)` would be the idiomatic way to close these
# handles.
archivo3 = open(file,"r")
archivo3.seek(0)
print(archivo3.read())
archivo3.close()
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.lint.flake8 import skip_field
from pants.backend.python.lint.flake8.subsystem import Flake8FirstPartyPlugins
from pants.backend.python.lint.flake8.subsystem import rules as subsystem_rules
from pants.backend.python.target_types import (
InterpreterConstraintsField,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.util_rules import python_sources
from pants.build_graph.address import Address
from pants.core.target_types import GenericTarget
from pants.testutil.python_interpreter_selection import skip_unless_all_pythons_present
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
from pants.util.ordered_set import FrozenOrderedSet
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    """Rule runner wired with the flake8 subsystem, skip-field,
    python-sources and target-type rules, and a QueryRule so tests can
    request `Flake8FirstPartyPlugins` directly."""
    return PythonRuleRunner(
        rules=[
            *subsystem_rules(),
            *skip_field.rules(),
            *python_sources.rules(),
            *target_types_rules.rules(),
            QueryRule(Flake8FirstPartyPlugins, []),
        ],
        target_types=[PythonSourcesGeneratorTarget, GenericTarget, PythonRequirementTarget],
    )
@skip_unless_all_pythons_present("3.8", "3.9")
def test_first_party_plugins(rule_runner: PythonRuleRunner) -> None:
    """First-party flake8 plugins should collect third-party requirement
    strings, interpreter constraints from the plugin targets and their
    transitive dependencies, and a stripped sources digest."""
    # Plugin target depends on two requirements and on subdir1, which in
    # turn depends on subdir2 — exercising transitive collection.
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                python_requirement(name='flake8', requirements=['flake8==2.11.1'])
                python_requirement(name='colors', requirements=['ansicolors'])
                """
            ),
            "flake8-plugins/subdir1/util.py": "",
            "flake8-plugins/subdir1/BUILD": dedent(
                """\
                python_sources(
                    interpreter_constraints=['==3.9.*'],
                    dependencies=['flake8-plugins/subdir2']
                )
                """
            ),
            "flake8-plugins/subdir2/another_util.py": "",
            "flake8-plugins/subdir2/BUILD": "python_sources(interpreter_constraints=['==3.8.*'])",
            "flake8-plugins/plugin.py": "",
            "flake8-plugins/BUILD": dedent(
                """\
                python_sources(
                    dependencies=['//:flake8', '//:colors', "flake8-plugins/subdir1"]
                )
                """
            ),
        }
    )
    rule_runner.set_options(
        [
            "--source-root-patterns=flake8-plugins",
            "--flake8-source-plugins=flake8-plugins/plugin.py",
        ],
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    first_party_plugins = rule_runner.request(Flake8FirstPartyPlugins, [])

    # Requirement strings from both python_requirement targets
    assert first_party_plugins.requirement_strings == FrozenOrderedSet(
        ["ansicolors", "flake8==2.11.1"]
    )
    # One constraints field per targeted source set (root has none)
    assert first_party_plugins.interpreter_constraints_fields == FrozenOrderedSet(
        [
            InterpreterConstraintsField(ic, Address("", target_name="tgt"))
            for ic in (None, ["==3.9.*"], ["==3.8.*"])
        ]
    )
    # Sources are re-rooted under the plugin PREFIX
    assert (
        first_party_plugins.sources_digest
        == rule_runner.make_snapshot(
            {
                f"{Flake8FirstPartyPlugins.PREFIX}/plugin.py": "",
                f"{Flake8FirstPartyPlugins.PREFIX}/subdir1/util.py": "",
                f"{Flake8FirstPartyPlugins.PREFIX}/subdir2/another_util.py": "",
            }
        ).digest
    )
|
# Generated by Django 2.1.7 on 2019-02-27 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax three `profile` fields so they may be left empty:
    birth_date, ph_no (phone number) and photo all become
    blank=True/null=True."""

    dependencies = [
        ('core', '0009_auto_20190227_1102'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='birth_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='ph_no',
            field=models.IntegerField(blank=True, null=True, verbose_name='Phone Number'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='photo',
            field=models.FileField(blank=True, null=True, upload_to='profiles'),
        ),
    ]
|
from wsgiref.simple_server import make_server # the wsgiref webserver (default with Python)
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.response import FileResponse
from pyramid.renderers import render_to_response
''' Basic Routes '''
def home_route(req):
    """Serve the static home page file for the '/' route."""
    return FileResponse('home.html')
'''
def linda_route(req):
return FileResponse('Linda.html')
def zoe_route(req):
return FileResponse('Zoe.html')
def emily_route(req):
return FileResponse('Emily.html')
def martha_route(req):
return FileResponse('Martha.html')
'''
''' Routes using Jinja templating '''
def linda_template_route2(req):
    """Render the Linda Jinja2 template with its greeting message."""
    context = {'message': 'Linda!'}
    response = render_to_response('Linda_template.html', context, request=req)
    return response
def emily_template_route2(req):
    """Render the Emily Jinja2 template with its greeting message."""
    context = {'message': 'Emily!'}
    response = render_to_response('Emily_template.html', context, request=req)
    return response
def martha_template_route2(req):
    """Render the Martha Jinja2 template with its greeting message."""
    context = {'message': 'Martha!'}
    response = render_to_response('Martha_template.html', context, request=req)
    return response
def zoe_template_route2(req):
    """Render the Zoe Jinja2 template with its greeting message."""
    context = {'message': 'Zoe!'}
    response = render_to_response('Zoe_template.html', context, request=req)
    return response
'''
def zoe_template_route(req):
data = {'count': 1, 'files': ['Zoe.html']}
return render_to_response('template.html', data, request=req)
def linda_template_route(req):
data = {'count': 1, 'files': ['Linda_template.html']}
return render_to_response('template.html', data, request=req)
def emily_template_route(req):
data = {'count': 1, 'files': ['Emily.html']}
return render_to_response('template.html', data, request=req)
def martha_template_route(req):
data = {'count': 1, 'files': ['Martha.html']}
return render_to_response('template.html', data, request=req)
'''
''' Main Application '''
def main() :
    """Configure the Pyramid app (home route, Jinja2 routes, static
    files) and run it on a wsgiref server at 127.0.0.2:8080."""
    # NOTE(review): indentation was lost in this copy — the structure
    # below (routes inside the `with`, server started after it) is the
    # conventional Pyramid layout; confirm against the original file.
    with Configurator() as config:
        # Home Route
        config.add_route('home', '/')
        config.add_view(home_route, route_name='home')

        # Jinja Routes
        config.include('pyramid_jinja2')
        config.add_jinja2_renderer('.html')
        config.add_route('linda_template2', '/linda_template2')
        config.add_view(linda_template_route2, route_name='linda_template2')
        config.add_route('zoe_template2', '/zoe_template2')
        config.add_view(zoe_template_route2, route_name='zoe_template2')
        config.add_route('martha_template2', '/martha_template2')
        config.add_view(martha_template_route2, route_name='martha_template2')
        config.add_route('emily_template2', '/emily_template2')
        config.add_view(emily_template_route2, route_name='emily_template2')

        # add static folder to search path
        config.add_static_view(name='/', path='./', cache_max_age=3600)

        # create the webserver config
        app = config.make_wsgi_app()

    # Dead code kept for reference (disabled routes):
    '''
    # Linda Route
    config.add_route('linda', '/linda')
    config.add_view(linda_route, route_name='linda')
    # Zoe Route
    config.add_route('zoe', '/zoe')
    config.add_view(zoe_route, route_name='zoe')
    # Emily Route
    config.add_route('emily', '/emily')
    config.add_view(emily_route, route_name='emily')
    # Martha Route
    config.add_route('martha', '/martha')
    config.add_view(martha_route, route_name='martha')
    config.add_route('zoe_template', '/zoe_template')
    config.add_view(zoe_template_route, route_name='zoe_template')
    config.add_route('martha_template', '/martha_template')
    config.add_view(martha_template_route, route_name='martha_template')
    config.add_route('emily_template', '/emily_template')
    config.add_view(emily_template_route, route_name='emily_template')
    '''

    # run the server; serve until Ctrl-C
    server = make_server('127.0.0.2', 8080, app)
    print("The server is now running on: http://127.0.0.2:8080")
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("\nExiting...")
        exit(0)


if __name__ == '__main__':
    main()
# NOTE: This tutorial script uses Python 2 syntax (print statements,
# xrange); it will not run under Python 3.

print "Hello World"

x = 5
myInt = 7
myFloat = 7.0
myFloat2 = float(7)

hello = "hello"
world = "world"
lotsOfHellos = hello * 10
print "Lots of hellos using the * 10 operator gives " + lotsOfHellos
helloworld = hello + " xxx " + world
print helloworld
print "my int is %d" % myInt
#print "my int is also" + myInt # this does not work!!
#isInstance(variablename, dataType)
#if myString == "hello":
print "x is %d" % x
myIntSquared = myInt ** 2
print "myInt squarred is %d" %myIntSquared

mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist[0]) # prints 1
print(mylist[1]) # prints 2
print(mylist[2]) # prints 3

# prints out 1,2,3
for x in mylist:
    print x

evenNumbers = [2,4,6,8]
oddNumbers = [1,3,5,7]
allNumbers = oddNumbers + evenNumbers
moreEvenNumbers = evenNumbers * 3

#==================STRING FORMATTING===================
name = "Scott"
print "Hello, %s. Hope you are having a great day!" % name
age = 19
print "Hello, %s. You are %d years old." % (name, age)
print "This prints out evenNumber list: %s" % evenNumbers
# %s String, %d integers, %f floating points, %.<number of digits>f

#================Basic String Operations===============
movie = "The Godfather Part II"
print "length of movie string is %d and its name is %s" % (len(movie), movie)
print "The index of the letter f is %d" %movie.index("f")
print "The number of letter ts in the movie string is %d" %movie.count("t")
print "A slice of the string from index 2 to 11 is " + movie[2:11]
print "This prints the string backwards [::-1] " + movie[::-1]
movie = movie.upper()
print movie + "... just used movie = movie.upper(). now doing movie = movie.lower()"
movie = movie.lower()
print movie
print movie.startswith("the")
print "..just did movie.startswith('the')"
print movie.endswith("cat")

#==================Conditions ==========================
a = 2
print "Doing conditions now..."
# NOTE(review): `x` was rebound to 3 by the `for x in mylist` loop
# above, so this actually prints False despite the comment.
print x == 2 #prints out True
print x == 3
print x < 3
print "Boolean operators... 'and', 'or'...if statements..end with :"
name = "Scott"
age = 19
if name == "Scott" and age == 19:
    print "Your name is %s and your age is %d" %(name, age)
if name == "John" or name == "Scott":
    print "Your name is either Scott or John"

#the in operator
print "the in Operator..used to check if a specified object exists within an iterable object container, such as a list"
namesList = ["Scott", "Bob", "Rick"]
#remember to end if statements with :
if name in namesList:
    print "your name is in the names list"

#===============IF and ELIF condition
if age < 10:
    print "age is less than 10"
elif age < 20:
    print "age is less than 20"
else:
    print "age is greater than or equal to twenty"

# ================== the 'is' operator ===========
# the '==' equals operator matches values of variables.
# the 'is' operator matches instances
x = 5
y = 5
print "comparing x == y to x is y..."
print x == y
print x is y
print "lol what just happened...that should be false."
# NOTE: CPython interns small integers, so `x is y` is True here even
# though they are separate assignments — never use `is` to compare numbers.

#the "not" operator...not before a boolean expression inverts it
print "printing not False should print true"
print not False

#========================loops=======================
numbers = [1,2,3,4,5,6,7,8,9,10]
for x in numbers:
    print x
#get to call the variable whatever you want, remember the : symbol

#loops can iterate over a sequence of numbers using the "range" and
#"xrange" functions. rnage returns a new list with numbers of that
#specified rannge, whereas xrange returns an iterator, which is more efficient
print "using xrange(6)...remember that it is zero based"
for x in xrange(6):
    print x

print "using xrange (3, 6)..."#3 4 5
for x in xrange(3,6):
    print x

#while loops
print "using a while loop now..."
count = 0
while count < 5:
    print count
    count += 1

#break exits a for loop or while loop, continue is used to skip the current block
#can use else for loops...when the loop condition of the "for" or "while"
#statement fails then code part in else is executed
print "using while loop with an else clause"
count = 0
while count < 5:
    print count
    count += 2
else:
    print "count value reached %d" %count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.