content stringlengths 5 1.05M |
|---|
from src.basic.systems.human_system import HumanSystem
__author__ = 'anushabala'
import sqlite3
import json
from src.basic.event import Event
from src.basic.dataset import Example
from src.basic.kb import KB
from argparse import ArgumentParser
from src.basic.scenario_db import add_scenario_arguments, ScenarioDB
from src.basic.schema import Schema
from src.basic.util import read_json
from datetime import datetime
# Timestamp format used in the chat event database (note '-' between the time fields).
date_fmt = '%Y-%m-%d %H-%M-%S'
def convert_events_to_json(chat_id, cursor, scenario_db):
    """Reconstruct one chat as an Example from the rows logged for chat_id.

    :param chat_id: id of the chat to load
    :param cursor: open sqlite3 cursor on the event/chat database
    :param scenario_db: ScenarioDB used to resolve the chat's scenario uuid
    :return: Example holding the chat's events, outcome and agent types
    """
    try:
        # Newer schema logs a start_time column alongside each event.
        cursor.execute('SELECT agent, action, time, data, start_time FROM event WHERE chat_id=? ORDER BY time ASC', (chat_id,))
        logged_events = cursor.fetchall()
    except sqlite3.OperationalError:
        # Older schema has no start_time column: re-query with four columns
        # and duplicate `time` so both schemas yield uniform 5-tuples.
        cursor.execute('SELECT agent, action, time, data FROM event WHERE chat_id=? ORDER BY time ASC', (chat_id,))
        logged_events = cursor.fetchall()
        events = []
        for i, (agent, action, time, data) in enumerate(logged_events):
            events.append((agent, action, time, data, time))
        logged_events = events
    cursor.execute('SELECT scenario_id, outcome FROM chat WHERE chat_id=?', (chat_id,))
    (uuid, outcome) = cursor.fetchone()
    try:
        outcome = json.loads(outcome)
    except ValueError:
        # Malformed or missing outcome JSON: fall back to a zero-reward outcome.
        outcome = {'reward': 0}
    try:
        cursor.execute('SELECT agent_types FROM chat WHERE chat_id=?', (chat_id,))
        agent_types = cursor.fetchone()[0]
        agent_types = json.loads(agent_types)
    except sqlite3.OperationalError:
        # Databases without an agent_types column are assumed human-vs-human.
        agent_types = {0: HumanSystem.name(), 1: HumanSystem.name()}
    chat_events = []
    for (agent, action, time, data, start_time) in logged_events:
        if action == 'join' or action == 'leave':
            # Connection events are not part of the dialogue itself.
            continue
        if action == 'select':
            # 'select' payloads are serialized KB items, not plain strings.
            data = KB.string_to_item(data)
        time = convert_time_format(time)
        start_time = convert_time_format(start_time)
        event = Event(agent, time, action, data, start_time)
        chat_events.append(event)
    return Example(scenario_db.get(uuid), uuid, chat_events, outcome, chat_id, agent_types)
def log_transcripts_to_json(scenario_db, db_path, json_path, uids):
    """Export chat transcripts from the sqlite log into a JSON file.

    :param scenario_db: ScenarioDB used to resolve scenario uuids
    :param db_path: path to the sqlite database with event/chat/mturk_task tables
    :param json_path: path of the JSON file to write
    :param uids: optional list of mturk worker names; if None, export all chats
    """
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    # Schema reminder:
    # CREATE TABLE event (chat_id text, action text, agent integer, time text, data text)
    if uids is None:
        cursor.execute('SELECT DISTINCT chat_id FROM event')
        ids = cursor.fetchall()
    else:
        ids = []
        for uid in uids:
            # Parameterized lookup of the chats belonging to this worker.
            cursor.execute('SELECT chat_id FROM mturk_task WHERE name=?', (uid,))
            ids.extend(cursor.fetchall())
    examples = [convert_events_to_json(chat_id[0], cursor, scenario_db)
                for chat_id in ids]
    # Context manager guarantees the output file is closed even if
    # serialization fails (the original leaked the handle on error).
    with open(json_path, 'w') as outfile:
        json.dump([ex.to_dict() for ex in examples], outfile)
    conn.close()
def log_surveys_to_json(db_path, surveys_file):
    """Dump all user survey responses (and chat agent types) to a JSON file.

    The output is a two-element list [agent_types, survey_data] where
    agent_types maps chat_id -> agent types and survey_data maps
    chat_id -> {0: responses, 1: responses}, keyed by the partner being rated.

    :param db_path: path to the sqlite database with survey and chat tables
    :param surveys_file: path of the JSON file to write
    """
    questions = ['fluent', 'correct', 'cooperative', 'humanlike', 'comments']
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    cursor.execute('''SELECT * FROM survey''')
    logged_surveys = cursor.fetchall()
    survey_data = {}
    agent_types = {}
    for survey in logged_surveys:
        (userid, cid, _, fluent, correct, cooperative, humanlike, comments) = survey
        responses = dict(zip(questions, [fluent, correct, cooperative, humanlike, comments]))
        cursor.execute('''SELECT agent_types, agent_ids FROM chat WHERE chat_id=?''', (cid,))
        chat_result = cursor.fetchone()
        agents = json.loads(chat_result[0])
        agent_ids = json.loads(chat_result[1])
        agent_types[cid] = agents
        if cid not in survey_data:  # membership test on the dict, not .keys()
            survey_data[cid] = {0: {}, 1: {}}
        # The respondent rates their partner: if agent 1 is the respondent,
        # the ratings describe agent 0, and vice versa.
        partner_idx = 0 if agent_ids['1'] == userid else 1
        survey_data[cid][partner_idx] = responses
    conn.close()  # the original leaked the connection
    # with-statement closes the output file (the original leaked the handle).
    with open(surveys_file, 'w') as out:
        json.dump([agent_types, survey_data], out)
def convert_time_format(time):
    """Normalize a logged timestamp to seconds-since-epoch, as a string.

    Accepts either the database's datetime string format (date_fmt) or a
    value that already parses as a UNIX timestamp (returned unchanged).
    Returns None for unrecognized values; None passes straight through.
    """
    if time is None:
        return time
    try:
        parsed = datetime.strptime(time, date_fmt)
        return str((parsed - datetime.fromtimestamp(0)).total_seconds())
    except (ValueError, TypeError):
        pass
    try:
        datetime.fromtimestamp(float(time))  # validate: already a UNIX timestamp
        return time
    except (ValueError, TypeError):
        print('Unrecognized time format: %s' % time)
        return None
if __name__ == "__main__":
    # CLI: export logged chats (and optionally surveys) from sqlite to JSON.
    parser = ArgumentParser()
    add_scenario_arguments(parser)
    parser.add_argument('--db', type=str, required=True, help='Path to database file containing logged events')
    parser.add_argument('--domain', type=str,
                        choices=['MutualFriends', 'Matchmaking'])
    parser.add_argument('--output', type=str, required=True, help='File to write JSON examples to.')
    parser.add_argument('--uid', type=str, nargs='*', help='Only print chats from these uids')
    parser.add_argument('--surveys', type=str, help='If provided, writes a file containing results from user surveys.')
    args = parser.parse_args()

    schema = Schema(args.schema_path, args.domain)
    scenario_db = ScenarioDB.from_dict(schema, read_json(args.scenarios_path))
    # Dump transcripts first, then (optionally) the survey results.
    log_transcripts_to_json(scenario_db, args.db, args.output, args.uid)
    if args.surveys:
        log_surveys_to_json(args.db, args.surveys)
|
__author__ = "Ian Goodfellow"
class Agent(object):
    """Empty placeholder base class for agents."""
    pass
|
import numpy as np
import cv2
from math import sin, cos
def find_T_matrix(pts, t_pts):
    """Estimate the homography H mapping pts onto t_pts via the DLT algorithm.

    :param pts: 3x4 homogeneous source points, one point per column
    :param t_pts: 3x4 homogeneous target points, one point per column
    :return: 3x3 homography matrix, defined up to scale
    """
    A = np.zeros((8, 9))
    for i in range(4):
        xi = pts[:, i]
        xil = t_pts[:, i]
        xi = xi.T
        # Two independent linear constraints per point correspondence.
        A[i * 2, 3:6] = -xil[2] * xi
        A[i * 2, 6:] = xil[1] * xi
        A[i * 2 + 1, :3] = xil[2] * xi
        A[i * 2 + 1, 6:] = -xil[0] * xi
    # H is the right singular vector of the smallest singular value
    # (numpy's svd returns V transposed, so it is the last row).
    _, _, V = np.linalg.svd(A)
    H = V[-1, :].reshape((3, 3))
    return H
def randomPerturbationPts(pts, alfa=0.02):
    """Randomly perturb the four corners of a quadrilateral.

    Each corner moves along a fixed sign direction by a random fraction
    (scaled by alfa) of the mean opposing side lengths.
    """
    # Direction of the scaling applied to each of the four corners.
    signs = np.array([[-1, 1, 1, -1], [-1, -1, 1, 1]])
    # Lengths of the four sides of the quadrilateral.
    sides = [np.linalg.norm(pts[:, i] - pts[:, (i + 1) % 4]) for i in range(4)]
    scale = np.array([(sides[0] + sides[2]) / 2 * alfa,
                      (sides[1] + sides[3]) / 2 * alfa])
    perturbed = np.zeros((2, 4))
    for i in range(4):
        perturbed[:, i] = pts[:, i] + np.random.rand(2) * signs[:, i] * scale
    return perturbed
def CropWarpImage(img, pts, outsize):
    """Crop the quadrilateral pts from img, warped to a rectangle of outsize."""
    target = getRectPts(0, 0, outsize[0], outsize[1])
    source = np.concatenate((pts, np.ones((1, 4))))
    H = find_T_matrix(source, target)
    # WARP_INVERSE_MAP: H maps the source quad to the target rectangle,
    # so warpPerspective applies it as an inverse mapping.
    warped = cv2.warpPerspective(img, H, outsize,
                                 flags=cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP,
                                 borderValue=.0)
    return warped
def getRectPts(tlx, tly, brx, bry):
    """Return the corners of an axis-aligned rectangle as homogeneous columns.

    Column order: top-left, top-right, bottom-right, bottom-left.
    """
    xs = [tlx, brx, brx, tlx]
    ys = [tly, tly, bry, bry]
    ones = [1., 1., 1., 1.]
    return np.matrix([xs, ys, ones], dtype=float)
def perspective_transform(wh, angles=np.array([0., 0., 0.]), zcop=1000., dpp=1000.):
    """Homography induced by rotating a w-by-h rectangle in 3D and projecting it.

    :param wh: (width, height) of the rectangle
    :param angles: rotation angles around x, y, z in degrees
    :param zcop: distance from the center of projection
    :param dpp: distance to the projection plane
    :return: 3x3 homography from the original to the projected rectangle
    """
    rads = np.deg2rad(angles)
    ax, ay, az = rads[0], rads[1], rads[2]
    Rx = np.matrix([[1, 0, 0], [0, cos(ax), sin(ax)], [0, -sin(ax), cos(ax)]])
    Ry = np.matrix([[cos(ay), 0, -sin(ay)], [0, 1, 0], [sin(ay), 0, cos(ay)]])
    Rz = np.matrix([[cos(az), sin(az), 0], [-sin(az), cos(az), 0], [0, 0, 1]])
    R = Rx * Ry * Rz
    (w, h) = tuple(wh)
    # Rectangle corners in 3D and their homogeneous 2D counterparts.
    corners = np.matrix([[0, 0, w, w], [0, h, 0, h], [0, 0, 0, 0]])
    hxy = np.matrix([[0, 0, w, w], [0, h, 0, h], [1, 1, 1, 1]])
    # Rotate about the rectangle center, then push away from the camera.
    centered = corners - np.matrix([[w], [h], [0]]) / 2.
    rotated = R * centered
    shifted = rotated - np.matrix([[0], [0], [zcop]])
    hxyz = np.concatenate([shifted, np.ones((1, 4))])
    # Perspective projection onto the plane at distance dpp.
    P = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1. / dpp, 0]])
    projected = P * hxyz
    projected = projected / projected[2, :]
    projected = projected + np.matrix([[w], [h], [0]]) / 2.
    return find_T_matrix(hxy, projected)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from ellipsoid_fit import ellipsoid_fit, ellipsoid_plot, data_regularize
if __name__=='__main__':
    # Magnetometer calibration demo: fit an ellipsoid to raw samples,
    # map them onto a sphere, and visualize both.
    data = np.loadtxt("mag_out.txt")
    # Bin samples onto a regular angular grid to de-bias dense regions.
    data2 = data_regularize(data, divs=8)

    center, evecs, radii, v = ellipsoid_fit(data2)

    data_centered = data - center.T
    data_centered_regularized = data2 - center.T

    a, b, c = radii
    # Sphere radius preserving the ellipsoid's volume.
    r = (a * b * c) ** (1. / 3.)
    # Per-axis scaling that maps the ellipsoid radii onto that sphere.
    D = np.array([[r/a, 0., 0.], [0., r/b, 0.], [0., 0., r/c]])
    #http://www.cs.brandeis.edu/~cs155/Lecture_07_6.pdf
    #affine transformation from ellipsoid to sphere (translation excluded)
    TR = evecs.dot(D).dot(evecs.T)
    data_on_sphere = TR.dot(data_centered_regularized.T).T

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    #hack for equal axes
    # ax.set_aspect('equal')
    # for direction in (-1, 1):
    #     for point in np.diag(direction * np.max(data) * np.array([1, 1, 1])):
    #         ax.plot([point[0]], [point[1]], [point[2]], 'w')
    ax.scatter(data_centered[:,0], data_centered[:,1], data_centered[:,2], marker='o', color='g')
    # ax.scatter(data_centered_regularized[:, 0], data_centered_regularized[:, 1],
    #            data_centered_regularized[:, 2], marker='o', color='b')
    ax.scatter(data_on_sphere[:, 0], data_on_sphere[:, 1],
               data_on_sphere[:, 2], marker='o', color='r')
    # Green cage: fitted ellipsoid; orange cage: equal-volume sphere.
    ellipsoid_plot([0, 0, 0], radii, evecs, ax=ax, plot_axes=True, cage_color='g')
    ellipsoid_plot([0, 0, 0], [r, r, r], evecs, ax=ax, plot_axes=True, cage_color='orange')
    #ax.plot([r],[0],[0],color='r',marker='o')
    #ax.plot([radii[0]],[0],[0],color='b',marker='o')
    plt.show()
|
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
class OptimizerByNet(object):
    """Meta-optimizer: updates target links with steps predicted by optnet.

    The "grand" optimizer trains optnet itself, while optnet produces the
    per-parameter increment g that is applied to the wrapped target links
    (learning-to-learn setup).

    NOTE(review): method names "trancate_*" (sic, truncate) and the
    constructor parameter "grand_optmizer" are kept as-is since they are
    part of the public interface.
    """
    def __init__(self, optnet, grand_optmizer):
        # grand_optimizer trains the optimizer network's own parameters.
        self.grand_optimizer = grand_optmizer
        self.grand_optimizer.setup(optnet)
        self.optnet = optnet
        self.optnet.cleargrads()
        self.targets = []        # links whose parameters optnet updates
        self.weakref_cache = []  # keeps replaced params alive during training
    def setup(self, *targets):
        """Register the links to be optimized, resetting any previous state."""
        self.release_all()
        self.targets = targets
    def trancate_params(self):
        """Cut the computational graph below every target parameter."""
        for target in self.targets:
            for name, param in sorted(target.namedparams()):
                param.unchain()
    def reset_cache_for_weakref(self):
        # Drop strong references held from earlier update() calls.
        self.weakref_cache = []
    def release_all(self):
        """Forget all targets and clear optnet's recurrent state and grads."""
        self.reset_cache_for_weakref()
        self.targets = []
        self.optnet.reset_state()
        self.optnet.cleargrads()
    def set_param(self, link, name, value, train_optnet=True):
        """Install `value` as attribute `name` on `link`.

        Bypasses Link.__setattr__ so the Variable is not re-registered as a
        fresh Parameter, keeping optnet's graph attached when training it.
        """
        value.name = name
        if not train_optnet:
            value.unchain()
        super(chainer.Link, link).__setattr__(name, value)
    def meta_update(self):
        """Apply the grand optimizer to optnet, then truncate all graphs."""
        self.grand_optimizer.update()
        self.reset_cache_for_weakref()
        self.trancate_params()
        self.optnet.trancate_state()
    def update(self, train_optnet=True):
        """Compute optnet's update for every target parameter and apply it."""
        # calculate
        sorted_namedparams = []  # NOTE(review): unused; kept for byte-compat
        sorted_grads = []
        for target in self.targets:
            for name, param in sorted(target.namedparams()):
                sorted_grads.append(param.grad)
        # Flatten all gradients into one (N, 1) column for a single optnet step.
        concat_grads = F.concat(
            [grad.reshape(-1) for grad in sorted_grads], axis=0).array[:, None]
        concat_gs = self.optnet.step(concat_grads)
        if not train_optnet:
            self.optnet.trancate_state()
            self.optnet.cleargrads()
        # update
        read_size = 0
        for target in self.targets:
            name2link = dict(target.namedlinks())
            for name, param in sorted(target.namedparams()):
                if train_optnet:
                    self.weakref_cache.append(param)  # no need?
                # update: split '/path/to/link/param' into link path and attr.
                split_idx = name.rindex('/')
                link_name, attr_name = name[:split_idx], name[split_idx + 1:]
                g = concat_gs[read_size:read_size + param.size].\
                    reshape(param.shape)
                read_size += param.size
                self.set_param(name2link[link_name], attr_name, param + g,
                               train_optnet=train_optnet)
def preprocess_grad(x, p=10.):
    """Pre-process gradients as in DeepMind's 'Learning to Learn' paper.

    Large-magnitude entries (|x| >= exp(-p)) become (log|x|/p, sign(x));
    small entries become (-1, x * exp(p)). The two channels are concatenated
    along axis 1, doubling the feature dimension.
    """
    xp = chainer.cuda.get_array_module(x)
    cutoff = xp.exp(-p)
    magnitude = xp.abs(x)
    large = magnitude >= cutoff
    small = ~large
    log_part = xp.zeros_like(x)
    sign_part = xp.zeros_like(x)
    log_part[large] = xp.log(magnitude[large] + 1e-8) / p
    sign_part[large] = xp.sign(x[large])
    log_part[small] = -1.
    sign_part[small] = x[small] * xp.exp(p)
    return xp.concatenate([log_part, sign_part], axis=1)
class LSTMOptNet(chainer.Chain):
    """Two-layer LSTM mapping a (preprocessed) gradient to an update step.

    NOTE(review): n_units and n_classes are accepted but never used - the
    hidden sizes are hard-coded to 20. Confirm whether n_units should be
    wired into the LSTM layers.
    """
    def __init__(self, n_units=20, n_classes=10, out_scale=0.1,
                 do_preprocess=True):
        super(LSTMOptNet, self).__init__()
        with self.init_scope():
            # Preprocessing yields 2 channels (log-magnitude, sign) per grad.
            n_input = 2 if do_preprocess else 1
            self.l1 = L.LSTM(n_input, 20, forget_bias_init=0)
            self.l2 = L.LSTM(20, 20, forget_bias_init=0)
            self.lout = L.Linear(20, 1)
            # self.ldirect = L.Linear(n_input, 1)
        self.do_preprocess = do_preprocess
        self.out_scale = out_scale
    def reset_state(self):
        """Clear both LSTM states (start of a new optimization episode)."""
        self.l1.reset_state()
        self.l2.reset_state()
    def trancate_state(self):
        """Unchain LSTM cell/hidden states to truncate backprop through time."""
        def unchain(state):
            if state is not None:
                state.unchain()
        unchain(self.l1.c)
        unchain(self.l1.h)
        unchain(self.l2.c)
        unchain(self.l2.h)
    def step(self, x):
        """Return the predicted update for gradient batch x, scaled by out_scale."""
        if self.do_preprocess:
            x = preprocess_grad(x)
        h1 = self.l1(x)
        h2 = self.l2(h1)  # + h1
        g = self.lout(h2)  # + self.ldirect(x)
        return g * self.out_scale
    def __call__(self, *args):
        # Direct invocation is unsupported; callers must use step().
        raise NotImplementedError
|
from repacolors.convert import *
import random
def test_yiq_reverse():
    """RGB -> YIQ -> RGB round trip stays within tolerance for random colors."""
    for _ in range(100):
        original = RGBTuple(random.random(), random.random(), random.random())
        restored = yiq2rgb(rgb2yiq(original))
        for channel in ("red", "green", "blue"):
            assert abs(getattr(original, channel) - getattr(restored, channel)) < 0.005
def test_hsv_reverse():
    """RGB -> HSV -> RGB round trip stays within tolerance for random colors."""
    for _ in range(100):
        original = RGBTuple(random.random(), random.random(), random.random())
        restored = hsv2rgb(rgb2hsv(original))
        for channel in ("red", "green", "blue"):
            assert abs(getattr(original, channel) - getattr(restored, channel)) < 0.005
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import dxpy
import os
import subprocess
import sys
import time
import util
# Resolve the repository top directory relative to this script's location.
here = os.path.dirname(sys.argv[0])
top_dir = os.path.dirname(os.path.abspath(here))

# Region whose project hosts the primary build.
HOME_REGION = "aws:us-east-1"

# Region -> project used for test (non-release) builds.
TEST_DICT = {
    "aws:us-east-1" : "dxWDL_playground"
}

# To add region R, create a project for it, dxWDL_R, and add
# a mapping to the lists
#    R : dxWDL_R
RELEASE_DICT = {
    "aws:us-east-1" : "dxWDL",
    "aws:ap-southeast-2" : "dxWDL_Sydney",
    "azure:westus" : "dxWDL_Azure",
    "azure:westeurope" : "dxWDL_Amsterdam",
    "aws:eu-central-1" : "dxWDL_Berlin"
}
def main():
    """Build a dxWDL release: asset + compiler jar, optionally in all regions."""
    argparser = argparse.ArgumentParser(description="Build a dxWDL release")
    argparser.add_argument("--force",
                           help="Build even if the there is an existing version",
                           action='store_true',
                           default=False)
    argparser.add_argument("--multi-region",
                           help="Copy to all supported regions",
                           action='store_true',
                           default=False)
    args = argparser.parse_args()

    # build multi-region jar for releases, or
    # if explicitly specified
    multi_region = args.multi_region

    # Choose which dictionary to use
    if multi_region:
        project_dict = RELEASE_DICT
    else:
        project_dict = TEST_DICT
    project = util.get_project(project_dict[HOME_REGION])
    print("project: {} ({})".format(project.name, project.get_id()))

    # Figure out what the current version is
    version_id = util.get_version_id(top_dir)
    print("version: {}".format(version_id))

    # Set the folder
    folder = "/releases/{}".format(version_id)
    print("folder: {}".format(folder))

    # remove the existing directory paths
    if args.force:
        for proj_name in project_dict.values():
            print("removing path {}:{}".format(proj_name, folder))
            dx_proj = util.get_project(proj_name)
            try:
                dx_proj.remove_folder(folder, recurse=True)
            except dxpy.DXError:
                # Folder may not exist yet; nothing to remove.
                pass

    # Make sure the target directory exists
    project.new_folder(folder, parents=True)

    # Build the asset, and the compiler jar file.
    path_dict = dict(map(lambda kv: (kv[0], kv[1] + ":" + folder),
                         project_dict.items()))
    (jar_path, home_ad) = util.build(project, folder, version_id, top_dir, path_dict)

    if multi_region:
        # download dxWDL runtime library
        home_rec = dxpy.DXRecord(home_ad.asset_id)
        fid = home_rec.get_details()['archiveFileId']['$dnanexus_link']
        fn = dxpy.describe(fid)['name']
        rtlib_path = "/tmp/{}".format(fn)
        print("Download asset file {}".format(fn))
        dxpy.download_dxfile(fid,
                             rtlib_path,
                             show_progress=True)

        # copy to all other regions
        for region in project_dict.keys():
            if region != home_ad.region:
                proj = project_dict[region]
                if proj is None:
                    raise Exception("No project configured for region {}".format(region))
                dest_proj = util.get_project(proj)
                if dest_proj is not None:
                    dest_ad = util.copy_across_regions(rtlib_path, home_rec, region, dest_proj, folder)
                else:
                    print("No project named {}".format(proj))

    # Upload compiler jar file
    util.upload_local_file(jar_path, project, folder)

if __name__ == '__main__':
    main()
|
from cvpods.layers import ShapeSpec
from cvpods.modeling.backbone import Backbone
from cvpods.modeling.backbone.fpn import build_resnet_fpn_backbone
from cvpods.modeling.meta_arch.solo import SOLO
from cvpods.modeling.meta_arch.solo_decoupled import DecoupledSOLO
def build_backbone(cfg, input_shape=None):
    """Build a ResNet-FPN backbone from config.

    When input_shape is omitted, the channel count defaults to the number
    of entries in cfg.MODEL.PIXEL_MEAN.
    """
    if input_shape is None:
        input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
    fpn = build_resnet_fpn_backbone(cfg, input_shape)
    assert isinstance(fpn, Backbone)
    return fpn
def build_model(cfg):
    """Build a SOLO or DecoupledSOLO model based on cfg.MODEL.SOLO.HEAD.TYPE.

    :param cfg: config node; also receives build_backbone for model construction
    :return: the instantiated model
    :raises ValueError: if the head type is not one of the supported names
    """
    cfg.build_backbone = build_backbone
    solo_head = cfg.MODEL.SOLO.HEAD.TYPE
    if solo_head == "SOLOHead":
        model = SOLO(cfg)
    elif solo_head == "DecoupledSOLOHead":
        model = DecoupledSOLO(cfg)
    else:
        # Fixed typo in the error message: "Unknow" -> "Unknown".
        raise ValueError(
            f"Unknown SOLO head type: {solo_head}. "
            "Please select from ['SOLOHead', 'DecoupledSOLOHead'].")
    return model
|
"""
A simple terminal tool, nicely plotting all messages send by contrast over ZeroMQ as text.
"""
import os
import sys
import zmq
import h5py
import json
import time
import numpy as np
class zmq_listener(object):
    """
    Subscribes to a contrast stream recordes zmq stream.
    Prints the zmq streamed dictionaries in a nice way.
    """
    ############################################################################
    def __init__(self, port=5556, host='localhost'):
        # SUB socket subscribed to every topic (empty prefix filter).
        context = zmq.Context()
        self.sock = context.socket(zmq.SUB)
        self.sock.connect("tcp://%s:%u" % (host, port))
        self.sock.setsockopt(zmq.SUBSCRIBE, b"")
        self.running = True
        self.pretty_print_message('started to listen to', '%s:%u' % (host, port))
    def run(self):
        """Receive and pretty-print messages until stopped or interrupted."""
        while self.running:
            try:
                # listen for a message
                _metadata = self.sock.recv_pyobj()
                #do something with the data
                self.pretty_print_message('message recieved', self.date_time_string())
                self.pretty_print_dict(_metadata, indent=1)
                # a tiny delay to let the computer breath
                time.sleep(0.01)
            except KeyboardInterrupt:
                self.stop()
            except Exception as err:
                # Any other failure: report it and shut down cleanly.
                self.pretty_print_error('')
                print(err)
                self.stop()
    def stop(self):
        """Close the socket and end the run() loop."""
        self.sock.close()
        self.running = False
    ############################################################################
    def date_time_string(self):
        # Local time formatted like 2024-01-31_13:59:59.
        return time.strftime('%Y-%m-%d_%H:%M:%S')
    def pretty_print_dict(self, d, indent=0):
        """Recursively print a dict, one key per line, indenting nested dicts."""
        for key, value in d.items():
            if isinstance(value, dict):
                print('\t' * indent + str(key)+ ' : ')
                self.pretty_print_dict(value, indent+1)
            else:
                print('\t' * indent + str(key) + ' : '+str(value))
    def make_color_code(self, style='none', text_color='black', background_color='white'):
        """Build an ANSI escape sequence for the given style and colors.

        NOTE(review): the background code is formed as '4' + the second digit
        of the foreground table entry; for 'white' ('38') that yields '48',
        which is the 256-color prefix rather than a plain background code,
        and 'underline' maps to SGR 2 (faint) - confirm both are intended.
        """
        dict_style = {'none':'0', 'bold':'1', 'underline':'2', 'negative1':'3', 'negative2':'5'}
        dict_c = {'black':'30', 'k':'30',
                  'red':'31', 'r':'31',
                  'green':'32', 'g':'32',
                  'yellow':'33', 'y':'33',
                  'blue':'34', 'b':'34',
                  'purple':'35', 'm':'35',
                  'cyan':'36', 'c':'36',
                  'gray':'37', 'gr':'37',
                  'white':'38', 'w':'38'}
        return '\033['+dict_style[style]+';'+dict_c[text_color]+';4'+dict_c[background_color][1]+'m'
    def pretty_print_error(self, error_message):
        """Print error_message with the script's error color scheme."""
        line = self.make_color_code('bold','k','c') + ' ' + sys.argv[0] + ' '
        line += self.make_color_code('none','c','r') + '\u25B6 '
        line += self.make_color_code('none','k','r') + 'error '
        line += self.make_color_code('none','r','w') + '\u25B6 '
        line += self.make_color_code('none','k','w') + error_message
        print(line)
    def pretty_print_warning(self, warning_message):
        """Print warning_message with the script's warning color scheme."""
        line = self.make_color_code('bold','k','c') + ' ' + sys.argv[0] + ' '
        line += self.make_color_code('none','c','y') + '\u25B6 '
        line += self.make_color_code('none','k','y') + 'warning '
        line += self.make_color_code('none','y','w') + '\u25B6 '
        line += self.make_color_code('none','k','w') + warning_message
        print(line)
    def pretty_print_message(self, header, message):
        """Print header and message with the script's info color scheme."""
        line = self.make_color_code('bold','k','c') + ' ' + sys.argv[0] + ' '
        line += self.make_color_code('none','c','g') + '\u25B6 '
        line += self.make_color_code('none','k','g') + header+' '
        line += self.make_color_code('none','g','w') + '\u25B6 '
        line += self.make_color_code('none','k','w') + message
        print(line)
###############################################################################
if __name__ == "__main__":
    # Known control-computer hosts; listen to cc3 by default.
    known_hosts = {}
    known_hosts['cc1'] = '172.16.125.11'
    known_hosts['cc3'] = '172.16.125.30'
    recv = zmq_listener(host=known_hosts['cc3'], port=5556)
    recv.run()
|
"""
Set Constants
"""
# Data sources the pipeline knows how to ingest.
ALLOWED_SOURCES = ["jhu", "worldometer", "imedd", "govgr", "who", "sch"]

# Raw column headers (worldometer / sch / JHU exports) -> canonical names.
# Fixed: the original literal contained a duplicate "Population" key with an
# identical value; the redundant entry has been removed (behavior unchanged,
# since the later duplicate silently won).
COLUMN_MAPPINGS = {
    "Country,Other": "country",
    "TotalCases": "cases",
    "NewCases": "new_cases",
    "TotalDeaths": "deaths",
    "NewDeaths": "new_deaths",
    "TotalRecovered": "recovered",
    "NewRecovered": "new_recovered",
    "ActiveCases": "active",
    "Serious,Critical": "critical",
    "Tot\xa0Cases/1M pop": "cases_per_1m_pop",
    "Deaths/1M pop": "deaths_per_1m_pop",
    "TotalTests": "tests",
    "Tests/\n1M pop": "test_per_1m_pop",
    "Continent": "continent",
    "1 Caseevery X ppl": "case_ratio",
    "1 Deathevery X ppl": "death_ratio",
    "1 Testevery X ppl": "test_ratio",
    "Σχολείο/Δομή": "school",
    "Περιοχή": "region",
    "Διεύθυνση": "address",
    "Αναστολή έως και": "dueTo",
    "Παρατηρήσεις:": "notes",
    "Country/Region": "country",
    "Date": "date",
    "Confirmed": "cases",
    "Deaths": "deaths",
    "Recovered": "recovered",
    "Active": "active",
    "New Cases": "new_cases",
    "New Deaths": "new_deaths",
    "New Recovered": "new_recovered",
    "Case-Fatality Ratio": "case_fatality_ratio",
    "Admin2": "admin_2",
    "Country_Region": "country",
    "Lat": "lat",
    "Long_": "long",
    "Long": "long",
    "Population": "population",
    "UID": "uid",
}

# Aggregate/odd rows to drop from scraped tables.
EXCLUDE_ROWS = [
    "",
    "North America",
    "Asia",
    "South America",
    "Europe",
    "Africa",
    "Oceania",
    "World",
    "Total:",
    "MS Zaandam",
    "Diamond Princess",
    "Wallis and Futuna",
    "Greece"
]

# Local working directories and upstream locations.
OUTPUT = "data/"
TMP = "tmp/"
DATA_JHU_BASE_PATH = "jhu/csse_covid_19_data/csse_covid_19_time_series/"
DATA_IMEDD_BASE_PATH = "imedd/COVID-19/"
DATA_WOM_BASE_LINK = "https://www.worldometers.info/coronavirus/"
DATA_SCH_BASE_LINK = "https://www.sch.gr/anastoli/web/index.php"
REPO_JHU_URL = "https://github.com/CSSEGISandData/COVID-19.git"
REPO_IMEDD_URL = "https://github.com/iMEdD-Lab/open-data.git"

# Manual coordinate overrides for countries with missing/ambiguous centroids.
FIX_CORDS = {
    "Canada": {"Lat": 56.1304, "Long": -106.3468},
    "China": {"Lat": 35.8617, "Long": 104.1954},
    "Australia": {"Lat": -25.2744, "Long": 133.7751},
}
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import sys
def find_in_path(filename):
    """
    Find the file named filename in the sys.path.
    Returns the full path name if found, None if not found.
    Definition
    ----------
    def find_in_path(filename):
    Input
    -----
    filename   str; name of searched file
    Output
    ------
    Full path of file. None if not found.
    Examples
    --------
    >>> datei = 'find_in_path.py'
    >>> isdatei = find_in_path(datei)
    >>> if isdatei is None:
    ...     print('No')
    ... else:
    ...     print('Yes')
    Yes
    >>> datei = 'gapfill.py'
    >>> isdatei = find_in_path(datei)
    >>> if isdatei is None:
    ...     print('No')
    ... else:
    ...     print('Yes')
    No
    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.
    Copyright (c) 2013 Matthias Cuntz - mc (at) macu (dot) de
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    History
    -------
    Written,  MC, Mar 2013
    """
    # Return the first match in sys.path order; later shadowed copies are ignored.
    for dirname in sys.path:
        possible = os.path.join(dirname, filename)
        if os.path.isfile(possible): return possible
    return None
if __name__ == '__main__':
    import doctest
    # NORMALIZE_WHITESPACE lets the doctest outputs above match loosely.
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    # datei = 'wiki.pdf'
    # isdatei = find_in_path(datei)
    # if isdatei is None:
    #     print('No')
    # else:
    #     print('Yes')
    # # Yes

    # datei = 'Humor-Sans.ttf'
    # isdatei = find_in_path(datei)
    # if isdatei is None:
    #     print('No')
    # else:
    #     print('Yes')
    # # No
|
from chess_engine.chess_db.utils import execute
def create_moves_table(cursor, conn):
    """Create the moves table: one row per half-move, linked to game and player."""
    create_table_games = '''CREATE TABLE moves (
    id SERIAL PRIMARY KEY NOT NULL,
    created time DEFAULT CURRENT_TIMESTAMP,
    deleted time,
    game_id int NOT NULL,
    player_hash char(32),
    game_state varchar (100),
    san varchar (10),
    FOREIGN KEY (game_id) REFERENCES games (id),
    FOREIGN KEY (player_hash) REFERENCES users (player_hash)
    )'''
    execute(cursor, conn, create_table_games)
def add_move_using_auth(cursor, conn, game_id, auth_token, board_config, san):
    """Insert a move attributed to the player identified by auth_token.

    SECURITY NOTE(review): arguments are spliced into the SQL with str.format,
    which is vulnerable to SQL injection if any value is attacker-controlled.
    Switch to parameterized queries if the execute() helper supports them.
    """
    query = '''INSERT INTO moves (game_id, player_hash, game_state, san) VALUES (
    {game_id}, (SELECT player_hash FROM users WHERE users.auth_token = '{auth_token}'),
    '{board_state}', '{san}'
    )'''.format(
        game_id = game_id,
        auth_token = auth_token,
        board_state = board_config,
        san = san
    )
    execute(cursor, conn, query)
def add_opp_move_using_auth(cursor, conn, player_no, game_id, auth_token, board_config, san):
    """Insert a move attributed to the opponent of the authenticated player.

    The opponent's hash is resolved from the games row using the games table
    columns player_one/player_two.

    NOTE(review): when player_no == 1 the "current" column becomes
    'player_two' - this looks inverted; confirm the intended mapping.

    SECURITY NOTE(review): arguments are spliced into the SQL with str.format,
    which is vulnerable to SQL injection if any value is attacker-controlled.
    """
    player_no_curr = 'player_one'
    player_no_opp = 'player_two'
    if player_no == 1:
        player_no_curr = 'player_two'
        player_no_opp = 'player_one'
    query = '''INSERT INTO moves (game_id, player_hash, game_state, san) VALUES (
    {game_id}, (SELECT games.{player_opp} FROM games WHERE games.{player_curr} =
    (SELECT users.player_hash FROM users WHERE users.auth_token = '{auth_token}')
    AND games.id = {game_id}),
    '{board_state}', '{san}')'''.format(
        game_id = game_id,
        auth_token = auth_token,
        player_curr = player_no_curr,
        player_opp = player_no_opp,
        board_state = board_config,
        san = san
    )
    execute(cursor, conn, query)
|
from flask import jsonify
from . import main
@main.app_errorhandler(400)
def bad_request(e):
    """Return the error description as JSON with HTTP status 400."""
    # TODO: log the error
    return jsonify(error=e.description), 400
@main.app_errorhandler(401)
def unauthorized(e):
    """Return a JSON 401 response advertising Bearer authentication."""
    headers = [('WWW-Authenticate', 'Bearer')]
    return jsonify(error="Unauthorized, bearer token must be provided"), 401, headers
@main.app_errorhandler(403)
def forbidden(e):
    """Return a JSON 403 response that echoes the rejected token description."""
    msg = "Forbidden, invalid bearer token provided '{}'".format(e.description)
    return jsonify(error=msg), 403
@main.app_errorhandler(404)
def page_not_found(e):
    """Return a generic JSON 404 response."""
    return jsonify(error="Not found"), 404
@main.app_errorhandler(500)
def internal_server_error(e):
    """Return a generic JSON 500 response; internal details are not leaked."""
    # TODO: log the error
    return jsonify(error="Internal error"), 500
|
from django.conf import settings
# Custom context processors for sikteeri
def is_production(request):
    '''
    Add the constant 'is_production' to current context
    (mirrors settings.PRODUCTION for use in templates).
    '''
    return {'is_production': settings.PRODUCTION}
|
import mnist_loader as ml
import network1 as nw
def main():
    """Train a 784-30-10 network on MNIST with SGD.

    30 epochs, mini-batch size 10, learning rate 3.0; evaluated on test_data
    after each epoch by Network.SGD.
    """
    training_data, validation_data, test_data = ml.load_data_wrapper()
    net = nw.Network([784, 30, 10])
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
    # (Earlier experiments sweeping hidden-layer topologies were removed.)


# Guard the entry point so importing this module does not start training.
if __name__ == '__main__':
    main()
from pathlib import Path
import os
from statistics import mean
import matplotlib.pyplot as plt
from random import randint
import pickle
from akkadian.build_data import build_signs_and_transcriptions, break_into_sentences
from akkadian.data import from_key_to_text_and_line_numbers
from akkadian.parse_xml import parse_xml
from akkadian.data import increment_count
def write_sentences_to_file(chars_sentences, translation_sentences, signs_path, transcription_path, translation_path):
    """
    Write the data of word by word translations to files (different files for signs, transliterations and translations)
    :param chars_sentences: sentences with the signs and transliterations
    :param translation_sentences: translations done word by word for the corresponding chars_sentences
    :param signs_path: output file for the signs
    :param transcription_path: output file for the transliterations
    :param translation_path: output file for the translations
    :return: nothing, signs, transliterations and translations written to corresponding files
    """
    translation_lengths = []
    # Context managers guarantee all three files are closed even if a write
    # fails (the original left them open on error).
    with open(signs_path, "w", encoding="utf8") as signs_file, \
            open(transcription_path, "w", encoding="utf8") as transcription_file, \
            open(translation_path, "w", encoding="utf8") as translation_file:
        for key in translation_sentences:
            signs_file.write(key + ": ")
            transcription_file.write(key + ": ")
            translation_file.write(key + ": ")
            for c in chars_sentences[key]:
                # c = (..., transliteration, delimiter-or-None, sign)
                signs_file.write(c[3])
                delim = c[2] if c[2] is not None else " "
                transcription_file.write(c[1] + delim)
            translation_lengths.append(len(translation_sentences[key]))
            for t in translation_sentences[key]:
                translation_file.write(t[1] + " ")
            signs_file.write("\n")
            transcription_file.write("\n")
            translation_file.write("\n")
    print("Number of word translations in a line is: " + str(len(translation_lengths)))
    print("Mean word translations in a line length is: " + str(mean(translation_lengths)))
    # build_graph(translation_lengths, "word translations in a line")
def write_sentences_to_file_no_translation(chars_sentences, transcription_path):
    """
    Write the transliteration sentences to a file (used when no translations exist)
    :param chars_sentences: sentences with the signs and transliterations
    :param transcription_path: output file for the transliterations
    :return: nothing, transliterations written to the file
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open(transcription_path, "w", encoding="utf8") as transcription_file:
        for key in chars_sentences:
            transcription_file.write(key + ": ")
            for c in chars_sentences[key]:
                # c[2] is the delimiter following the sign; None means a space.
                delim = c[2] if c[2] is not None else " "
                transcription_file.write(c[1] + delim)
            transcription_file.write("\n")
def build_translations(corpora, mapping):
    """
    Collect translations from every XML file of the given corpora.
    :param corpora: corpora to use for building the data for full translation
    :param mapping: mapping between different numbering of lines
    :return: dict with all translations found
    """
    base_directory = Path(r"../raw_data/tei/")
    all_translations = {}
    for corpus in corpora:
        corpus_dir = base_directory / corpus
        # Walk the corpus directory recursively and parse every file found.
        for root, _dirs, files in os.walk(corpus_dir):
            for file_name in files:
                parsed = parse_xml(os.path.join(root, file_name), mapping, corpus)
                all_translations.update(parsed)
    return all_translations
def build_full_line_translation_process(corpora, has_translation, signs_path, transcription_path, translation_path):
    """
    First preprocessing stage: build signs and transliterations and write them out.
    :param corpora: corpora to use for building the data for full translation
    :param has_translation: whether translations exist and should also be written
    :param signs_path: output file for the signs
    :param transcription_path: output file for the transliterations
    :param translation_path: output file for the translations
    :return: chars sentences and mapping between different numbering of lines
    """
    chars, translation, mapping, lines_cut_by_translation = build_signs_and_transcriptions(corpora, True)
    chars_sentences = break_into_sentences(chars, lines_cut_by_translation)
    if not has_translation:
        write_sentences_to_file_no_translation(chars_sentences, transcription_path)
    else:
        translation_sentences = break_into_sentences(translation, lines_cut_by_translation)
        write_sentences_to_file(chars_sentences, translation_sentences, signs_path,
                                transcription_path, translation_path)
    return chars_sentences, mapping
def build_graph(translation_lengths, name):
    """
    Build a graph to show different translation lengths and their frequencies
    :param translation_lengths: list of all translation lengths
    :param name: name for the graph
    :return: nothing, a graph is saved to a file
    """
    # matplotlib histogram
    # NOTE(review): pyplot state is global - repeated calls draw on the same
    # axes unless a new figure is created; confirm this is intended.
    plt.hist(translation_lengths, color='blue', edgecolor='black', bins=100)
    # Add labels
    plt.title('Histogram of Translation Lengths - ' + name)
    plt.xlabel('Number of Words in a Sentence')
    plt.ylabel('Number of Sentences')
    # NOTE(review): r".output/" creates a hidden '.output' directory - was
    # "./output/" intended? TODO confirm.
    plt.savefig(Path(r".output/" + name))
def get_dict_sorted(d):
    """
    Render a dictionary as a string, sorted by value in descending order
    :param d: dictionary to be sorted
    :return: string representation of the sorted dictionary
    """
    ordered = sorted(d.items(), key=lambda item: item[1], reverse=True)
    return str(dict(ordered))
def get_rare_elements_number(d, n):
    """
    Count the number of rare elements (occurrence count strictly below n)
    :param d: dictionary mapping element -> occurrence count
    :param n: the threshold for rarity (exclusive)
    :return: the number of rare elements, as a string
    """
    # sum over a generator replaces the manual counter loop.
    return str(sum(1 for v in d.values() if v < n))
def print_statistics(translation_lengths, long_trs, very_long_trs, signs_vocab, transcription_vocab, translation_vocab,
                     could_divide_by_three_dots, could_not_divide):
    """
    Report all the computed corpus statistics on stdout.
    :param translation_lengths: list of all translation lengths
    :param long_trs: counter for long translations (more than 50 words)
    :param very_long_trs: counter for very long translations (more than 200 words)
    :param signs_vocab: vocabulary of all the signs
    :param transcription_vocab: vocabulary of all the transliterations
    :param translation_vocab: vocabulary of all the words in different translations
    :param could_divide_by_three_dots: counter for translations possible to divide based on three dots
    :param could_not_divide: counter for translations not possible to divide based on three dots
    :return: nothing, all data is printed to stdout
    """
    print(f"Number of real translations is: {len(translation_lengths)}")
    print(f"Mean real translations length is: {mean(translation_lengths)}")
    print(f"Number of real translations longer than 50 is: {long_trs}")
    print(f"Number of real translations longer than 200 is: {very_long_trs}")
    print(f"Size of signs vocabulary is: {len(signs_vocab)}")
    print("Number of signs with less than 5 occurrences is: " + get_rare_elements_number(signs_vocab, 5))
    print("The signs vocabulary is: " + get_dict_sorted(signs_vocab))
    print(f"Size of transliteration vocabulary is: {len(transcription_vocab)}")
    print("Number of transliterations with less than 5 occurrences is: " +
          get_rare_elements_number(transcription_vocab, 5))
    print("The transliteration vocabulary is: " + get_dict_sorted(transcription_vocab))
    print(f"Size of translation (English) vocabulary is: {len(translation_vocab)}")
    print("Number of translations (English) with less than 5 occurrences is: " +
          get_rare_elements_number(translation_vocab, 5))
    print("The translation (English) vocabulary is: " + get_dict_sorted(translation_vocab))
    print(f"Number of sentences that were divided by three dots is: {could_divide_by_three_dots}")
    print(f"Number of sentences that were not able to be divided is: {could_not_divide}")
    # build_graph(translation_lengths, "real translations")
def compute_translation_statistics(tr, translation_lengths, long_trs, very_long_trs, translation_vocab):
    """
    Compute statistics related to a translation.
    :param tr: current translation
    :param translation_lengths: list of all translation lengths (appended to)
    :param long_trs: counter for long translations (more than 50 words)
    :param very_long_trs: counter for very long translations (more than 200 words)
    :param translation_vocab: vocabulary of all the words in different translations
    :return: the four last parameters to the function after being updated for the current translation
    """
    # Hoisted: the original called tr.split() once per use; split once instead.
    words = tr.split()
    translation_lengths.append(len(words))
    if len(words) > 50:
        long_trs += 1
    if len(words) > 200:
        # Note: a >200-word translation increments both counters (intentional,
        # preserved from the original behavior).
        very_long_trs += 1
    # One C-level pass removes all sentence punctuation instead of five chained
    # .replace() calls per word.
    punctuation_table = str.maketrans("", "", ",!?:;")
    for word in words:
        word = word.translate(punctuation_table)
        # Words consisting only of dots (or emptied by stripping) are kept as a
        # canonical "..." token; otherwise dots are removed from the word.
        if word.replace(".", "") == "":
            word = "..."
        else:
            word = word.replace(".", "")
        increment_count(translation_vocab, word)
    return translation_lengths, long_trs, very_long_trs, translation_vocab
def clean_signs_transcriptions(signs, is_signs):
    """
    Clean the signs and transcriptions and canonize them.

    Scans the string for runs that start at a "." or "x" (markers of damaged or
    unreadable text) and collapses each run into a canonical ellipsis token:
    "..." for signs, "... " for transliterations. A handful of short
    transliteration patterns (e.g. ".", ". ", "xₓ") are left in place.
    :param signs: signs / transliterations
    :param is_signs: True if we are dealing with signs
    :return: signs / transliterations after clean is done
    """
    start_index = 0
    while start_index < len(signs):
        # Find the next "." or "x" at or after start_index.
        index1 = signs.find(".", start_index, len(signs))
        index2 = signs.find("x", start_index, len(signs))
        if index1 != -1 or index2 != -1:
            # Pick whichever marker occurs first (handling the one-sided cases).
            if index1 != -1 and index2 == -1:
                index = index1
            elif index1 == -1 and index2 != -1:
                index = index2
            else:
                index = min(index1, index2)
            end_index = index
            if is_signs:
                # Signs: extend over the whole run of "." / "x" characters and
                # replace it with the canonical "..." token.
                while end_index < len(signs) and (signs[end_index] == "." or signs[end_index] == "x"):
                    end_index += 1
                signs = signs[:index] + "..." + signs[end_index:]
                # Resume scanning just after the inserted "..." (3 chars).
                start_index = index + 3
            else:
                # Transliterations: the run may also include separators.
                while end_index < len(signs) and (signs[end_index] == "." or signs[end_index] == "x"
                                                  or signs[end_index] == " " or signs[end_index] == "-"
                                                  or signs[end_index] == "+" or signs[end_index] == "—"
                                                  or signs[end_index] == "ₓ"):
                    end_index += 1
                sub_signs = signs[index:end_index]
                # These short patterns are legitimate and kept as-is; we only
                # advance the scan position past them.
                if sub_signs == ".":
                    start_index = index + 1
                elif sub_signs == ". ":
                    start_index = index + 2
                elif sub_signs == ".-":
                    start_index = index + 2
                elif sub_signs == ".—":
                    start_index = index + 2
                elif sub_signs == "xₓ":
                    start_index = index + 2
                elif sub_signs == "xₓ—":
                    start_index = index + 3
                else:
                    # Anything else is a damage run: canonize to "... " (4 chars).
                    signs = signs[:index] + "... " + signs[end_index:]
                    start_index = index + 4
        else:
            # No more markers: terminate the scan.
            start_index = len(signs)
    return signs
def add_translation_to_file(prev_signs, signs_vocab, prev_transcription, transcription_vocab, prev_tr,
                            translation_lengths, long_trs, very_long_trs, translation_vocab, prev_text,
                            prev_start_line, prev_end_line, signs_file, transcription_file, translation_file,
                            could_divide_by_three_dots, could_not_divide, metadata=False, divide_by_three_dots=True):
    """
    Add a translation with corresponding signs and transliterations to files.
    :param prev_signs: previous signs written to file
    :param signs_vocab: vocabulary of all the signs
    :param prev_transcription: previous transliterations written to file
    :param transcription_vocab: vocabulary of all the transliterations
    :param prev_tr: previous translation written to file
    :param translation_lengths: list of all translation lengths
    :param long_trs: counter for long translations
    :param very_long_trs: counter for very long translations
    :param translation_vocab: vocabulary of all the words in different translations
    :param prev_text: previous text written to file
    :param prev_start_line: previous start line written to file
    :param prev_end_line: previous end line written to file
    :param signs_file: file of all signs, being built as input for translation algorithms
    :param transcription_file: file of all transliterations, being built as input for translation algorithms
    :param translation_file: file of all translations, being built as input for translation algorithms
    :param could_divide_by_three_dots: counter for translations possible to divide based on three dots
    :param could_not_divide: counter for translations not possible to divide based on three dots
    :param metadata: should add the id of each sample to the files
    :param divide_by_three_dots: if True, emit one line per "..."-separated segment when all three
                                 streams split into the same number of segments
    :return: some of the parameters to the function, after update
    """
    # Join the buffered signs/transliterations into flat strings while counting
    # vocabulary occurrences.
    signs = ""
    transcription = ""
    for sign in prev_signs:
        signs += sign
        increment_count(signs_vocab, sign)
    for t, delim in prev_transcription:
        transcription += t + delim
        increment_count(transcription_vocab, t)
    # Canonize damage markers to "..." so the three streams can be aligned.
    signs = clean_signs_transcriptions(signs, True)
    transcription = clean_signs_transcriptions(transcription, False)
    # Sample identifier: [text.start_line, text.end_line].
    real_key = [prev_text + "." + str(prev_start_line), prev_text + "." + str(prev_end_line)]
    # Split each stream on its ellipsis token, dropping empty fragments.
    splitted_signs = [s for s in signs.split("...") if s != "" and s != " "]
    splitted_transcription = [t for t in transcription.split("... ") if t != "" and t != " "]
    splitted_translation = [tr for tr in prev_tr.split("... ") if tr != "" and tr != " "]
    # Write to files
    if len(splitted_signs) == len(splitted_transcription) and len(splitted_transcription) == len(splitted_translation) \
            and divide_by_three_dots:
        # All three streams split into the same number of segments: emit one
        # aligned line per segment.
        could_divide_by_three_dots += 1
        for i in range(len(splitted_signs)):
            if metadata:
                signs_file.write(str(real_key) + "[" + str(i + 1) + "]: " + splitted_signs[i] + "\n")
                transcription_file.write(str(real_key) + "[" + str(i + 1) + "]: " + splitted_transcription[i] + "\n")
                translation_file.write(str(real_key) + "[" + str(i + 1) + "]: " + splitted_translation[i] + "\n")
            else:
                signs_file.write(splitted_signs[i] + "\n")
                transcription_file.write(splitted_transcription[i] + "\n")
                translation_file.write(splitted_translation[i] + "\n")
            translation_lengths, long_trs, very_long_trs, translation_vocab = \
                compute_translation_statistics(splitted_translation[i], translation_lengths, long_trs, very_long_trs,
                                               translation_vocab)
    else:
        # Segment counts disagree (or division disabled): emit the whole sample
        # as a single line per stream.
        could_not_divide += 1
        if metadata:
            signs_file.write(str(real_key) + ": " + signs + "\n")
            transcription_file.write(str(real_key) + ": " + transcription + "\n")
            translation_file.write(str(real_key) + ": " + prev_tr + "\n")
        else:
            signs_file.write(signs + "\n")
            transcription_file.write(transcription + "\n")
            translation_file.write(prev_tr + "\n")
        translation_lengths, long_trs, very_long_trs, translation_vocab = \
            compute_translation_statistics(prev_tr, translation_lengths, long_trs, very_long_trs, translation_vocab)
    return signs_vocab, transcription_vocab, translation_lengths, long_trs, very_long_trs, translation_vocab, \
        could_divide_by_three_dots, could_not_divide
def write_translations_to_file(chars_sentences, translations, signs_path, transcription_path, translation_path, divide_by_three_dots):
    """
    Write all the data we collected (signs, transliterations and translations) to proper files.
    :param chars_sentences: sentences of the signs and transliterations
    :param translations: translations corresponding to the signs and transliterations
    :param signs_path: output file for the signs
    :param transcription_path: output file for the transliterations
    :param translation_path: output file for the translations
    :param divide_by_three_dots: whether samples may be split on "..." markers
    :return: nothing, the signs, transliterations and translations are written to different files
    """
    signs_file = open(signs_path, "w", encoding="utf8")
    transcription_file = open(transcription_path, "w", encoding="utf8")
    translation_file = open(translation_path, "w", encoding="utf8")
    translation_lengths = []
    long_trs = 0
    very_long_trs = 0
    signs_vocab = {}
    transcription_vocab = {}
    translation_vocab = {}
    could_divide_by_three_dots = 0
    could_not_divide = 0
    # State of the previous (buffered) sample; it is only flushed once we know
    # the *next* sample does not overlap it.
    prev_text = ""
    prev_start_line = ""
    prev_end_line = ""
    prev_key = ""
    prev_signs = []
    prev_transcription = []
    prev_tr = ""
    prev_should_add = False
    for key in translations.keys():
        text, start_line, end_line = from_key_to_text_and_line_numbers(key)
        if start_line == -1:
            # Key could not be parsed into line numbers: flush the buffered
            # sample (if any) and skip this key.
            if prev_should_add == True and len(prev_signs) != 0:
                signs_vocab, transcription_vocab, translation_lengths, long_trs, very_long_trs, translation_vocab, \
                    could_divide_by_three_dots, could_not_divide = \
                    add_translation_to_file(prev_signs, signs_vocab, prev_transcription, transcription_vocab, prev_tr,
                                            translation_lengths, long_trs, very_long_trs, translation_vocab, prev_text,
                                            prev_start_line, prev_end_line, signs_file, transcription_file,
                                            translation_file, could_divide_by_three_dots, could_not_divide, False, divide_by_three_dots)
            prev_should_add = False
            continue
        cur_signs = []
        cur_transcription = []
        for n in range(start_line, end_line + 1):
            k = text + "." + str(n)
            if k not in chars_sentences.keys():
                # Handle lines divided between sentences.
                if start_line == end_line:
                    # Single-line sample: which half of the split line belongs
                    # to this sample depends on whether it continues prev_key.
                    if prev_key[1] == key[0]:
                        if k + "(part 2)" in chars_sentences.keys():
                            k = k + "(part 2)"
                            start_line = str(start_line) + "(part 2)"
                            end_line = start_line
                        else:
                            continue
                    else:
                        if k + "(part 1)" in chars_sentences.keys():
                            k = k + "(part 1)"
                            start_line = str(start_line) + "(part 1)"
                            end_line = start_line
                        else:
                            continue
                elif n == start_line and k + "(part 2)" in chars_sentences.keys():
                    # Multi-line sample starting on the second half of a split line.
                    k = k + "(part 2)"
                    start_line = str(start_line) + "(part 2)"
                elif n == end_line and k + "(part 1)" in chars_sentences.keys():
                    # Multi-line sample ending on the first half of a split line.
                    k = k + "(part 1)"
                    end_line = str(end_line) + "(part 1)"
                else:
                    continue
            # Accumulate signs (c[3]) and (transliteration, delimiter) pairs.
            for c in chars_sentences[k]:
                cur_signs.append(c[3])
                delim = c[2] if not c[2] is None else " "
                cur_transcription.append((c[1], delim))
        cur_tr = translations[key]
        if text == prev_text and start_line == prev_end_line:
            # The translation is not accurate, because it didn't give exact division point, so we don't use it.
            prev_should_add = False
        else:
            # Flush the previous sample before buffering the current one.
            if prev_should_add == True and len(prev_signs) != 0:
                signs_vocab, transcription_vocab, translation_lengths, long_trs, very_long_trs, translation_vocab, \
                    could_divide_by_three_dots, could_not_divide = \
                    add_translation_to_file(prev_signs, signs_vocab, prev_transcription, transcription_vocab, prev_tr,
                                            translation_lengths, long_trs, very_long_trs, translation_vocab, prev_text,
                                            prev_start_line, prev_end_line, signs_file, transcription_file,
                                            translation_file, could_divide_by_three_dots, could_not_divide, False, divide_by_three_dots)
            prev_should_add = True
        # Buffer the current sample for the next iteration's overlap check.
        prev_text = text
        prev_start_line = start_line
        prev_end_line = end_line
        prev_key = key
        prev_signs = cur_signs
        prev_transcription = cur_transcription
        prev_tr = cur_tr
    # Flush the final buffered sample.
    if prev_should_add == True and len(prev_signs) != 0:
        signs_vocab, transcription_vocab, translation_lengths, long_trs, very_long_trs, translation_vocab, \
            could_divide_by_three_dots, could_not_divide = \
            add_translation_to_file(prev_signs, signs_vocab, prev_transcription, transcription_vocab, prev_tr,
                                    translation_lengths, long_trs, very_long_trs, translation_vocab, prev_text,
                                    prev_start_line, prev_end_line, signs_file, transcription_file,
                                    translation_file, could_divide_by_three_dots, could_not_divide, False, divide_by_three_dots)
    print_statistics(translation_lengths, long_trs, very_long_trs, signs_vocab, transcription_vocab,
                     translation_vocab, could_divide_by_three_dots, could_not_divide)
    signs_file.close()
    transcription_file.close()
    translation_file.close()
def preprocess(corpora, divide_by_three_dots):
    """
    Process corpora for the input of the translation algorithms.
    :param corpora: corpora to process
    :param divide_by_three_dots: whether output sentences are split on "..." markers
    :return: nothing
    """
    chars_sentences, mapping = build_full_line_translation_process(corpora,
                                                                   True,
                                                                   Path(r"../NMT_input/signs_per_line.txt"),
                                                                   Path(r"../NMT_input/transcriptions_per_line.txt"),
                                                                   Path(r"../NMT_input/translation_per_line.txt"))
    translations = build_translations(corpora, mapping)
    # The two original branches differed only in the output directory; pick it once.
    if divide_by_three_dots:
        base = r"../NMT_input/"
    else:
        base = r"../NMT_input/not_divided_by_three_dots/"
    write_translations_to_file(chars_sentences,
                               translations,
                               Path(base + "signs.txt"),
                               Path(base + "transcriptions.txt"),
                               Path(base + "translation.txt"),
                               divide_by_three_dots)
def preprocess_not_translated_corpora(corpora):
    """
    Build only the transliteration input file for corpora that have no translations.
    :param corpora: corpora to process
    :return: nothing
    """
    # The return value is intentionally ignored: only the side effect of
    # writing for_translation.tr matters here.
    build_full_line_translation_process(corpora,
                                        False,
                                        None,
                                        Path(r"../NMT_input/for_translation.tr"),
                                        None)
def write_train_valid_test_files(file_type, lang, valid_lines, test_lines, divide_by_three_dots):
    """
    Split one input file into train/valid/test files by line index.
    :param file_type: base name of the source file ("signs", "transcriptions", "translation")
    :param lang: language extension for the output files ("ak", "tr", "en")
    :param valid_lines: line indices that go to the validation set
    :param test_lines: line indices that go to the test set
    :param divide_by_three_dots: selects which input/output directory to use
    :return: nothing, the three split files are written
    """
    if divide_by_three_dots:
        base = r"../NMT_input/"
    else:
        base = r"../NMT_input/not_divided_by_three_dots/"
    # Sets give O(1) membership tests instead of an O(n) list scan per line.
    valid_set = set(valid_lines)
    test_set = set(test_lines)
    # Context managers guarantee the files are closed even if a write fails
    # (the original leaked all four handles on error).
    with open(Path(base + file_type + ".txt"), "r", encoding="utf8") as f, \
            open(Path(base + "train." + lang), "w", encoding="utf8") as train, \
            open(Path(base + "valid." + lang), "w", encoding="utf8") as valid, \
            open(Path(base + "test." + lang), "w", encoding="utf8") as test:
        for i, line in enumerate(f):
            if i in valid_set:
                valid.write(line)
            elif i in test_set:
                test.write(line)
            else:
                train.write(line)
def divide_to_train_valid_test(divide_by_three_dots):
    """
    Randomly assign each corpus line: ~5% to validation, ~5% to test, the rest to train.
    :param divide_by_three_dots: selects which signs file to count lines from
    :return: (valid_lines, test_lines) — lists of line indices for each split
    """
    if divide_by_three_dots:
        file = Path(r"../NMT_input/signs.txt")
    else:
        file = Path(r"../NMT_input/not_divided_by_three_dots/signs.txt")
    # Bug fix: the original did `for i, line in enumerate(f): pass` and then read
    # `i + 1`, which raised NameError on an empty file. Counting directly is
    # both safe and clearer.
    with open(file, "r", encoding="utf8") as f:
        line_number = sum(1 for _ in f)
    valid_lines = []
    test_lines = []
    # The original also accumulated a train_lines list that was never returned
    # or used; train membership is implicit (not valid and not test).
    for j in range(line_number):
        random_number = randint(1, 20)  # 1/20 -> valid, 1/20 -> test, 18/20 -> train
        if random_number == 1:
            valid_lines.append(j)
        elif random_number == 2:
            test_lines.append(j)
    return valid_lines, test_lines
def build_train_valid_test(divide_by_three_dots):
    """
    Draw a random train/valid/test split, persist it to pickle files (verifying
    the round trip), and write the split data files for all three streams.
    :param divide_by_three_dots: selects which directory holds the inputs/outputs
    :return: nothing
    """
    if divide_by_three_dots:
        valid_lines_pkl = Path(r"../NMT_input/valid_lines.pkl")
        test_lines_pkl = Path(r"../NMT_input/test_lines.pkl")
    else:
        valid_lines_pkl = Path(r"../NMT_input/not_divided_by_three_dots/valid_lines.pkl")
        test_lines_pkl = Path(r"../NMT_input/not_divided_by_three_dots/test_lines.pkl")
    valid_lines, test_lines = divide_to_train_valid_test(divide_by_three_dots)

    def persist_and_reload(lines, pkl_path):
        # Dump the split, read it back, and check the round trip is lossless.
        with open(pkl_path, "wb") as handle:
            pickle.dump(lines, handle)
        with open(pkl_path, "rb") as handle:
            reloaded = pickle.load(handle)
        assert lines == reloaded
        return reloaded

    valid_lines_file = persist_and_reload(valid_lines, valid_lines_pkl)
    test_lines_file = persist_and_reload(test_lines, test_lines_pkl)
    for file_type, lang in (("signs", "ak"), ("transcriptions", "tr"), ("translation", "en")):
        write_train_valid_test_files(file_type, lang, valid_lines_file, test_lines_file, divide_by_three_dots)
def main():
    """
    Build all the input data files for the translation algorithms.
    :return: nothing
    """
    translated_corpora = ["rinap", "riao", "ribo", "saao", "suhu"]
    untranslated_corpora = ["atae"]
    use_three_dot_division = False
    preprocess(translated_corpora, use_three_dot_division)
    build_train_valid_test(use_three_dot_division)
    preprocess_not_translated_corpora(untranslated_corpora)
# Entry point: only run when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
from django.apps import AppConfig
class WagtailDjangoAdminConfig(AppConfig):
    """Django application configuration for the wagtail_django_admin app."""
    # Dotted module path Django uses to register this application.
    name = 'wagtail_django_admin'
from app import db
__author__ = 'kapnuu'
class Visitor(db.Model):
    """A tracked site visitor, identified by ETag and remote address."""
    __tablename__ = 'visitor'
    id = db.Column(db.Integer, primary_key=True)
    # HTTP ETag used to recognize a returning visitor; indexed for lookup.
    etag = db.Column(db.String(32), index=True)
    # Client IP address (16 chars fits dotted IPv4 — NOTE(review): too short
    # for full IPv6 addresses; confirm whether IPv6 clients are expected).
    remote_addr = db.Column(db.String(16), index=True)
    t_last_seen = db.Column(db.DateTime)
    # Index of the last cat shown to this visitor; -1 means none yet.
    last_cat_idx = db.Column(db.Integer, default=-1)
    def get_mod_time(self) -> str:
        """Return the row id, interpreted as a number of seconds, formatted HH:MM:SS."""
        s = self.id % 60
        m = self.id // 60 % 60
        h = self.id // 60 // 60
        return f'{h:02}:{m:02}:{s:02}'
class Cat(db.Model):
    """A cat picture referenced by URL, with an explicit ordering index."""
    __tablename__ = 'cat'
    id = db.Column(db.Integer, primary_key=True)
    # Ordering/display position; indexed for ordered queries.
    index = db.Column(db.Integer, index=True)
    # Location of the image (up to 320 chars).
    url = db.Column(db.String(320))
    # Soft-disable flag — presumably disabled cats are filtered out by the
    # serving code; verify against the query layer.
    disabled = db.Column(db.Boolean, default=False)
class Thumbnail(db.Model):
    """Cached thumbnail image data for a cat (one per cat, per the unique key)."""
    __tablename__ = 'thumbnail'
    id = db.Column(db.Integer, primary_key=True)
    # One thumbnail per cat: unique + indexed for direct lookup by cat.
    cat_id = db.Column(db.Integer, unique=True, index=True)
    # Raw image bytes.
    data = db.Column(db.LargeBinary)
    width = db.Column(db.Integer)
|
# Copyright 2017 Hugh Salimbeni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from numpy.testing import assert_almost_equal
import numpy as np
from bayesian_benchmarks.data import regression_datasets, get_regression_data, get_classification_data
@pytest.mark.parametrize('d', ['boston'])
def test_regression(d):
    """Check regression datasets are standardized and consistently shaped."""
    data = get_regression_data(d)
    # Features are whitened over train+test combined: mean ~0, std ~1 per column.
    assert_almost_equal(np.average(np.concatenate([data.X_train, data.X_test], 0), 0),
                        np.zeros(data.X_train.shape[1]))
    assert_almost_equal(np.std(np.concatenate([data.X_train, data.X_test], 0), 0),
                        np.ones(data.X_train.shape[1]),
                        decimal=3)
    # Targets are whitened the same way.
    assert_almost_equal(np.average(np.concatenate([data.Y_train, data.Y_test], 0), 0),
                        np.zeros(data.Y_train.shape[1]))
    assert_almost_equal(np.std(np.concatenate([data.Y_train, data.Y_test], 0), 0),
                        np.ones(data.Y_train.shape[1]),
                        decimal=3)
    # Train/test rows pair up, and feature/target dimensions agree across splits.
    assert data.X_train.shape[0] == data.Y_train.shape[0]
    assert data.X_test.shape[0] == data.Y_test.shape[0]
    assert data.X_train.shape[1] == data.X_test.shape[1]
    assert data.Y_train.shape[1] == data.Y_test.shape[1]
@pytest.mark.parametrize('d', ['iris', 'thyroid'])
def test_classification(d):
    """Check classification datasets are standardized and report the right class count."""
    data = get_classification_data(d)
    # Features are whitened over train+test combined: mean ~0, std ~1 per column.
    assert_almost_equal(np.average(np.concatenate([data.X_train, data.X_test], 0), 0),
                        np.zeros(data.X_train.shape[1]))
    assert_almost_equal(np.std(np.concatenate([data.X_train, data.X_test], 0), 0),
                        np.ones(data.X_train.shape[1]),
                        decimal=3)
    # The number of distinct labels across both splits must match data.K.
    K = len(list(set(np.concatenate([data.Y_train, data.Y_test], 0).astype(int).flatten())))
    assert K == data.K
    # Train/test rows pair up, and feature/target dimensions agree across splits.
    assert data.X_train.shape[0] == data.Y_train.shape[0]
    assert data.X_test.shape[0] == data.Y_test.shape[0]
    assert data.X_train.shape[1] == data.X_test.shape[1]
    assert data.Y_train.shape[1] == data.Y_test.shape[1]
|
from typing import Optional
from PyQt5 import QtWidgets, QtCore
from .scanview_ui import Ui_Form
from ..utils.plotscan import PlotScan
from ..utils.window import WindowRequiresDevices
from ...core2.dataclasses import Scan
class ScanViewer(QtWidgets.QWidget, WindowRequiresDevices, Ui_Form):
    """Widget listing recorded scans; opens a plot window for the selected scan."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.setupUi(self)
    def setupUi(self, Form):
        """Bind the scan model to the tree view and wire up the signals."""
        super().setupUi(Form)
        self.treeView.setModel(self.instrument.scan)
        # Keep columns sized to content as the model grows or resets.
        self.instrument.scan.rowsInserted.connect(self.resizeTreeColumns)
        self.instrument.scan.modelReset.connect(self.resizeTreeColumns)
        self.showPushButton.clicked.connect(self.showScan)
        # Double-click / Enter on a row also opens the scan.
        self.treeView.activated.connect(self.showScan)
        self.resizeTreeColumns()
    def resizeTreeColumns(self):
        """Resize every tree column to fit its contents."""
        for c in range(self.instrument.scan.columnCount()):
            self.treeView.resizeColumnToContents(c)
    def showScan(self, index: Optional[QtCore.QModelIndex] = None):
        """Open a plot window for the scan at `index` (or the current selection).

        The `clicked` signal passes a bool, not an index; the isinstance check
        below falls back to the tree view's current selection in that case.
        """
        if not isinstance(index, QtCore.QModelIndex):
            index = self.treeView.currentIndex()
        scan = index.data(QtCore.Qt.UserRole)
        assert isinstance(scan, Scan)
        # singleton=False: each scan opens in its own plot window.
        plotscan = self.mainwindow.addSubWindow(PlotScan, singleton=False)
        plotscan.setScan(scan)
|
#!/usr/bin/env python
"""This module takes two classification outputs from source and targer puppet infrastructure and
takes the user defintions from the source and adds them to the infrastructure defintions of the
target. Allowing the ability to restore a backup of user node definitions"""
import json
import sys
# Runtime parameters arrive as JSON on stdin (presumably a Puppet/Bolt task
# invocation — confirm against the task metadata).
params = json.load(sys.stdin)
# Source backup, target (DR) backup, and the merged output file.
source_classification_file = params['source_directory']+"/classification_backup.json"
target_classification_file = params['working_directory']+"/classification_backup.json"
transformed_classification_file = params['working_directory']+"/transformed_classification.json"
def removesubgroups(data_rsg, id_rsg):
    """
    Recursively remove every descendant of the given parent group.

    Walks the flat list of node groups, finds the direct children of
    `id_rsg`, drops each one, and recurses into its subtree.
    Returns
    -------
    data_rsg : list
        The resource groups with the whole subtree of id_rsg removed
    """
    children = [group for group in data_rsg if group["parent"] == id_rsg]
    for child in children:
        child_id = child["id"]
        data_rsg = [group for group in data_rsg if group["id"] != child_id]
        data_rsg = removesubgroups(data_rsg, child_id)
    return data_rsg
# Collects the whole PE infrastructure subtree from a flat group list.
def addsubgroups(data_asg, id_asg, peinf_asg):
    """
    Recursively gather every descendant of the given parent group.

    Finds the direct children of `id_asg` in the flat list, appends them to
    the accumulator, and recurses into each child's subtree.
    Returns
    -------
    peinf_asg : list
        The accumulated list of pe infrastructure resource groups
    """
    children = [group for group in data_asg if group["parent"] == id_asg]
    peinf_asg = peinf_asg + children
    for child in children:
        peinf_asg = addsubgroups(data_asg, child["id"], peinf_asg)
    return peinf_asg
# open the backup classification (source of the user node definitions)
with open(source_classification_file) as data_file:
    data = json.load(data_file)
# open the DR server classification (target infrastructure definitions)
with open(target_classification_file) as data_fileDR:
    data_DR = json.load(data_fileDR)
# find the infrastructure group and its ID in the source backup
peinf = list(filter(lambda x:x ["name"]=="PE Infrastructure",data))
group_id = peinf[0]["id"]
# remove this group from the list and recursively remove all sub groups,
# leaving only the user-defined groups from the source
data = list(filter(lambda x:x ["id"]!=group_id,data))
data = removesubgroups(data,group_id)
# find the dr infrastructure group and its ID
peinf_DR = list(filter(lambda x:x ["name"]=="PE Infrastructure",data_DR))
id_DR = peinf_DR[0]["id"]
# Recursively go through inf groups to get the full tree
peinf_DR = addsubgroups(data_DR,id_DR,peinf_DR)
# Add the contents of the backup classification without pe inf to the DR pe inf groups
# and write to a file
peinf_transformed_groups = data + peinf_DR
with open(transformed_classification_file, 'w') as fp:
    json.dump(peinf_transformed_groups, fp)
|
from collections import UserList
from numbers import Real
from typing import List, Optional, Iterable, Union, Set, Iterator, Dict
from ....file.report import Report, DataType, Field, AbstractField
from ._LocatedObject import LocatedObject
from . import constants
class LocatedObjects(UserList):
    """
    A list of LocatedObject instances with helpers for subsetting, lookup by
    index, scaling, and (de)serialization to/from Report objects.
    """
    def __init__(self, objects: Optional[Iterable[LocatedObject]] = None):
        super().__init__(objects)
    def subset(self, indices: List[int]) -> "LocatedObjects":
        """
        Returns a new instance using the specified object indices.
        :param indices: The indices for the subset.
        :return: The subset.
        """
        # Ensure unique indices
        indices = set(indices)
        return LocatedObjects((obj for obj in self if obj.get_index() in indices))
    def remove(self, item: Union[LocatedObject, List[int]]) -> None:
        """
        Removes the objects with the specified indices.

        Overloaded: a LocatedObject argument delegates to list.remove;
        a list of ints removes every object whose index is in the list.
        :param item: The object, or the indices to remove.
        """
        # List implementation
        if isinstance(item, LocatedObject):
            return super().remove(item)
        # Ensure unique indices
        indices: Set[int] = set(item)
        # Pop any objects with indices in the set; i only advances when the
        # current element is kept, since pop(i) shifts the rest left.
        i: int = 0
        while i < len(self):
            if self[i].get_index() in indices:
                self.pop(i)
            else:
                i += 1
    def find(self, index: Union[int, str]) -> Optional[LocatedObject]:
        """
        Returns the object with the specified index.
        :param index: The index to look for.
        :return: The object, None if not found.
        """
        # Get both a string and integer representation, so either form of the
        # stored index can match.
        if isinstance(index, str):
            try:
                int_index = int(index)
            except Exception:
                int_index = None
        else:
            int_index = index
            index = str(index)
        for obj in self:
            # Try for exact match
            obj_index_string: str = obj.get_index_string()
            if obj_index_string is not None and obj_index_string == index:
                return obj
            # Try for numeric match
            if int_index is not None and obj.get_index() == int_index:
                return obj
        return None
    def scale(self, scale: float):
        """
        Scales all objects with the provided scale factor.
        :param scale: The scale factor.
        """
        for obj in self:
            obj.scale(scale)
    def to_report(self, prefix: str = "Object", offset: int = 0, update_index: bool = False) -> Report:
        """
        Turns the located objects into a report. Using a prefix like "Object." will
        result in the following report entries for a single object:
        Object.1.x
        Object.1.y
        Object.1.width
        Object.1.height
        Object.1.poly_x -- if polygon data present
        Object.1.poly_y -- if polygon data present
        :param prefix: The prefix to use.
        :param offset: The offset for the index to use.
        :param update_index: Whether to update the index in the metadata.
        :return: The generated report.
        """
        # Make sure the prefix doesn't end in a dot
        if prefix.endswith("."):
            prefix = prefix[:-1]
        # Create the empty report
        result: Report = Report()
        # Create a shortcut for adding values
        def add_value(count_string: str, suffix: str, type: DataType, value):
            field: Field = Field(f"{prefix}.{count_string}.{suffix}", type)
            result.add_field(field)
            result.set_value(field, value)
        count: int = 0
        # Zero-pad counts to the width of the largest index so entries sort.
        width: int = len(str(len(self)))
        for obj in self:
            count += 1
            count_string: str = str(count + offset).rjust(width, "0")
            # Metadata
            for key, value in obj.metadata.items():
                # Get the datatype of the meta-data
                type: DataType = DataType.UNKNOWN
                # NOTE(review): bool is a subclass of int (a Real), so boolean
                # values match the first branch and are reported as NUMERIC —
                # the BOOLEAN branch below is unreachable. Confirm intent.
                if isinstance(value, Real):
                    type = DataType.NUMERIC
                elif isinstance(value, bool):
                    type = DataType.BOOLEAN
                elif isinstance(value, str):
                    type = DataType.STRING
                add_value(count_string, key, type, value)
            # Index
            if update_index:
                add_value(count_string, constants.KEY_INDEX, DataType.STRING, count_string)
            add_value(count_string, constants.KEY_X, DataType.NUMERIC, obj.x)  # X
            add_value(count_string, constants.KEY_Y, DataType.NUMERIC, obj.y)  # Y
            add_value(count_string, constants.KEY_WIDTH, DataType.NUMERIC, obj.width)  # Width
            add_value(count_string, constants.KEY_HEIGHT, DataType.NUMERIC, obj.height)  # Height
            add_value(count_string, constants.KEY_LOCATION, DataType.STRING, obj.get_location())  # Location
            # Polygon
            if obj.has_polygon():
                add_value(count_string, constants.KEY_POLY_X, DataType.STRING, ",".join(map(str, obj.get_polygon_x())))
                add_value(count_string, constants.KEY_POLY_Y, DataType.STRING, ",".join(map(str, obj.get_polygon_y())))
        # Count
        field: Field = Field(f"{prefix}.{constants.KEY_COUNT}", DataType.NUMERIC)
        result.add_field(field)
        result.set_value(field, len(self))
        return result
    @classmethod
    def from_report(cls, report: Report, prefix: str = "Object.") -> "LocatedObjects":
        """
        Retrieves all object from the report.
        :param report: The report to process.
        :param prefix: The prefix to look for.
        :return: The objects found.
        """
        # Make sure the prefix ends in a dot
        if not prefix.endswith("."):
            prefix = prefix + "."
        result: LocatedObjects = LocatedObjects()
        fields: List[AbstractField] = report.get_fields()
        # Group fields by their "prefix.N" stem (everything before the last dot)
        groups: Dict[str, List[AbstractField]] = {}
        for field in fields:
            if field.name.startswith(prefix):
                current: str = field.name[:field.name.rindex(".")]
                if current not in groups:
                    groups[current] = []
                groups[current].append(field)
        # Process grouped fields
        for group, group_fields in groups.items():
            # Meta-data
            meta = {}
            # Skip the bare prefix group (e.g. the count field's stem)
            if len(group) <= len(prefix):
                continue
            meta[constants.KEY_INDEX] = group[len(prefix):]
            for field in group_fields:
                # Geometry keys are handled below, everything else is metadata
                if field.name.endswith((constants.KEY_X, constants.KEY_Y, constants.KEY_WIDTH, constants.KEY_HEIGHT)):
                    continue
                meta[field.name[field.name.rindex(".") + 1:]] = report.get_value(field)
            try:
                # Only reconstruct an object when the full bounding box is present
                if (report.has_value(f"{group}.{constants.KEY_X}") and
                        report.has_value(f"{group}.{constants.KEY_Y}") and
                        report.has_value(f"{group}.{constants.KEY_WIDTH}") and
                        report.has_value(f"{group}.{constants.KEY_HEIGHT}")):
                    x: int = round(report.get_real_value(f"{group}.{constants.KEY_X}"))
                    y: int = round(report.get_real_value(f"{group}.{constants.KEY_Y}"))
                    width: int = round(report.get_real_value(f"{group}.{constants.KEY_WIDTH}"))
                    height: int = round(report.get_real_value(f"{group}.{constants.KEY_HEIGHT}"))
                    obj: LocatedObject = LocatedObject(x, y, width, height, **meta)
                    result.append(obj)
                    # Polygon
                    if (report.has_value(f"{group}.{constants.KEY_POLY_X}") and
                            report.has_value(f"{group}.{constants.KEY_POLY_Y}")):
                        obj.metadata[constants.KEY_POLY_X] = report.get_string_value(f"{group}.{constants.KEY_POLY_X}")
                        obj.metadata[constants.KEY_POLY_Y] = report.get_string_value(f"{group}.{constants.KEY_POLY_Y}")
            except Exception:
                # Ignored
                pass
        return result
    # PyCharm doesn't seem to be able to work out the typing for this
    def __iter__(self) -> Iterator[LocatedObject]:
        return super().__iter__()
|
import pathlib
from setuptools import setup # type: ignore
# Distribution metadata for the dynamo-dao package (setuptools).
setup(
    name="dynamo-dao",
    version="0.0.7",
    description="Dynamo Dao",
    author="Quinn Weber",
    author_email="quinn@quinnweber.com",
    maintainer="Quinn Weber",
    maintainer_email="quinn@quinnweber.com",
    url="https://github.com/qsweber/dynamo-dao",
    license="MIT",
    # PyPI long description comes straight from the README next to this file.
    long_description=(pathlib.Path(__file__).parent / "README.md").read_text(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Ship the PEP 561 marker so type checkers pick up the inline annotations.
    package_data={"dynamo_dao": ["py.typed"]},
    packages=["dynamo_dao"],
    # src-layout: package code lives under ./src.
    package_dir={"": "src"},
    install_requires=["Boto3"],
)
|
# Copyright (c) 2021, Weslati Baha Eddine and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Font(Document):
    """Frappe doctype that builds the CSS snippet for a configured font."""
    def validate(self):
        """Populate self.css from the configured font source before saving."""
        # Google Fonts: the stored snippet/link is used verbatim.
        if self.type =="Google Fonts" :
            self.css=self.googlelinks
        if self.type=="Otf file":
            # self.name (the document name) is used as the CSS font-family.
            if self.is_url==1:
                # Font file referenced by an external URL.
                self.css="""
                <style>
                @font-face {
                    font-family:"""+ self.name+""";
                    src:url('"""+self.file_link+"""');
                }
                </style>
                """
            else:
                # Font file uploaded as an attachment.
                self.css="""
                <style>
                @font-face {
                    font-family:"""+ self.name+""";
                    src:url('"""+self.file+"""');
                }
                </style>
                """
        # no-op; kept from the original
        pass
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.9.1
# widgets:
# application/vnd.jupyter.widget-state+json:
# state: {}
# version_major: 2
# version_minor: 0
# ---
# %% [markdown]
# # The NDDataset object
# %% [markdown]
# The NDDataset is the main object used by **SpectroChemPy**.
#
# Like numpy ndarrays, NDDatasets have the capability to be sliced, sorted and subject to mathematical operations.
#
# But, in addition, NDDataset may have units, can be masked and each dimension can have coordinates, also with units.
# This makes NDDataset aware of unit compatibility, *e.g.*, for binary operations such as additions or subtractions, or
# during the application of mathematical operations. In addition or in replacement of numerical data for coordinates,
# NDDataset can also have labeled coordinates where labels can be different kinds of objects (strings, datetime,
# numpy ndarray or other NDDatasets, etc.).
#
# This offers a lot of flexibility in using NDDatasets that, we hope, will be useful for applications.
# See the **Tutorials** for more information about such possible applications.
# %% [markdown]
# **Below (and in the next sections), we try to give an almost complete view of the NDDataset features.**
# %%
import spectrochempy as scp
# %% [markdown]
# As we will make some reference to the `numpy` library, we also import it here.
# %%
import numpy as np
# %% [markdown]
# We additionally import the three main SpectroChemPy objects that we will use throughout this tutorial
# %%
from spectrochempy import NDDataset, CoordSet, Coord
# %% [markdown]
# For a convenient usage of units, we will also directly import `ur`, the unit registry which contains all available
# units.
# %%
from spectrochempy import ur
# %% [markdown]
# Multidimensional array are defined in Spectrochempy using the `NDDataset` object.
#
# `NDDataset` objects mostly behave as numpy's `numpy.ndarray`
# (see for instance __[numpy quickstart tutorial](https://numpy.org/doc/stable/user/quickstart.html)__).
# %% [markdown]
# However, unlike raw numpy's ndarray, the presence of optional properties make them (hopefully) more appropriate for
# handling spectroscopic information, one of the major objectives of the SpectroChemPy package:
#
# * `mask`: Data can be partially masked at will
# * `units`: Data can have units, allowing units-aware operations
# * `coordset`: Data can have a set of coordinates, one or several per dimension
#
# Additional metadata can also be added to the instances of this class through the `meta` properties.
# %% [markdown]
# ## 1D-Dataset (unidimensional dataset)
# %% [markdown]
# In the following example, a minimal 1D dataset is created from a simple list, to which we can add some metadata:
# %%
d1D = NDDataset(
[10.0, 20.0, 30.0],
name="Dataset N1",
author="Blake and Mortimer",
description="A dataset from scratch",
)
d1D
# %% [markdown]
# <div class='alert alert-info'>
# <b>Note</b>
#
# In the above code, run in a notebook, the output of d1D is in html for a nice display.
#
# To get the same effect, from a console script, one can use `print_` (with an underscore) and not the usual python
# function `print`. As you can see below, the `print` function only gives a short summary of the information,
# while the `print_` method gives more detailed output
#
# </div>
# %%
print(d1D)
# %%
scp.print_(d1D)
# %%
_ = d1D.plot(figsize=(3, 2))
# %% [markdown]
# Except for a few additional metadata such as `author`, `created` ..., there are not many
# differences with respect to a conventional `numpy.ndarray`. For example, one
# can apply numpy ufuncs directly to a NDDataset or perform basic arithmetic
# operations with these objects:
# %%
np.sqrt(d1D)
# %%
d1D + d1D / 2.0
# %% [markdown]
# As seen above, there are some metadata that are automatically added to the dataset:
#
# * `id` : This is a unique identifier for the object
# * `author` : author determined from the computer name if not provided
# * `created` : date/time of creation
# * `modified`: date/time of modification
#
# additionally, a dataset can have a **`name`** (equal to the `id` if it is not provided)
#
# Some other metadata are defined:
#
# * `history`: history of operation achieved on the object since the object creation
# * `description`: A user friendly description of the objects purpose or contents.
# * `title`: A title that will be used in plots or in some other operation on the objects.
#
#
# All this metadata (except, the `id`, `created`, `modified`) can be changed by the user.
#
# For instance:
# %%
d1D.title = "intensity"
d1D.name = "mydataset"
d1D.history = "created from scratch"
d1D.description = "Some experimental measurements"
d1D
# %% [markdown]
# d1D is a 1D (1-dimensional) dataset with only one dimension.
#
# Some attributes are useful to check this kind of information:
# %%
d1D.shape # the shape of 1D contain only one dimension size
# %%
d1D.ndim # the number of dimensions
# %%
d1D.dims # the name of the dimension (it has been automatically attributed)
# %% [markdown]
# **Note**: The names of the dimensions are set automatically. But they can be changed, with the limitation that the
# name must be a single letter.
# %%
d1D.dims = ["q"] # change the list of dim names.
# %%
d1D.dims
# %% [markdown]
# ### nD-Dataset (multidimensional dataset)
# %% [markdown]
# To create a nD NDDataset, we can provide a nD-array like object to the NDDataset instance constructor
# %%
a = np.random.rand(2, 4, 6)
a
# %%
d3D = NDDataset(a)
d3D.title = "energy"
d3D.author = "Someone"
d3D.name = "3D dataset creation"
d3D.history = "created from scratch"
d3D.description = "Some example"
d3D.dims = ["u", "v", "t"]
d3D
# %% [markdown]
# We can also add all information in a single statement
# %%
d3D = NDDataset(
a,
dims=["u", "v", "t"],
title="Energy",
author="Someone",
name="3D_dataset",
history="created from scratch",
description="a single statement creation example",
)
d3D
# %% [markdown]
# Three names are attributed at creation (if they are not provided with the `dims` attribute, the names
# 'z', 'y', 'x' are automatically attributed)
# %%
d3D.dims
# %%
d3D.ndim
# %%
d3D.shape
# %% [markdown]
# ## Units
# %% [markdown]
# One interesting possibility for a NDDataset is to have defined units for the internal data.
# %%
d1D.units = ur.eV # ur is a registry containing all available units
# %%
d1D # note the eV symbol of the units added to the values field below
# %% [markdown]
# This allows to make units-aware calculations:
# %%
d1D ** 2 # note the results in eV^2
# %%
np.sqrt(d1D) # note the result in e^0.5
# %%
time = 5.0 * ur.second
d1D / time # here we get results in eV/s
# %% [markdown]
# Conversion can be done between different units transparently
# %%
d1D.to("J")
# %%
d1D.to("K")
# %% [markdown]
# ## Coordinates
# %% [markdown]
# The above created `d3D` dataset has 3 dimensions, but no coordinate for these dimensions. Here arises a big difference
# with simple `numpy`-arrays:
# * We can add coordinates to each dimensions of a NDDataset.
# %% [markdown]
# To get the list of all defined coordinates, we can use the `coords` attribute:
# %%
d3D.coordset # no coordinates, so it returns nothing (None)
# %%
d3D.t # the same for coordinate t, v, u which are not yet set
# %% [markdown]
# To add coordinates, one way is to set them one by one:
# %%
d3D.t = (
Coord.arange(6) * 0.1
) # we need a sequence of 6 values for `t` dimension (see shape above)
d3D.t.title = "time"
d3D.t.units = ur.seconds
d3D.coordset # now return a list of coordinates
# %%
d3D.t
# %%
d3D.coordset("t") # Alternative way to get a given coordinates
# %%
d3D["t"] # another alternative way to get a given coordinates
# %% [markdown]
# The two other coordinates u and v are still undefined
# %%
d3D.u, d3D.v
# %% [markdown]
# When the dataset is printed, only the information for the existing coordinates is given.
# %%
d3D
# %% [markdown]
# Programmatically, we can use the attributes `is_empty` or `has_data` to check this
# %%
d3D.v.has_data, d3D.v.is_empty
# %% [markdown]
# An error is raised when a coordinate doesn't exist
# %%
try:
d3D.x
except KeyError as e:
scp.error_(e)
# %% [markdown]
# In some cases it can also be useful to get a coordinate from its title instead of its name (the limitation is that if
# several coordinates have the same title, then only the first one found in the coordinate list will be
# returned - this can be ambiguous)
# %%
d3D["time"]
# %%
d3D.time
# %% [markdown]
# ## Labels
# %% [markdown]
# It is possible to use labels instead of numerical coordinates. They are sequences of objects. The length of the
# sequence must be equal to the size of the dimension.
# %% [markdown]
# The labels can be simple strings, *e.g.,*
# %%
tags = list("ab")
d3D.u.title = "some tags"
d3D.u.labels = tags # TODO: avoid repetition
d3D
# %% [markdown]
# or more complex objects.
#
# For instance, here we use datetime.timedelta objects:
# %%
from datetime import timedelta
start = timedelta(0)
times = [start + timedelta(seconds=x * 60) for x in range(6)]
d3D.t = None
d3D.t.labels = times
d3D.t.title = "time"
d3D
# %% [markdown]
# In this case, getting a coordinate that doesn't possess numerical data but labels, will return the labels
# %%
d3D.time
# %% [markdown]
# # More insight on coordinates
# %% [markdown]
# ## Sharing coordinates between dimensions
# %% [markdown]
# Sometimes it is not necessary to have different coordinates for each axis. Some can be shared between axes.
#
# For example, if we have a square matrix with the same coordinate in the two dimensions, the second dimension can
# refer to the first. Here we create a square 2D dataset, using the `diag` method:
# %%
nd = NDDataset.diag((3, 3, 2.5))
nd
# %% [markdown]
# and then we add the same coordinate for both dimensions
# %%
coordx = Coord.arange(3)
nd.set_coordset(x=coordx, y="x")
nd
# %% [markdown]
# ## Setting coordinates using `set_coordset`
# %% [markdown]
# Let's create 3 `Coord` objects to be used as coordinates for the 3 dimensions of the previous d3D dataset.
# %%
d3D.dims = ["t", "v", "u"]
s0, s1, s2 = d3D.shape
coord0 = Coord.linspace(10.0, 100.0, s0, units="m", title="distance")
coord1 = Coord.linspace(20.0, 25.0, s1, units="K", title="temperature")
coord2 = Coord.linspace(0.0, 1000.0, s2, units="hour", title="elapsed time")
# %% [markdown]
# ### Syntax 1
# %%
d3D.set_coordset(u=coord2, v=coord1, t=coord0)
d3D
# %% [markdown]
# ### Syntax 2
# %%
d3D.set_coordset({"u": coord2, "v": coord1, "t": coord0})
d3D
# %% [markdown]
# ## Adding several coordinates to a single dimension
# We can add several coordinates to the same dimension
# %%
coord1b = Coord([1, 2, 3, 4], units="millitesla", title="magnetic field")
# %%
d3D.set_coordset(u=coord2, v=[coord1, coord1b], t=coord0)
d3D
# %% [markdown]
# We can retrieve the various coordinates for a single dimension easily:
# %%
d3D.v_1
# %% [markdown]
# ## Summary of the coordinate setting syntax
# Some additional information about coordinate setting syntax
# %% [markdown]
# **A.** First syntax (probably the safer because the name of the dimension is specified, so this is less prone to
# errors!)
# %%
d3D.set_coordset(u=coord2, v=[coord1, coord1b], t=coord0)
# or equivalent
d3D.set_coordset(u=coord2, v=CoordSet(coord1, coord1b), t=coord0)
d3D
# %% [markdown]
# **B.** Second syntax assuming the coordinates are given in the order of the dimensions.
#
# Remember that we can check this order using the `dims` attribute of a NDDataset
# %%
d3D.dims
# %%
d3D.set_coordset((coord0, [coord1, coord1b], coord2))
# or equivalent
d3D.set_coordset(coord0, CoordSet(coord1, coord1b), coord2)
d3D
# %% [markdown]
# **C.** Third syntax (from a dictionary)
# %%
d3D.set_coordset({"t": coord0, "u": coord2, "v": [coord1, coord1b]})
d3D
# %% [markdown]
# **D.** It is also possible to use directly the `coordset` property
# %%
d3D.coordset = coord0, [coord1, coord1b], coord2
d3D
# %%
d3D.coordset = {"t": coord0, "u": coord2, "v": [coord1, coord1b]}
d3D
# %%
d3D.coordset = CoordSet(t=coord0, u=coord2, v=[coord1, coord1b])
d3D
# %% [markdown]
# <div class='alert alert-warning'>
# <b>WARNING</b>
#
# Do not use list for setting multiples coordinates! use tuples
# </div>
# %% [markdown]
# This raises an error (a list has another meaning: it's used to set a "same dim" CoordSet, see example A or B)
# %%
try:
d3D.coordset = [coord0, coord1, coord2]
except ValueError:
scp.error_(
"Coordinates must be of the same size for a dimension with multiple coordinates"
)
# %% [markdown]
# This works: it uses a tuple `()`, not a list `[]`
# %%
d3D.coordset = (
coord0,
coord1,
coord2,
) # equivalent to d3D.coordset = coord0, coord1, coord2
d3D
# %% [markdown]
# **E.** Setting the coordinates individually
# %% [markdown]
# Either a single coordinate
# %%
d3D.u = coord2
d3D
# %% [markdown]
# or multiple coordinates for a single dimension
# %%
d3D.v = [coord1, coord1b]
d3D
# %% [markdown]
# or using a CoordSet object.
# %%
d3D.v = CoordSet(coord1, coord1b)
d3D
# %% [markdown]
# # Methods to create NDDataset
#
# There are many ways to create `NDDataset` objects.
#
# Let's first create 2 coordinate objects, for which we can define `labels` and `units`! Note the use of the function
# `linspace` to generate the data.
# %%
c0 = Coord.linspace(
start=4000.0, stop=1000.0, num=5, labels=None, units="cm^-1", title="wavenumber"
)
# %%
c1 = Coord.linspace(
10.0, 40.0, 3, labels=["Cold", "RT", "Hot"], units="K", title="temperature"
)
# %% [markdown]
# The full coordset will be the following
# %%
cs = CoordSet(c0, c1)
cs
# %% [markdown]
# Now we will generate the full dataset, using a ``fromfunction`` method. All needed information is passed as
# parameters to the NDDataset instance constructor.
# %% [markdown]
# ## Create a dataset from a function
# %%
def func(x, y, extra):
    """Return the product of *x* and *y* divided by *extra*."""
    product = x * y
    return product / extra
# %%
ds = NDDataset.fromfunction(
func,
extra=100 * ur.cm ** -1, # extra arguments passed to the function
coordset=cs,
name="mydataset",
title="absorbance",
units=None,
) # when None, units will be determined from the function results
ds.description = """Dataset example created for this tutorial.
It's a 2-D dataset"""
ds.author = "Blake & Mortimer"
ds
# %% [markdown]
# ## Using numpy-like constructors of NDDatasets
# %%
dz = NDDataset.zeros(
(5, 3), coordset=cs, units="meters", title="Datasets with only zeros"
)
# %%
do = NDDataset.ones(
(5, 3), coordset=cs, units="kilograms", title="Datasets with only ones"
)
# %%
df = NDDataset.full(
(5, 3), fill_value=1.25, coordset=cs, units="radians", title="with only float=1.25"
)
df
# %% [markdown]
# As with numpy, it is also possible to take another dataset as a template:
# %%
df = NDDataset.full_like(d3D, dtype="int", fill_value=2)
df
# %%
nd = NDDataset.diag((3, 3, 2.5))
nd
# %% [markdown]
# ## Copying existing NDDataset
#
# To copy an existing dataset, this is as simple as:
# %%
d3D_copy = d3D.copy()
# %% [markdown]
# or alternatively:
# %%
d3D_copy = d3D[:]
# %% [markdown]
# Finally, it is also possible to initialize a dataset using an existing one:
# %%
d3Dduplicate = NDDataset(d3D, name="duplicate of %s" % d3D.name, units="absorbance")
d3Dduplicate
# %% [markdown]
# ## Importing from external dataset
#
# NDDataset can be created from the importation of external data
#
# A **test** data folder contains some data for experimenting with some features of datasets.
# %%
# let check if this directory exists and display its actual content:
datadir = scp.preferences.datadir
if datadir.exists():
print(datadir.name)
# %% [markdown]
# Let's load grouped IR spectra acquired using OMNIC:
# %%
nd = NDDataset.read_omnic(datadir / "irdata/nh4y-activation.spg")
nd.preferences.reset()
_ = nd.plot()
# %% [markdown]
# Even if we do not specify the **datadir**, the application first looks in that directory by default.
# %% [markdown]
# Now, lets load a NMR dataset (in the Bruker format).
# %%
path = datadir / "nmrdata" / "bruker" / "tests" / "nmr" / "topspin_2d"
# load the data directly (no need to create the dataset first)
nd2 = NDDataset.read_topspin(path, expno=1, remove_digital_filter=True)
# view it...
nd2.x.to("s")
nd2.y.to("ms")
ax = nd2.plot(method="map")
|
#-------------------------------------------------------------------------------
# NodeCoupling
#-------------------------------------------------------------------------------
from PYB11Generator import *
@PYB11module("SpheralSPH")
class NodeCoupling:
    "A functor base class encapsulating how we couple pairs of nodes."

    # PYB11 binding stub: generates the default constructor.
    def pyinit(self):
        "Default constructor"

    # Bound as the C++ call operator; returns the coupling coefficient for
    # the node pair (nodeListi, i) <-> (nodeListj, j).  The string parameter
    # defaults below are PYB11 type declarations, not runtime defaults.
    @PYB11virtual
    @PYB11const
    @PYB11cppname("operator()")
    def __call__(self,
                 nodeListi = "const unsigned",
                 i = "const unsigned",
                 nodeListj = "const unsigned",
                 j = "const unsigned"):
        "Functional method to override for coupling (nodeListi, i) <-> (nodeListj, j)"
        return "double"
#-------------------------------------------------------------------------------
# DamagedNodeCoupling
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
class DamagedNodeCoupling(NodeCoupling):
    "A functor class encapsulating how we couple solid nodes in the presence of multiple materials and damage."

    # Injected verbatim into the generated C++ binding code.
    PYB11typedefs = """
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename %(Dimension)s::SymTensor SymTensor;
"""

    # Constructor taking the damage field, its gradient, and the H tensors.
    def pyinit(self,
               damage = "const FieldList<%(Dimension)s, SymTensor>&",
               damageGradient = "const FieldList<%(Dimension)s, Vector>&",
               H = "const FieldList<%(Dimension)s, SymTensor>&"):
        "Constructor"

    # Overrides NodeCoupling::operator() with a damage-dependent coupling.
    @PYB11virtual
    @PYB11const
    @PYB11cppname("operator()")
    def __call__(self,
                 nodeListi = "const unsigned",
                 i = "const unsigned",
                 nodeListj = "const unsigned",
                 j = "const unsigned"):
        "Provides a damaged coupling between nodes (nodeListi, i) <-> (nodeListj, j)"
        return "double"
#-------------------------------------------------------------------------------
# DamagedNodeCouplingWithFrags
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
class DamagedNodeCouplingWithFrags(DamagedNodeCoupling):
    """A functor class encapsulating how we couple solid nodes in the presence of
multiple materials and damage.  This version adds logic to decouple based
on fragment ID as well."""

    # Injected verbatim into the generated C++ binding code.
    PYB11typedefs = """
    typedef typename %(Dimension)s::Scalar Scalar;
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename %(Dimension)s::Tensor Tensor;
    typedef typename %(Dimension)s::SymTensor SymTensor;
"""

    # Constructor: same fields as DamagedNodeCoupling plus per-node fragment IDs.
    def pyinit(self,
               damage = "const FieldList<%(Dimension)s, SymTensor>&",
               damageGradient = "const FieldList<%(Dimension)s, Vector>&",
               H = "const FieldList<%(Dimension)s, SymTensor>&",
               fragIDs = "const FieldList<%(Dimension)s, int>&"):
        "Constructor"

    # Overrides operator() to additionally decouple nodes in different fragments.
    @PYB11virtual
    @PYB11const
    @PYB11cppname("operator()")
    def __call__(self,
                 nodeListi = "const unsigned",
                 i = "const unsigned",
                 nodeListj = "const unsigned",
                 j = "const unsigned"):
        "Provides a damaged coupling between nodes (nodeListi, i) <-> (nodeListj, j)"
        return "double"
|
from django.apps import AppConfig
class MovieRaterApiConfig(AppConfig):
    """Django application configuration for the movie_rater_api app."""
    name = 'movie_rater_api'
|
from torch.nn.functional import conv2d, conv3d
from torch.nn.modules.utils import _pair, _triple
from .reparam_layers import RTLayer, LRTLayer
class Conv2dRT(RTLayer):
    """2D convolution whose weights/bias are random variables.

    The variational machinery (reparameterization trick, priors/posteriors,
    KL computation) lives in the RTLayer base class; this class only builds
    the parameter shapes for ``conv2d`` and forwards the convolution options.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bias=True,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior=None,
                 posteriors=None,
                 kl_type='reverse'):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        # (out_ch, in_ch, kH, kW) — the layout torch.nn.functional.conv2d expects.
        weight_size = (out_channels, in_channels, self.kernel_size[0], self.kernel_size[1])
        # Fix: the original "(out_channels)" was a plain int wrapped in
        # misleading parentheses (not a 1-tuple); keep the same int value.
        bias_size = out_channels if bias else None
        super(Conv2dRT, self).__init__(layer_fn=conv2d,
                                       weight_size=weight_size,
                                       bias_size=bias_size,
                                       prior=prior,
                                       posteriors=posteriors,
                                       kl_type=kl_type,
                                       stride=stride,
                                       padding=padding,
                                       dilation=dilation,
                                       groups=groups)
class Conv3dRT(RTLayer):
    """3D convolution whose weights/bias are random variables.

    Same structure as Conv2dRT, but with triple-valued kernel sizes and
    ``conv3d`` as the underlying layer function.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bias=True,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior=None,
                 posteriors=None,
                 kl_type='reverse'):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _triple(kernel_size)
        # (out_ch, in_ch, kD, kH, kW) — the layout conv3d expects.
        weight_size = (out_channels, in_channels,
                       self.kernel_size[0], self.kernel_size[1], self.kernel_size[2])
        # Fix: drop the misleading parentheses around out_channels (it was
        # always a plain int, never a 1-tuple).
        bias_size = out_channels if bias else None
        super(Conv3dRT, self).__init__(layer_fn=conv3d,
                                       weight_size=weight_size,
                                       bias_size=bias_size,
                                       prior=prior,
                                       posteriors=posteriors,
                                       kl_type=kl_type,
                                       stride=stride,
                                       padding=padding,
                                       dilation=dilation,
                                       groups=groups)
class Conv2dLRT(LRTLayer):
    """2D convolution with the local reparameterization trick (LRT).

    Identical parameter bookkeeping to Conv2dRT, but inherits from LRTLayer
    so sampling happens in activation space instead of weight space.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bias=True,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior=None,
                 posteriors=None,
                 kl_type='reverse'):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        # (out_ch, in_ch, kH, kW) — the layout conv2d expects.
        weight_size = (out_channels, in_channels, self.kernel_size[0], self.kernel_size[1])
        # Fix: drop the misleading parentheses around out_channels (plain int).
        bias_size = out_channels if bias else None
        super(Conv2dLRT, self).__init__(layer_fn=conv2d,
                                        weight_size=weight_size,
                                        bias_size=bias_size,
                                        prior=prior,
                                        posteriors=posteriors,
                                        kl_type=kl_type,
                                        stride=stride,
                                        padding=padding,
                                        dilation=dilation,
                                        groups=groups)
class Conv3dLRT(LRTLayer):
    """3D convolution with the local reparameterization trick (LRT).

    Identical parameter bookkeeping to Conv3dRT, but inherits from LRTLayer
    so sampling happens in activation space instead of weight space.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bias=True,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior=None,
                 posteriors=None,
                 kl_type='reverse'):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _triple(kernel_size)
        # (out_ch, in_ch, kD, kH, kW) — the layout conv3d expects.
        weight_size = (out_channels, in_channels,
                       self.kernel_size[0], self.kernel_size[1], self.kernel_size[2])
        # Fix: drop the misleading parentheses around out_channels (plain int).
        bias_size = out_channels if bias else None
        super(Conv3dLRT, self).__init__(layer_fn=conv3d,
                                        weight_size=weight_size,
                                        bias_size=bias_size,
                                        prior=prior,
                                        posteriors=posteriors,
                                        kl_type=kl_type,
                                        stride=stride,
                                        padding=padding,
                                        dilation=dilation,
                                        groups=groups)
|
import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
# NOTE(review): `pretrained=True` is deprecated in recent torchvision in
# favor of `weights=...` — confirm the installed version before changing.
pretrained_model = models.resnet152(pretrained=True)


class Resnet152Feats(nn.Module):
    """Expose a pretrained ResNet-152's pooled features and classification output.

    forward(x) returns a tuple (x_pool, y):
      * x_pool: output of the global average-pool layer (before flattening)
      * y: output of the final fully-connected layer on the flattened features
    """

    def __init__(self):
        super(Resnet152Feats, self).__init__()
        # Bug fix: the original accessed `pretrained_model.features` and
        # `.classifier`, which are VGG-style attributes that do not exist on
        # torchvision ResNets, and forward() referenced an undefined
        # `self.features_nopool`.  ResNet children are
        # [conv stages ..., avgpool, fc], so split them accordingly.
        modules = list(pretrained_model.children())
        self.features_nopool = nn.Sequential(*modules[:-2])  # conv stages
        self.features_pool = modules[-2]                     # global avg pool
        self.classifier = modules[-1]                        # final fc layer

    def forward(self, x):
        """Return (pooled feature map, logits) for an input batch x."""
        x = self.features_nopool(x)
        x_pool = self.features_pool(x)
        # Flatten (N, C, 1, 1) -> (N, C) for the fully-connected classifier.
        x_feat = x_pool.view(x_pool.size(0), -1)
        y = self.classifier(x_feat)
        return x_pool, y
|
import hashlib
from typing import List
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.ec import (
SECP256R1,
EllipticCurvePublicKey,
)
from webauthn.helpers import aaguid_to_string, validate_certificate_chain, verify_signature
from webauthn.helpers.cose import COSEAlgorithmIdentifier
from webauthn.helpers.decode_credential_public_key import (
DecodedEC2PublicKey,
decode_credential_public_key,
)
from webauthn.helpers.exceptions import InvalidCertificateChain, InvalidRegistrationResponse
from webauthn.helpers.structs import AttestationStatement
def verify_fido_u2f(
    *,
    attestation_statement: AttestationStatement,
    client_data_json: bytes,
    rp_id_hash: bytes,
    credential_id: bytes,
    credential_public_key: bytes,
    aaguid: bytes,
    pem_root_certs_bytes: List[bytes],
) -> bool:
    """Verify a "fido-u2f" attestation statement

    See https://www.w3.org/TR/webauthn-2/#sctn-fido-u2f-attestation

    Args:
        attestation_statement: Parsed statement; ``sig`` and a single-entry
            ``x5c`` certificate list are required.
        client_data_json: Raw client data JSON bytes from the response.
        rp_id_hash: SHA-256 hash of the Relying Party ID.
        credential_id: The newly created credential's ID.
        credential_public_key: COSE-encoded credential public key (must be EC2/P-256).
        aaguid: Authenticator AAGUID (must be all zeroes for U2F).
        pem_root_certs_bytes: Root certificates to validate ``x5c`` against.

    Returns:
        True if every verification step passes.

    Raises:
        InvalidRegistrationResponse: if any verification step fails.
    """
    if not attestation_statement.sig:
        raise InvalidRegistrationResponse(
            "Attestation statement was missing signature (FIDO-U2F)"
        )

    if not attestation_statement.x5c:
        raise InvalidRegistrationResponse(
            "Attestation statement was missing certificate (FIDO-U2F)"
        )

    # U2F attestation defines exactly one leaf certificate.
    if len(attestation_statement.x5c) > 1:
        raise InvalidRegistrationResponse(
            "Attestation statement contained too many certificates (FIDO-U2F)"
        )

    # Validate the certificate chain
    try:
        validate_certificate_chain(
            x5c=attestation_statement.x5c,
            pem_root_certs_bytes=pem_root_certs_bytes,
        )
    except InvalidCertificateChain as err:
        raise InvalidRegistrationResponse(f"{err} (FIDO-U2F)")

    # FIDO spec requires AAGUID in U2F attestations to be all zeroes
    # See https://fidoalliance.org/specs/fido-v2.1-rd-20191217/fido-client-to-authenticator-protocol-v2.1-rd-20191217.html#u2f-authenticatorMakeCredential-interoperability
    actual_aaguid = aaguid_to_string(aaguid)
    expected_aaguid = "00000000-0000-0000-0000-000000000000"

    if actual_aaguid != expected_aaguid:
        raise InvalidRegistrationResponse(
            f"AAGUID {actual_aaguid} was not expected {expected_aaguid} (FIDO-U2F)"
        )

    # Get the public key from the leaf certificate
    leaf_cert_bytes = attestation_statement.x5c[0]
    leaf_cert = x509.load_der_x509_certificate(leaf_cert_bytes, default_backend())
    leaf_cert_pub_key = leaf_cert.public_key()

    # We need the cert's x and y points so make sure they exist
    if not isinstance(leaf_cert_pub_key, EllipticCurvePublicKey):
        raise InvalidRegistrationResponse(
            "Leaf cert was not an EC2 certificate (FIDO-U2F)"
        )

    if not isinstance(leaf_cert_pub_key.curve, SECP256R1):
        raise InvalidRegistrationResponse(
            "Leaf cert did not use P-256 curve (FIDO-U2F)"
        )

    decoded_public_key = decode_credential_public_key(credential_public_key)
    if not isinstance(decoded_public_key, DecodedEC2PublicKey):
        raise InvalidRegistrationResponse(
            "Credential public key was not EC2 (FIDO-U2F)"
        )

    # Convert the public key to "Raw ANSI X9.62 public key format": the
    # uncompressed-point prefix byte 0x04 followed by the x and y values.
    public_key_u2f = b"".join(
        [
            bytes([0x04]),
            decoded_public_key.x,
            decoded_public_key.y,
        ]
    )

    # Generate a hash of client_data_json
    client_data_hash = hashlib.sha256()
    client_data_hash.update(client_data_json)
    client_data_hash = client_data_hash.digest()

    # Prepare the signature base (called "verificationData" in the WebAuthn spec)
    verification_data = b"".join(
        [
            bytes([0x00]),
            rp_id_hash,
            client_data_hash,
            credential_id,
            public_key_u2f,
        ]
    )

    try:
        verify_signature(
            public_key=leaf_cert_pub_key,
            signature_alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
            signature=attestation_statement.sig,
            data=verification_data,
        )
    except InvalidSignature:
        raise InvalidRegistrationResponse(
            "Could not verify attestation statement signature (FIDO-U2F)"
        )

    # If we make it to here we're all good
    return True
|
from allennlp.data.iterators import EpochTrackingBucketIterator
from allennlp.tests.data.iterators.basic_iterator_test import IteratorTest
class EpochTrackingBucketIteratorTest(IteratorTest):
    """Checks that EpochTrackingBucketIterator tracks epochs separately per dataset."""
    def setUp(self):
        # The super class creates a self.instances field and populates it with some instances with
        # TextFields.
        super(EpochTrackingBucketIteratorTest, self).setUp()
        self.iterator = EpochTrackingBucketIterator(sorting_keys=[["text", "num_tokens"]])
        self.iterator.index_with(self.vocab)
        # We'll add more to create a second dataset.
        self.more_instances = [
            self.create_instance(["this", "is", "a", "sentence"]),
            self.create_instance(["this", "is", "in", "the", "second", "dataset"]),
            self.create_instance(["so", "is", "this", "one"])
        ]

    def test_iterator_tracks_epochs_per_dataset(self):
        # Iterate each dataset for two epochs; the same iterator instance is
        # reused, so epoch counters must not leak between datasets.
        generated_dataset1 = list(self.iterator(self.instances, num_epochs=2))
        generated_dataset2 = list(self.iterator(self.more_instances, num_epochs=2))
        # First dataset has five sentences. See ``IteratorTest.setUp``
        assert generated_dataset1[0]["epoch_num"] == [0, 0, 0, 0, 0]
        assert generated_dataset1[1]["epoch_num"] == [1, 1, 1, 1, 1]
        # Second dataset has three sentences.
        assert generated_dataset2[0]["epoch_num"] == [0, 0, 0]
        assert generated_dataset2[1]["epoch_num"] == [1, 1, 1]
|
from numpy import array

# Demonstrate element-wise scalar arithmetic on a 2x2 matrix.
A = array([[1, 2], [3, 4]])
C = A * 10.0  # element-wise multiplication by a scalar
D = A / 2.0   # element-wise division by a scalar
# Bug fix: the original printed the undefined names `mult` and `divi`,
# which raised NameError; print the computed arrays instead.
print('C = ', C)
print('D = ', D)
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for all common top level AiiDA entity classes and methods"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.common import exceptions
from aiida.common.utils import classproperty, type_check
from . import backends
__all__ = ('Entity', 'Collection')
class Collection(object):
    """Container class that represents the collection of objects of a particular type."""

    def __init__(self, backend, entity_class):
        """
        :param backend: the backend instance this collection is bound to
        :param entity_class: the entity type represented by this collection
        """
        # assert issubclass(entity_class, Entity), "Must provide an entity type"
        self._backend = backend
        self._entity_type = entity_class

    def __call__(self, backend):
        """
        Create a new objects collection using a different backend

        :param backend: the backend to use
        :return: a new collection with the different backend
        """
        if backend is self._backend:
            # Special case if they actually want the same collection
            return self
        return self.__class__(backend, self._entity_type)

    @property
    def backend(self):
        """Return the backend."""
        return self._backend

    @property
    def entity_type(self):
        """Return the entity type of this collection."""
        return self._entity_type

    def query(self):
        """
        Get a query builder for the objects of this collection

        :return: a new query builder instance
        :rtype: :class:`aiida.orm.QueryBuilder`
        """
        # pylint: disable=no-self-use, fixme
        from . import querybuilder

        query = querybuilder.QueryBuilder()
        query.append(self._entity_type, project='*')
        return query

    def _all_matching(self, filters):
        """
        Run a filtered query and return every matching entity.

        :param filters: the keyword value pair filters to match
        :return: list of matching entities (possibly empty)
        """
        query = self.query()
        query.add_filter(self.entity_type, filters)
        return [_[0] for _ in query.all()]

    def get(self, **kwargs):
        """
        Get a single collection entry using keyword parameter filters

        :param kwargs: the filters identifying the object to get
        :return: the entry
        :raises aiida.common.exceptions.NotExistent: if no entry matches
        :raises aiida.common.exceptions.MultipleObjectsError: if more than one entry matches
        """
        res = self._all_matching(kwargs)
        if not res:
            raise exceptions.NotExistent("No {} with filter '{}' found".format(self.entity_type.__name__, kwargs))
        if len(res) > 1:
            # Bug fix: the original interpolated the *builtin* `id` function
            # here (rendering "<built-in function id>"); report the filters.
            raise exceptions.MultipleObjectsError("Multiple {}s found with filter '{}'".format(
                self.entity_type.__name__, kwargs))
        return res[0]

    def find(self, **filters):
        """
        Find entries matching the given filters

        :param filters: the keyword value pair filters to match
        :return: a list of resulting matches
        :rtype: list
        :raises aiida.common.exceptions.NotExistent: if no entry matches
        :raises aiida.common.exceptions.MultipleObjectsError: if more than one entry matches
        """
        res = self._all_matching(filters)
        if not res:
            raise exceptions.NotExistent("No {} with filter '{}' found".format(self.entity_type.__name__, filters))
        if len(res) > 1:
            # Bug fix: same builtin-`id` interpolation bug as in get().
            raise exceptions.MultipleObjectsError("Multiple {}s found with filter '{}'".format(
                self.entity_type.__name__, filters))
        return res

    def all(self):
        """
        Get all entities in this collection

        :return: A collection of users matching the criteria
        """
        return [_[0] for _ in self.query().all()]
class Entity(object):
    """An AiiDA entity"""

    # Class-level caches/defaults (not used directly in this block).
    _BACKEND = None
    _OBJECTS = None

    # Define our collection type
    Collection = Collection

    @classproperty
    def objects(cls, backend=None):  # pylint: disable=no-self-use, no-self-argument
        """
        Get an collection for objects of this type.

        :param backend: the optional backend to use (otherwise use default)
        :return: an object that can be used to access entites of this type
        """
        backend = backend or backends.construct_backend()
        return cls.Collection(backend, cls)

    @classmethod
    def get(cls, **kwargs):
        # Convenience shortcut for cls.objects.get(**kwargs).
        # pylint: disable=redefined-builtin, invalid-name
        return cls.objects.get(**kwargs)  # pylint: disable=no-member

    @classmethod
    def from_backend_entity(cls, backend_entity):
        """
        Construct an entity from a backend entity instance

        :param backend_entity: the backend entity
        :return: an AiiDA entity instance
        """
        from . import implementation

        type_check(backend_entity, implementation.BackendEntity)
        # Bypass __init__ and initialise directly from the backend object.
        computer = cls.__new__(cls)
        computer.init_from_backend(backend_entity)
        return computer

    def __init__(self, backend_entity):
        """
        :param backend_entity: the backend model supporting this entity
        :type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
        """
        self._backend_entity = backend_entity

    def init_from_backend(self, backend_entity):
        """
        :param backend_entity: the backend model supporting this entity
        :type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
        """
        self._backend_entity = backend_entity

    @property
    def id(self):
        """
        Get the id for this entity.  This is unique only amongst entities of this type
        for a particular backend

        :return: the entity id
        """
        # pylint: disable=redefined-builtin, invalid-name
        return self._backend_entity.id

    @property
    def pk(self):
        """
        Get the primary key for this entity

        .. note:: Deprecated because the backend need not be a database and so principle key doesn't
            always make sense.  Use `id()` instead.

        :return: the principal key
        """
        return self.id

    @property
    def uuid(self):
        """
        Get the UUID for this entity.  This is unique across all entities types and backends

        :return: the entity uuid
        :rtype: :class:`uuid.UUID`
        """
        return self._backend_entity.uuid

    def store(self):
        """
        Store the entity.
        """
        self._backend_entity.store()
        # Return self to allow chaining, e.g. Entity(...).store()
        return self

    @property
    def is_stored(self):
        """
        Is the computer stored?

        :return: True if stored, False otherwise
        :rtype: bool
        """
        return self._backend_entity.is_stored

    @property
    def backend(self):
        """
        Get the backend for this entity

        :return: the backend instance
        """
        return self._backend_entity.backend

    @property
    def backend_entity(self):
        """
        Get the implementing class for this object

        :return: the class model
        """
        return self._backend_entity
|
#
# Pure python logging mechanism for logging to AlertViz from
# pure python (ie not JEP). DO NOT USE IN PYTHON CALLED
# FROM JAVA.
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 11/03/10 5849 cjeanbap Initial Creation.
#
#
#
import os
import sys
class Record():
    """Minimal log-record stand-in used when logging to AlertViz from pure Python.

    Mimics the attributes of a ``logging.LogRecord`` that the consumer reads:
    ``levelno``, ``message``, ``exc_info`` and ``exc_text``.
    """

    def __init__(self, level=0, msg='Test Message'):
        self.levelno, self.message = level, msg
        # Capture whatever exception is currently being handled (if any).
        self.exc_info = sys.exc_info()
        self.exc_text = "TEST"

    def getMessage(self):
        """Return the stored message text."""
        return self.message
|
from collections import OrderedDict, defaultdict
from multiprocessing import Pool
import copy
import json
import io
import yaml
import os
import progressbar
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="Polyfit may be poorly conditioned")
import pandas
from panoptes_aggregation import extractors
from panoptes_aggregation.csv_utils import flatten_data, order_columns
from panoptes_aggregation.extractors.utilities import annotation_by_task
def get_file_instance(file):
    """Return *file* as an open file object, opening it if a path was given."""
    if isinstance(file, io.IOBase):
        return file
    return open(file, 'r', encoding='utf-8')  # pragma: no cover
def get_major_version(s):
    """Return the major component of a dotted version string (e.g. '34.12' -> '34')."""
    major, _, _ = s.partition('.')
    return major
def extract_classification(
    classification_by_task,
    classification_info,
    extractor_key,
    extractor_name,
    keyword,
    verbose
):
    """Run one configured extractor over a single classification.

    :param classification_by_task: annotations re-keyed by task (see annotation_by_task)
    :param classification_info: metadata of the classification row (ids, user, dates)
    :param extractor_key: key into ``extractors.extractors`` selecting the extractor
    :param extractor_name: name recorded in the output rows (may differ for shape extractors)
    :param keyword: keyword arguments for the extractor (must include 'task')
    :param verbose: when True, print diagnostics for annotations that fail to extract
    :return: ``(extractor_name, rows)`` where rows is a list of OrderedDicts ready for
        the extractions CSV, or ``(extractor_name, None)`` when extraction failed
    """
    try:
        extract = extractors.extractors[extractor_key](classification_by_task, **keyword)
        new_extract_row = []
        if isinstance(extract, list):
            # some extractors return one row per annotation value
            for e in extract:
                new_extract_row.append(OrderedDict([
                    ('classification_id', classification_info['classification_id']),
                    ('user_name', classification_info['user_name']),
                    ('user_id', classification_info['user_id']),
                    ('workflow_id', classification_info['workflow_id']),
                    ('task', keyword['task']),
                    ('created_at', classification_info['created_at']),
                    ('subject_id', classification_info['subject_ids']),
                    ('extractor', extractor_name),
                    ('data', e)
                ]))
        else:
            new_extract_row.append(OrderedDict([
                ('classification_id', classification_info['classification_id']),
                ('user_name', classification_info['user_name']),
                ('user_id', classification_info['user_id']),
                ('workflow_id', classification_info['workflow_id']),
                ('task', keyword['task']),
                ('created_at', classification_info['created_at']),
                ('subject_id', classification_info['subject_ids']),
                ('extractor', extractor_name),
                ('data', extract)
            ]))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; a failed extraction is reported as None.
        new_extract_row = None
        if verbose:
            print()
            print('Incorrectly formatted annotation')
            print(classification_info)
            print(extractor_key)
            print(classification_by_task)
    return extractor_name, new_extract_row
# Default output directory: the working directory at import time.
CURRENT_PATH = os.path.abspath('.')
def extract_csv(
    classification_csv,
    config,
    output_dir=CURRENT_PATH,
    output_name='extractions',
    order=False,
    verbose=False,
    cpu_count=1
):
    """Extract annotations from a Panoptes classification export into CSV files.

    :param classification_csv: path or open file of the classification dump
    :param config: path or open file of the YAML extractor configuration
    :param output_dir: directory where the extraction CSVs are written
    :param output_name: base name for the output files
    :param order: when True, order output columns (with 'choice' first)
    :param verbose: print diagnostics for annotations that fail to extract
    :param cpu_count: number of worker processes; >1 enables multiprocessing
    :return: list of paths of the CSV files written (one per extractor)
    """
    config = get_file_instance(config)
    with config as config_in:
        config_yaml = yaml.load(config_in, Loader=yaml.SafeLoader)
    extractor_config = config_yaml['extractor_config']
    workflow_id = config_yaml['workflow_id']
    version = config_yaml['workflow_version']
    # total (extractor, keyword) pairs per classification, used to size the progress bar
    number_of_extractors = sum([len(value) for key, value in extractor_config.items()])
    extracted_data = defaultdict(list)
    classification_csv = get_file_instance(classification_csv)
    with classification_csv as classification_csv_in:
        classifications = pandas.read_csv(classification_csv_in, encoding='utf-8', dtype={'workflow_version': str})
    wdx = classifications.workflow_id == workflow_id
    assert (wdx.sum() > 0), 'There are no classifications matching the configured workflow ID'
    # a full "major.minor" version matches exactly; a bare major matches any minor
    if '.' in version:
        vdx = classifications.workflow_version == version
    else:
        vdx = classifications.workflow_version.apply(get_major_version) == version
    # (typo "classificaitons" in the original message fixed)
    assert (vdx.sum() > 0), 'There are no classifications matching the configured version number'
    assert ((vdx & wdx).sum() > 0), 'There are no classifications matching the combined workflow ID and version number'
    widgets = [
        'Extracting: ',
        progressbar.Percentage(),
        ' ', progressbar.Bar(),
        ' ', progressbar.ETA()
    ]
    max_pbar = (wdx & vdx).sum() * number_of_extractors
    pbar = progressbar.ProgressBar(widgets=widgets, max_value=max_pbar)
    counter = 0

    def callback(name_with_row):
        # Collects results from both the synchronous path and the worker pool.
        nonlocal extracted_data
        nonlocal counter
        nonlocal pbar
        extractor_name, new_extract_row = name_with_row
        if new_extract_row is not None:
            extracted_data[extractor_name] += new_extract_row
        counter += 1
        pbar.update(counter)

    pbar.start()
    if cpu_count > 1:
        pool = Pool(cpu_count)
    for _, classification in classifications[wdx & vdx].iterrows():
        classification_by_task = annotation_by_task({
            'annotations': json.loads(classification.annotations),
            'metadata': json.loads(classification.metadata)
        })
        classification_info = {
            'classification_id': classification.classification_id,
            'user_name': classification.user_name,
            'user_id': classification.user_id,
            'workflow_id': classification.workflow_id,
            'created_at': classification.created_at,
            'subject_ids': classification.subject_ids
        }
        for extractor_name, keywords in extractor_config.items():
            extractor_key = extractor_name
            if 'shape_extractor' in extractor_name:
                # all shape-extractor configs share one underlying extractor
                extractor_key = 'shape_extractor'
            for keyword in keywords:
                if extractor_key in extractors.extractors:
                    if cpu_count > 1:
                        # deepcopy: workers must not share the mutable annotation dict
                        pool.apply_async(
                            extract_classification,
                            args=(
                                copy.deepcopy(classification_by_task),
                                classification_info,
                                extractor_key,
                                extractor_name,
                                keyword,
                                verbose
                            ),
                            callback=callback
                        )
                    else:
                        name_with_row = extract_classification(
                            copy.deepcopy(classification_by_task),
                            classification_info,
                            extractor_key,
                            extractor_name,
                            keyword,
                            verbose
                        )
                        callback(name_with_row)
                else:
                    # unknown extractor: still advance the progress bar
                    callback((None, None))
    if cpu_count > 1:
        pool.close()
        pool.join()
    pbar.finish()
    # create one flat csv file for each extractor used
    output_base_name, _ = os.path.splitext(output_name)
    output_files = []
    for extractor_name, data in extracted_data.items():
        output_path = os.path.join(output_dir, '{0}_{1}.csv'.format(extractor_name, output_base_name))
        output_files.append(output_path)
        non_flat_extract = pandas.DataFrame(data)
        flat_extract = flatten_data(non_flat_extract)
        if order:
            flat_extract = order_columns(flat_extract, front=['choice'])
        flat_extract.to_csv(output_path, index=False, encoding='utf-8')
    return output_files
|
# Shortest Path in Binary Matrix
from queue import Queue
class Solution:
    """LeetCode 1091: shortest clear path in a binary matrix (8-directional BFS)."""

    def shortestPathBinaryMatrix(self, grid):
        """Return the length of the shortest path of 0-cells from the top-left
        to the bottom-right corner, or -1 when no such path exists."""
        n_rows, n_cols = len(grid), len(grid[0])
        if grid[0][0] == 1:
            return -1
        if n_rows == 1 and n_cols == 1:
            return 1
        # The eight king-move directions.
        neighbours = [(1, 1), (1, 0), (1, -1), (0, -1),
                      (-1, -1), (-1, 0), (-1, 1), (0, 1)]
        seen = [[False] * n_cols for _ in range(n_rows)]
        frontier = Queue()
        # Cell (0, 0) is reached with a path of length 1.
        frontier.put((0, 0, 1))
        seen[0][0] = True
        while not frontier.empty():
            row, col, length = frontier.get()
            for d_row, d_col in neighbours:
                nxt_r, nxt_c = row + d_row, col + d_col
                if not (0 <= nxt_r < n_rows and 0 <= nxt_c < n_cols):
                    continue
                if grid[nxt_r][nxt_c] == 1 or seen[nxt_r][nxt_c]:
                    continue
                if nxt_r == n_rows - 1 and nxt_c == n_cols - 1:
                    return length + 1
                seen[nxt_r][nxt_c] = True
                frontier.put((nxt_r, nxt_c, length + 1))
        return -1
if __name__ == "__main__":
    sol = Solution()
    # The original reassigned `grid` three times; only the final value was
    # ever used, so the two dead test grids have been removed.
    grid = [[0]]
    print(sol.shortestPathBinaryMatrix(grid))
|
import nox
@nox.session(python=["3.8"], reuse_venv=True)
def build(session):
    """Install the package into the session venv to verify it builds."""
    session.install(".")
@nox.session(python=["3.8"], reuse_venv=True)
def test(session):
    """Install the package with its test extras and run the pytest suite."""
    session.install(".[test]")
    session.run("pytest", *session.posargs)
|
from dvc.utils import is_binary
def is_conda():
    """Return True when DVC was installed as a conda package."""
    try:
        from .build import PKG  # patched during conda package build
    except ImportError:
        return False
    return PKG == "conda"
def get_linux():
    """Return the package manager used on this Linux distro, or None if unknown.

    Non-binary installs are always managed with pip.
    """
    import distro

    if not is_binary():
        return "pip"

    yum_based = ("rhel", "centos", "fedora", "amazon", "opensuse")
    apt_based = ("ubuntu", "debian")
    distro_id = distro.id()
    if distro_id in yum_based:
        return "yum"
    if distro_id in apt_based:
        return "apt"
    return None
def get_darwin():
    """Return the package manager on macOS: Homebrew formula, pip, or None for binary builds."""
    if is_binary():
        return None
    # Installed under the Homebrew cellar means we came from a brew formula.
    return "formula" if __file__.startswith("/usr/local/Cellar") else "pip"
def get_windows():
    """Return 'pip' for source installs on Windows, None for binary builds."""
    if is_binary():
        return None
    return "pip"
def get_package_manager():
    """Return the name of the package manager DVC was installed with.

    :return: one of "conda", "pip", "yum", "apt", "formula"
    :raises DvcException: on an unsupported system, or when no package
        manager could be determined for this platform.
    """
    import platform

    from dvc.exceptions import DvcException

    if is_conda():
        return "conda"

    # Dispatch lazily so only the detector for the current platform runs.
    # The original built a dict of already-called results, which invoked all
    # three detectors and imported `distro` even on Windows/macOS.
    detectors = {
        "Windows": get_windows,
        "Darwin": get_darwin,
        "Linux": get_linux,
    }
    system = platform.system()
    detector = detectors.get(system)
    pkg = detector() if detector is not None else None
    if pkg is None:
        raise DvcException("not supported system '{}'".format(system))
    return pkg
|
import argparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_l2vpn_cfg as l2vpn_xr_cfg
from ydk.models.cisco_ios_xe import Cisco_IOS_XE_native as xe_native
from ydk.types import Empty
import logging
import re
import sys
# Module-level YDK logger: INFO and above, written to stderr.
log = logging.getLogger('ydk')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
log.addHandler(handler)
def configure_l2vpn_xr(l2vpn_obj, remote_ip_address, intf_name, pw_id, pw_name):
    """Configure an XConnect (pseudo-wire) on an IOS-XR PE device.

    Builds the YDK model tree in-place on *l2vpn_obj*; nothing is pushed to
    the device here (the caller sends it via CRUDService.create).
    """
    # Xconnect group named after the pseudo-wire, containing one P2P xconnect.
    xconnectGroup = l2vpn_obj.Database.XconnectGroups.XconnectGroup()
    xconnectGroup.name = pw_name
    p2pXconnect = xconnectGroup.P2pXconnects.P2pXconnect()
    p2pXconnect.name = "p2p-ciscolive"
    # Pseudowire towards the remote PE.
    pwid = p2pXconnect.Pseudowires.Pseudowire()
    pwid.pseudowire_id = int(pw_id)
    neighbor = pwid.Neighbor()
    neighbor.neighbor = remote_ip_address
    # Local attachment circuit (customer-facing interface).
    pwid_ac = p2pXconnect.AttachmentCircuits.AttachmentCircuit()
    pwid_ac.name = intf_name
    # YDK list containers use lower-snake-case attribute names.
    pwid.neighbor.append(neighbor)
    p2pXconnect.attachment_circuits.attachment_circuit.append(pwid_ac)
    p2pXconnect.pseudowires.pseudowire.append(pwid)
    xconnectGroup.p2p_xconnects.p2p_xconnect.append(p2pXconnect)
    l2vpn_obj.database.xconnect_groups.xconnect_group.append(xconnectGroup)
def configure_l2vpn_xe(l2vpn_xe_obj, remote_ip_address, intf_name, pw_id, pw_name):
    """Configure an EoMPLS xconnect sub-interface on an IOS-XE PE device.

    Builds the YDK model tree in-place on *l2vpn_xe_obj*. Note: *pw_name* is
    accepted for signature symmetry with the XR variant but is not used here.
    """
    # NOTE(review): only the trailing "<unit>.<subif>" digits are kept as the
    # GigabitEthernet name — confirm this matches the device interface naming.
    intf_name_fnd = re.search(r".*(\d+\.\d+)", intf_name)
    if intf_name_fnd:
        intf = l2vpn_xe_obj.Interface.GigabitEthernet()
        intf.name = intf_name_fnd.group(1)
    else:
        print("Invalid interface name entered for XE device.")
        sys.exit()
    intf_encap = intf.Encapsulation()
    intf_encap_dot1q = intf_encap.Dot1Q()
    intf_encap_dot1q.vlan_id = int(pw_id)  # making vlan_id = pw_id
    # Xconnect towards the remote PE with MPLS encapsulation.
    xe_xconnect = intf.Xconnect()
    xe_xconnect.address = remote_ip_address
    xe_xconnect.vcid = int(pw_id)
    xe_xconnect_encap = xe_xconnect.Encapsulation()
    xe_xconnect.encapsulation = xe_xconnect_encap.mpls
    intf.xconnect = xe_xconnect
    intf_encap.dot1q = intf_encap_dot1q
    intf.encapsulation = intf_encap
    l2vpn_xe_obj.interface.gigabitethernet.append(intf)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pe1_ip', nargs='?', help="IP address of PE1")
    parser.add_argument('--pe1_platform', nargs='?', default="IOSXE", help="PE1 Platform")
    parser.add_argument('--pe1_intf', nargs='?', help="Interface of PE1")
    parser.add_argument('--pe2_ip', nargs='?', help="IP address of PE2")
    parser.add_argument('--pe2_platform', nargs='?', default="IOSXR", help="PE2 Platform")
    parser.add_argument('--pe2_intf', nargs='?', help="Interface of PE2")
    parser.add_argument('--pw_id', nargs='?', help="Pseudo-wire ID")
    parser.add_argument('--pw_name', nargs='?', default="ciscolive", help="Pseudo-wire XConnect Name")
    parser.add_argument('--username', nargs='?', default="cisco", help="Username")
    parser.add_argument('--password', nargs='?', default="cisco", help="Password")
    args = parser.parse_args()

    # Configure both ends of the pseudo-wire; each PE points at the other.
    cisco_devices = [{"ip_address": args.pe1_ip,
                      "platform": args.pe1_platform,
                      "intf_name": args.pe1_intf,
                      "dest_ip": args.pe2_ip},
                     {"ip_address": args.pe2_ip,
                      "platform": args.pe2_platform,
                      "intf_name": args.pe2_intf,
                      "dest_ip": args.pe1_ip}]
    for cisco_device in cisco_devices:
        pe_ip_address = cisco_device["ip_address"]
        intf_name = cisco_device["intf_name"]
        remote_ip_address = cisco_device["dest_ip"]
        crud = CRUDService()
        if cisco_device["platform"] == "IOSXE":
            # XE devices listen for NETCONF on 830; XR below uses 22.
            provider = NetconfServiceProvider(address=pe_ip_address, port=830, username=args.username,
                                              password=args.password, protocol="ssh")
            l2vpn_obj = xe_native.Native()
            configure_l2vpn_xe(l2vpn_obj, remote_ip_address, intf_name, args.pw_id, args.pw_name)
        elif cisco_device["platform"] == "IOSXR":
            provider = NetconfServiceProvider(address=pe_ip_address, port=22, username=args.username,
                                              password=args.password, protocol="ssh")
            l2vpn_obj = l2vpn_xr_cfg.L2vpn()
            configure_l2vpn_xr(l2vpn_obj, remote_ip_address, intf_name, args.pw_id, args.pw_name)
        else:
            print("Invalid platform type entered!!")
            sys.exit()
        # Push the built configuration model to the device over NETCONF.
        crud.create(provider, l2vpn_obj)
|
#!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Subtraction Analysis
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime

import matplotlib.pyplot as plt
from myst_nb import glue
from nilearn import image, plotting
from repo2data.repo2data import Repo2Data

import nimare

# Record the start time so the page can report its own build duration.
start = datetime.now()

# Install the data if running locally, or points to cached data if running on neurolibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")

# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")

# Now, load the Datasets we will use in this chapter
sleuth_dset1 = nimare.dataset.Dataset.load(os.path.join(data_path, "sleuth_dset1.pkl.gz"))
sleuth_dset2 = nimare.dataset.Dataset.load(os.path.join(data_path, "sleuth_dset2.pkl.gz"))
# Subtraction analysis refers to the voxel-wise comparison of two meta-analytic samples.
# In image-based meta-analysis, comparisons between groups of maps can generally be accomplished within the standard meta-regression framework (i.e., by adding a covariate that codes for group membership).
# However, coordinate-based subtraction analysis requires special extensions for CBMA algorithms.
#
# Subtraction analysis to compare the results of two ALE meta-analyses was originally implemented by {cite:t}`Laird2005-qh` and later extended by {cite:t}`Eickhoff2012-hk`.
# In this approach, two groups of experiments (A and B) are compared using a group assignment randomization procedure in which voxel-wise null distributions are generated by randomly reassigning experiments between the two groups and calculating ALE-difference scores for each permutation.
# Real ALE-difference scores (i.e., the ALE values for one group minus the ALE values for the other) are compared against these null distributions to determine voxel-wise significance.
# In the original implementation of the algorithm, this procedure is performed separately for a group A > B contrast and a group B > A contrast, where each contrast is limited to voxels that were significant in the first group's original meta-analysis.
#
# ```{important}
# In NiMARE, we use an adapted version of the subtraction analysis method in {py:class}`nimare.meta.cbma.ale.ALESubtraction`.
# The NiMARE implementation analyzes all voxels, rather than only those that show a significant effect of A alone or B alone as in the original implementation.
# ```
# In[2]:
from nimare import meta

# Fit the ALE subtraction analysis between the two Sleuth datasets.
kern = meta.kernel.ALEKernel()
sub_meta = meta.cbma.ale.ALESubtraction(kernel_transformer=kern, n_iters=10000)
sub_results = sub_meta.fit(sleuth_dset1, sleuth_dset2)

# In[3]:
# Plot the unthresholded group1-minus-group2 z map.
fig, ax = plt.subplots(figsize=(6, 2))
display = plotting.plot_stat_map(
    sub_results.get_map("z_desc-group1MinusGroup2", return_type="image"),
    annotate=False,
    axes=ax,
    cmap="RdBu_r",
    cut_coords=[0, 0, 0],
    draw_cross=False,
    figure=fig,
)
ax.set_title("ALE Subtraction")
# Trim the colorbar to first/zero/last ticks for readability.
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
    new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
    new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
# Register the figure with myst_nb so the book page can embed it.
glue("figure_subtraction", fig, display=False)
# ```{glue:figure} figure_subtraction
# :name: figure_subtraction
# :align: center
#
# Unthresholded z-statistic map for the subtraction analysis of the two example Sleuth-based `Datasets`.
# ```
#
# Alternatively, MKDA Chi-squared analysis is inherently a subtraction analysis method, in that it compares foci from two groups of studies.
# Generally, one of these groups is a sample of interest, while the other is a meta-analytic database (minus the studies in the sample).
# With this setup, meta-analysts can infer whether there is greater convergence of foci in a voxel as compared to the baseline across the field (as estimated with the meta-analytic database), much like SCALE.
# However, if the database is replaced with a second sample of interest, the analysis ends up comparing convergence between the two groups.
# In[4]:
# Report how long this page took to build.
end = datetime.now()
print(f"subtraction.md took {end - start} to build.")
|
# Re-export the package's public API.
from .w2v import Word2Vec

# Explicit public interface of this package.
__all__ = ["Word2Vec"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Context Manager for saving the value of an object's attribute, setting it to
# a new value (None, by default), and then restoring the original value.
#
# Used as:
#
# myObject.fooAttribute = 1
# print(myObject.fooAttribute)
# with AttributeSaver(myObject, "fooAttribute", 5):
# print(myObject.fooAttribute)
# print(myObject.fooAttribute)
#
# Prints: 1, 5, 1
class AttributeSaver:
    """Context manager that temporarily replaces an object's attribute.

    On entry the attribute is set to *value* (None by default); on exit the
    original value is restored, whether or not an exception occurred.
    """

    def __init__(self, obj, attribute, value=None):
        self.obj = obj
        self.attribute = attribute
        self.new_value = value
        # Snapshot the current value so __exit__ can restore it.
        self.old_value = getattr(obj, attribute)

    def __enter__(self):
        setattr(self.obj, self.attribute, self.new_value)

    def __exit__(self, exc_type, exc_value, traceback):
        setattr(self.obj, self.attribute, self.old_value)
|
from flask import Flask
from flask import Response
from flask import redirect
from flask import url_for
from oop import override
from template_context import context
_route_extensions = []
def route_extension_method(route_extension):
# type: (callable) -> callable
_route_extensions.append(route_extension)
return route_extension
@route_extension_method
def url(route_func):
    # type: (callable) -> str
    """Return the URL for *route_func*; attached to each route as `.url()`."""
    # NOTE(review): `func_name` is a Python 2-only function attribute
    # (`__name__` in Python 3) — this module appears to target Python 2.
    return url_for(route_func.func_name)
@route_extension_method
def route_to(route_func):
    # type: (callable) -> Response
    """Redirect to *route_func*'s URL; attached to each route as `.route_to()`."""
    return redirect(url_for(route_func.func_name))
@override(Flask)
def route(_super, app, rule, **options):
    # type: (callable, Flask, str, dict[str, any]) -> callable
    """Extended Flask.route that attaches every registered route extension
    (e.g. `.url`, `.route_to`) to the decorated view function and registers
    the view in the template context under its own name."""
    def decorator(route_func):
        # type: (callable) -> callable
        route_func = _super(app, rule, **options)(route_func)
        for _route_extension in _route_extensions:
            func_name = _route_extension.func_name

            # Bind the current extension as a default argument: a plain
            # closure would capture the loop variable late, so every attached
            # method would end up calling the *last* registered extension
            # instead of its own.
            def route_extension(_route_extension=_route_extension):
                return _route_extension(route_func)

            route_extension.func_name = func_name
            setattr(route_func, func_name, route_extension)
        # add to template context
        context[route_func.func_name] = route_func
        return route_func
    return decorator
|
# Prefer the installed `simplegeneric` package; fall back to the bundled
# pure-Python copy when it is not available.
try:
    from simplegeneric import *
except ImportError:
    from _simplegeneric import *
|
import SQLDataManager
import DateParser
import ICSFileCreator
import EmailReceiver
import EmailMessageValidator
import EmailSender
import Cleanup
import datetime
def main():
    """Process unread schedule-request emails: build an .ics reply and send it."""
    # NOTE(review): Python 2 script (print statement below).
    received_emails = EmailReceiver.get_unread_emails()
    validated_emails = EmailMessageValidator.validate_messages(received_emails)
    for email in validated_emails:
        __create_ics_file(email)
        EmailSender.send_email_from_gmail(email)
        # Remove temporary attachment/ics files after each send.
        Cleanup.clean_up()
        print "Sent out email! " + str(datetime.datetime.now()) + " - " + email.from_address + " - " + email.subject
def __create_ics_file(email):
    """Build an .ics calendar file for the schedule referenced by *email*."""
    email.download_attachment()
    # The subject encodes the requested month/year, parsed by DateParser.
    month, year = DateParser.get_month_and_year(email.subject)
    calendar_entries = SQLDataManager.get_schedule_data(month, year)
    ICSFileCreator.create_ics_file(calendar_entries)
if __name__ == "__main__":
main() |
import json
from flask import Flask, render_template, Response, request, make_response
from flask_restful import Resource, Api, reqparse
from datetime import datetime as dt, timedelta as td, date as d
from flask_cors import CORS
from urllib.request import urlopen, Request
# Flask application wired with flask-restful routing and permissive CORS.
app = Flask(__name__)
api = Api(app)
CORS(app)
# Load the prayer-name translation table once at startup.
with open('./translate.json', "r", encoding="utf8") as data:
    TRANSLATED_PRAYER_NAMES = json.load(data)
def cleanTimeText(prayer_value):
    """Normalize an Aladhan timing like '03:19 (CET)' to '03:19:00Z'."""
    hours_minutes = prayer_value[:5]
    return "{}:00Z".format(hours_minutes)
# Maps each prayer to the following timing, which marks when it ends.
end_times_of_prayers = {
    "Fajr": "Sunrise",
    "Sunrise": "Dhuhr",
    "Dhuhr": "Asr",
    "Asr": "Maghrib",
    "Maghrib": "Isha",
    "Isha": "Midnight"
}
# dart dateTime format => 2019-08-15 03:19:00Z"
def timesConverterToDartDateTimeFormat(lat, lng, language, only_today):
    """Fetch this month's prayer times from the Aladhan API and convert each
    prayer to a [start, end] pair in Dart DateTime format, keyed by date.
    When *only_today* is not None, return only today's list."""
    json_data = {}
    # get the dataset
    url = f"http://api.aladhan.com/v1/calendar?latitude={lat}&longitude={lng}"
    response = urlopen(Request(url, headers={'User-Agent': 'Mozilla/5.0'}))
    # convert bytes to string type and string type to dict
    string = response.read().decode('utf-8')
    json_obj = json.loads(string)
    today = d.today()
    # YYYY-mm-dd
    today_date = today.strftime("%Y-%m-%d")
    # get all data objects from the json request
    for data in json_obj.get("data"):
        # convert the API's dd-mm-YYYY date to ISO YYYY-mm-dd
        date_of_this_object = dt.strptime(data.get("date").get(
            "gregorian").get("date"), '%d-%m-%Y').strftime('%Y-%m-%d')
        prayer_times_of_this_date = []
        # get all prayerKeys and prayerValues from timings from the api request
        for prayer_key, prayer_value in data.get("timings").items():
            if language is None:
                language = "english"
            selectedTranslatingLanguage = TRANSLATED_PRAYER_NAMES[language]
            # only emit prayers present in the translation table
            if prayer_key in selectedTranslatingLanguage:
                # start date of prayer
                start_date = (date_of_this_object + " " +
                              cleanTimeText(prayer_value))
                # end date of prayer = start of the following timing
                end_date = (date_of_this_object + " " + cleanTimeText(data.get("timings").get(
                    end_times_of_prayers.get(prayer_key))))
                if prayer_key == "Isha":
                    # Isha ends at Midnight, i.e. on the next calendar day
                    end_date = ((dt.strptime(date_of_this_object, '%Y-%m-%d') + td(days=1)).strftime('%Y-%m-%d') + " " + cleanTimeText(
                        data.get("timings").get(end_times_of_prayers.get(prayer_key))))
                prayer_times_of_this_date.append({selectedTranslatingLanguage[prayer_key]:
                                                  [
                                                      start_date,
                                                      end_date
                                                  ]
                                                  }
                                                 )
        # set prayertimes to json_data var to return it back as api
        json_data[date_of_this_object] = prayer_times_of_this_date
    if only_today is not None:
        json_data = json_data.get(today_date)
        pass
    return json_data
class GebetsZeiten(Resource):
    """REST resource returning prayer times for a location."""

    # Query-string parameters: lat/lng required; language and today optional.
    parser = reqparse.RequestParser()
    parser.add_argument('lat', required=True)
    parser.add_argument('lng', required=True)
    parser.add_argument('language', required=False)
    parser.add_argument('today', required=False)

    def get(self):
        """Return converted prayer times for the requested location."""
        args = self.parser.parse_args()
        return timesConverterToDartDateTimeFormat(args.get('lat', False), args.get('lng', False), args.get('language', None), args.get('today', None))


api.add_resource(GebetsZeiten, "/")
class PrivacyPolicy(Resource):
    """Static HTML page: the privacy policy."""

    # The no-op `__init__(self): pass` was removed: it added nothing and
    # would prevent flask-restful from passing resource constructor args.

    def get(self):
        """Render and return the privacy-policy template as text/html."""
        headers = {'Content-Type': 'text/html'}
        return make_response(render_template('privacy_policy.html', title='Home', user='user'), 200, headers)


api.add_resource(PrivacyPolicy, "/privacy-policy")
class TermsAndConditions(Resource):
    """Static HTML page: the terms and conditions."""

    # The no-op `__init__(self): pass` was removed: it added nothing and
    # would prevent flask-restful from passing resource constructor args.

    def get(self):
        """Render and return the terms-and-conditions template as text/html."""
        headers = {'Content-Type': 'text/html'}
        return make_response(render_template('terms_and_conditions.html', title='Home', user='user'), 200, headers)


api.add_resource(TermsAndConditions, "/terms-and-conditions")
if __name__ == '__main__':
    # Listen on all interfaces so the API is reachable from outside the host.
    app.run(port=5000, host="0.0.0.0")
|
#!/usr/bin/env python3
# Standard libraries
from re import error, escape, search
# Lists class
# Lists class
class Lists:
    """Helpers for matching a name against a list of item patterns."""

    # Match
    @staticmethod
    def match(items, name, ignore_case, no_regex):
        """Return True when *name* matches any entry of *items*.

        An exact membership test always runs first; unless *no_regex* is set,
        each item is then tried as a substring (optionally case-insensitive)
        and finally as a regular expression against the escaped name.
        """
        # Exact match against the raw list
        if name in items:
            return True
        # Exact-only mode stops here
        if no_regex:
            return False
        for pattern in items:
            # Plain substring containment
            if pattern in name:
                return True
            # Case-insensitive containment
            if ignore_case and pattern.lower() in name.lower():
                return True
            # Real regex search; invalid patterns are silently skipped
            try:
                if search(pattern, escape(name)):
                    return True
                if ignore_case and search(pattern.lower(), escape(name.lower())):
                    return True
            except error:
                pass
        return False
|
#django
from django.db import models
#utils
from cinema.utils.models import CinemaModel
class Category(CinemaModel):
    """
    Category model.

    A movie category such as children or adults; each category carries its
    own ticket price for the associated movie.
    """

    # Human-readable category name (e.g. "children", "adults").
    category = models.CharField('movie category', max_length=100)
    # Ticket price for this category (the "Q" in __str__ suggests Quetzal).
    price_ticket = models.FloatField(default=0)
    comments = models.TextField(blank=True)
    # Each category row belongs to a single movie.
    movie = models.ForeignKey("movies.Movie", on_delete=models.CASCADE)

    def __str__(self):
        """Return summary."""
        return 'movie: {} | category: {} | Price: Q{}'.format(
            self.movie.title,
            self.category,
            self.price_ticket
        )
import argparse
import sys
from sys import stdin
import numpy as np
from utils import read_eval_file
def get_score(prop, props, power):
    """Score *prop* by its rank in *props* with a Minkowski-style falloff.

    Rank 0 scores 1.0; a prop absent from the list ranks len(props) and
    scores 0.0.
    """
    if prop in props:
        rank = props.index(prop)
    else:
        # Absent props are ranked one past the end of the list.
        rank = len(props)
    return (1 - (rank / len(props)) ** power) ** (1 / power)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('eval_file', type=str, help='Evaluation file.')
    parser.add_argument('-p', '--power', default=2, type=int, help='Minkowski power.')
    args, _ = parser.parse_known_args(sys.argv[1:])
    eval_data = read_eval_file(args.eval_file)
    # One entry per stdin line: "word<TAB>prop1<TAB>prop2..."
    words = stdin.read().split('\n')
    words.remove('')
    scores = []
    for line in words:
        line = line.split('\t')
        word = line[0]
        props = line[1:]
        # Best score over all gold targets for this word.
        score = max((get_score(target, props, args.power) for target in eval_data[word]))
        scores.append(score)
    # Report the mean score over all words.
    print(np.mean(scores))
|
import os.path
import os
import urllib2
import urllib
from urllib2 import Request
from urllib import urlencode
from requests import post
import sys
import pandas
import json
# Exploratory script comparing NDAR column names (sub_med.csv) against the
# columns of a REDCap export of the ace_subject_medical_history form.
# NOTE(review): Python 2 (urllib2); the API token below is blank and must be
# supplied for the requests to succeed — never commit a real token.
x = pandas.read_csv("sub_med.csv")
ndar_compare = x.columns.values.tolist()
token = ""
data = {
    'token': '',
    'content': 'record',
    'format': 'json',
    'type': 'flat',
    'forms[0]': 'ace_subject_medical_history',
    'rawOrLabel': 'raw',
    'rawOrLabelHeaders': 'raw',
    'exportCheckboxLabel': 'false',
    'exportSurveyFields': 'false',
    'exportDataAccessGroups': 'false',
    'returnFormat': 'json'
}
# Same request issued twice: once via `requests`, once via urllib2; only the
# urllib2 response is actually parsed below.
r = post("https://redcap.duke.edu/redcap/api/", data)
r.content
d = urlencode(data)
req = urllib2.Request("https://redcap.duke.edu/redcap/api/", d)
response = urllib2.urlopen(req)
file = response.read()
result = json.loads(file)
df = pandas.DataFrame.from_records(result)
redcap_compare = df.columns.values
test = []
test2 = []
# Strip checkbox suffixes ("field___1" -> "field").
for i in range(len(redcap_compare)):
    test.append(str(redcap_compare[i].split("___")[0]))
[x for x in test if "___" in x]
redcap_compare = list(set(test))
# Normalise to lower_snake_case without parentheses.
for i in range(len(redcap_compare)):
    test2.append(str(redcap_compare[i].strip().lower().replace(' ', '_').replace('(', '').replace(')', '')))
print(len(ndar_compare), len(redcap_compare))
# Overlap and NDAR-only column sets.
redcap_dev = list(set(ndar_compare) & set(test))
ndar_dev = list(set(ndar_compare) - set(test))
print(len(set(redcap_dev)))
print(ndar_dev)
test_df1 = pandas.DataFrame([], columns = [list(ndar_compare)])
test = []
for i in range(len(list(test_df1.columns))):
    test.append(str(test_df1.columns[i][-1]).strip("(").strip(")").strip("'").strip("'").strip(","))
# print(test)
# print(type(test[0]))
test_df2 = pandas.DataFrame(columns = test, index=range(0))
test_df2.columns = test_df2.columns.str.strip("'")
test_df3 = pandas.concat([df,test_df2])
# print(test_df3)
# for i in range(len(test)):
#     try:
#         test_df3 = test_df3[[test[i]]]
#     except:
#         pass
# print(test_df2.columns)
# Keep only the columns common to the REDCap export and the NDAR template.
df = test_df3[test_df3.columns & test_df2.columns]
print(df)
# x = pandas.concat([df, test_df1], ignore_index=True, axis=0)
# print(x)
|
# coding: utf-8
"""
This scripts scrapy the DOI of the scielo documents from the website
and load them into the Articlemeta, this process is necessary because the
legacy databases does not have the doi persisted for each document.
"""
import re
import os
import sys
import argparse
import logging
import logging.config
from datetime import datetime, timedelta
from lxml import etree
from io import BytesIO
import requests
from xylose.scielodocument import Article
from crossref.restful import Journals
from articlemeta import controller
logger = logging.getLogger(__name__)

# Runtime configuration taken from the environment.
SENTRY_DSN = os.environ.get('SENTRY_DSN', None)
LOGGING_LEVEL = os.environ.get('LOGGING_LEVEL', 'DEBUG')
MONGODB_HOST = os.environ.get('MONGODB_HOST', None)

# Loose DOI pattern: two digits, a dot, a digit, then "prefix/suffix".
DOI_REGEX = re.compile(r'[0-9][0-9]\.[0-9].*/.*\S')

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'console': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            'datefmt': '%H:%M:%S',
        },
    },
    'handlers': {
        'console': {
            'level': LOGGING_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'console'
        }
    },
    'loggers': {
        '': {
            'handlers': ['console'],
            'level': LOGGING_LEVEL,
            'propagate': False,
        },
        'processing.load_doi': {
            'level': LOGGING_LEVEL,
            'propagate': True,
        },
    }
}

if SENTRY_DSN:
    # Mirror ERROR-level records to Sentry when a DSN is configured.
    LOGGING['handlers']['sentry'] = {
        'level': 'ERROR',
        'class': 'raven.handlers.logging.SentryHandler',
        'dsn': SENTRY_DSN,
    }
    LOGGING['loggers']['']['handlers'].append('sentry')

# Default lower bound for incremental runs: 15 days ago, as an ISO date.
FROM = datetime.now() - timedelta(days=15)
FROM = FROM.isoformat()[:10]
def collections_acronym(articlemeta_db):
    """Return the acronym code of every collection in the database."""
    cursor = articlemeta_db['collections'].find({}, {'_id': 0})
    return [collection['code'] for collection in cursor]
def collection_info(articlemeta_db, collection):
    """Fetch the metadata record for *collection*, matched by acronym."""
    return articlemeta_db['collections'].find_one({'acron': collection}, {'_id': 0})
def load_documents(articlemeta_db, collection, all_records=False):
    """Yield ``xylose`` Article objects for *collection*.

    When *all_records* is False only documents that still lack a ``doi``
    field are yielded; citations are always excluded from the payload.

    The cursor is opened with ``no_cursor_timeout=True``, so it must be
    closed explicitly -- a try/finally guarantees that even when the
    consumer abandons the generator early (the original only closed it
    after a complete iteration, leaking the server-side cursor).
    """
    fltr = {
        'collection': collection
    }
    if all_records is False:
        fltr['doi'] = {'$exists': 0}
    documents = articlemeta_db['articles'].find(
        fltr,
        {'_id': 0, 'citations': 0},
        no_cursor_timeout=True
    )
    try:
        for document in documents:
            yield Article(document)
    finally:
        documents.close()
def do_request(url, json=True):
    """GET *url* with the ArticleMeta user agent.

    Returns the parsed JSON body when *json* is True, the raw text
    otherwise, or None when the request fails.
    """
    headers = {
        'User-Agent': 'SciELO Processing ArticleMeta: LoadDoi'
    }
    try:
        document = requests.get(url, headers=headers)
    except requests.RequestException:
        # Narrowed from a bare ``except``: only network/HTTP failures are
        # expected here; a bare except would also swallow KeyboardInterrupt.
        logger.error(u'HTTP request error for: %s', url)
        return None
    if json:
        return document.json()
    return document.text
def scrap_doi(data):
    """Extract the DOI from a SciELO article HTML page.

    Looks for the ``citation_doi`` meta tag and validates its content
    against DOI_REGEX. Returns the DOI string, or None when absent.
    """
    data = ' '.join([i.strip() for i in data.split('\n')])
    parser = etree.HTMLParser(remove_blank_text=True, encoding='utf-8')
    tree = etree.parse(BytesIO(data.encode('utf-8')), parser)
    etree_doi = tree.find('.//meta[@name="citation_doi"]')
    if etree_doi is None:
        logger.debug('DOI not found')
        return None
    # BUG FIX: ``re.Pattern.match`` returns None when the content does not
    # look like a DOI; the original called ``.group()`` on it unconditionally
    # and crashed with AttributeError instead of reporting "not found".
    match = DOI_REGEX.match(etree_doi.get('content') or '')
    if match is None:
        logger.debug('DOI not found')
        return None
    return match.group()
def query_to_crossref(document):
    """Query the Crossref API for the DOI of *document*.

    Matches by title, first author and publication date; returns the DOI
    only when exactly one work matches, otherwise None.
    """
    title = document.original_title()
    # Guard moved before the author lookup so documents without a title do
    # not needlessly touch ``first_author``.
    if title is None:
        return None
    author = ' '.join([document.first_author.get('surname', ''), document.first_author.get('given_names', '')]).strip()
    pub_date = document.publication_date
    result = [i for i in Journals().works(document.journal.scielo_issn).query(title=title, author=author).filter(from_pub_date=pub_date, until_pub_date=pub_date)]
    if len(result) != 1:
        return None
    # BUG FIX: ``result`` is a list, so ``result.get(...)`` raised
    # AttributeError; look up 'DOI' on the single matching work instead.
    return result[0].get('DOI', None)
def run(articlemeta_db, collections, all_records=False, scrap_scielo=False,
        query_crossref=False):
    """Attach DOIs to the articles of *collections*.

    For each document, optionally scrape the SciELO article page and/or
    query Crossref, then persist the first DOI found.
    """
    if not isinstance(collections, list):
        # Message typo fixed ("list o collection").
        logger.error('Collections must be a list of collection acronyms')
        exit()
    for collection in collections:
        coll_info = collection_info(articlemeta_db, collection)
        logger.info(u'Loading DOI for %s', coll_info['domain'])
        logger.info(u'Using mode all_records %s', str(all_records))
        for document in load_documents(articlemeta_db, collection, all_records=all_records):
            doi = None
            if scrap_scielo is True:
                # BUG FIX: ``data`` was undefined when the request failed,
                # producing a NameError that the old bare except mislabelled
                # as a scraping failure. Initialize it and skip scraping
                # when the page could not be fetched.
                data = None
                try:
                    data = do_request(document.html_url(), json=False)
                except Exception:
                    logger.error('Fail to load url: %s', document.html_url())
                if data is not None:
                    try:
                        doi = scrap_doi(data)
                    except Exception:
                        logger.error('Fail to scrap: %s', document.publisher_id)
            if query_crossref is True and doi is None:
                doi = query_to_crossref(document)
            if doi is None:
                logger.debug('No DOI defined for: %s', document.publisher_id)
                continue
            articlemeta_db['articles'].update(
                {'code': document.publisher_id, 'collection': document.collection_acronym},
                {'$set': {'doi': doi}}
            )
            logger.debug('DOI Found %s: %s', document.publisher_id, doi)
def main():
    """CLI entry point: parse arguments, configure logging, and run."""
    # NOTE: reads MONGODB_HOST directly with a localhost default, unlike the
    # module-level MONGODB_HOST constant which defaults to None.
    db_dsn = os.environ.get('MONGODB_HOST', 'mongodb://localhost:27017/articlemeta')
    try:
        articlemeta_db = controller.get_dbconn(db_dsn)
    except Exception:
        # Narrowed from a bare ``except``; connection failure is fatal.
        print('Fail to connect to:', db_dsn)
        sys.exit(1)
    _collections = collections_acronym(articlemeta_db)
    parser = argparse.ArgumentParser(
        description="Load documents DOI from SciELO website"
    )
    parser.add_argument(
        '--collection',
        '-c',
        choices=_collections,
        help='Collection acronym'
    )
    parser.add_argument(
        '--all_records',
        '-a',
        action='store_true',
        help='Apply processing to all records or just records without the license parameter'
    )
    parser.add_argument(
        '--scrap_scielo',
        '-s',
        action='store_true',
        help='Try to Scrapy SciELO Website, articles page to get the DOI number'
    )
    parser.add_argument(
        '--query_crossref',
        '-d',
        action='store_true',
        help='Try to query to crossref API for the DOI number'
    )
    parser.add_argument(
        '--logging_level',
        '-l',
        default=LOGGING_LEVEL,
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Logggin level'
    )
    args = parser.parse_args()
    # Propagate the chosen level to every configured handler and logger.
    LOGGING['handlers']['console']['level'] = args.logging_level
    for lg, content in LOGGING['loggers'].items():
        content['level'] = args.logging_level
    logging.config.dictConfig(LOGGING)
    collections = [args.collection] if args.collection else _collections
    run(articlemeta_db, collections, args.all_records, args.scrap_scielo, args.query_crossref)
|
from . import game_interface |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
                        check_dtype=True, check_dates=False,
                        check_less_precise=False, skipna_alternative=None):
    """
    Check that operator opname works as advertised on frame
    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    alternative : function
        Function that opname is tested against; i.e. "frame.opname()" should
        equal "alternative(frame)".
    frame : DataFrame
        The object that the tests are executed on
    has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skip_na"
    check_dtype : bool, default True
        Whether the dtypes of the result of "frame.opname()" and
        "alternative(frame)" should be checked.
    check_dates : bool, default false
        Whether opname should be tested on a Datetime Series
    check_less_precise : bool, default False
        Whether results should only be compared approximately;
        passed on to tm.assert_series_equal
    skipna_alternative : function, default None
        NaN-safe version of alternative
    """
    f = getattr(frame, opname)
    if check_dates:
        # Datetime-only and mixed datetime/int frames must reduce to a Series.
        df = DataFrame({'b': date_range('1/1/2001', periods=2)})
        result = getattr(df, opname)()
        assert isinstance(result, Series)
        df['a'] = lrange(len(df))
        result = getattr(df, opname)()
        assert isinstance(result, Series)
        assert len(result)
    if has_skipna:
        def wrapper(x):
            return alternative(x.values)
        skipna_wrapper = tm._make_skipna_wrapper(alternative,
                                                 skipna_alternative)
        # skipna=False path: compare against the raw (non-NaN-safe) alternative.
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper),
                               check_dtype=check_dtype,
                               check_less_precise=check_less_precise)
        # HACK: win32
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                               check_dtype=False,
                               check_less_precise=check_less_precise)
    else:
        skipna_wrapper = alternative
    # Default (skipna) path: compare against the NaN-safe wrapper.
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
                           check_dtype=check_dtype,
                           check_less_precise=check_less_precise)
    if opname in ['sum', 'prod']:
        expected = frame.apply(skipna_wrapper, axis=1)
        tm.assert_series_equal(result1, expected, check_dtype=False,
                               check_less_precise=check_less_precise)
    # check dtypes
    if check_dtype:
        lcd_dtype = frame.values.dtype
        assert lcd_dtype == result0.dtype
        assert lcd_dtype == result1.dtype
    # bad axis
    with pytest.raises(ValueError, match='No axis named 2'):
        f(axis=2)
    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        if opname in ['sum', 'prod']:
            unit = 1 if opname == 'prod' else 0  # result for empty sum/prod
            expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
            tm.assert_series_equal(r0, expected)
            expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
            tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
                       has_numeric_only=False):
    """
    Check that API for operator opname works as advertised on frame
    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    float_frame : DataFrame
        DataFrame with columns of type float
    float_string_frame : DataFrame
        DataFrame with both float and string columns
    has_numeric_only : bool, default False
        Whether the method "opname" has the kwarg "numeric_only"
    """
    # The op must at least run on a mixed-dtype frame, on both axes.
    mixed_op = getattr(float_string_frame, opname)
    mixed_op(axis=0)
    mixed_op(axis=1)
    if has_numeric_only:
        # And accept the numeric_only keyword on both frame kinds.
        mixed_op(axis=0, numeric_only=True)
        mixed_op(axis=1, numeric_only=True)
        float_op = getattr(float_frame, opname)
        float_op(axis=0, numeric_only=False)
        float_op(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
    """
    Check that bool operator opname works as advertised on frame
    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    alternative : function
        Function that opname is tested against; i.e. "frame.opname()" should
        equal "alternative(frame)".
    frame : DataFrame
        The object that the tests are executed on
    has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skip_na"
    """
    f = getattr(frame, opname)
    if has_skipna:
        def skipna_wrapper(x):
            # NaN-safe comparison target: drop missing values first.
            nona = x.dropna().values
            return alternative(nona)
        def wrapper(x):
            return alternative(x.values)
        # skipna=False path against the raw alternative.
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                               check_dtype=False)  # HACK: win32
    else:
        skipna_wrapper = alternative
        wrapper = alternative
    # Default (skipna) path against the NaN-safe wrapper.
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
    tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                           check_dtype=False)
    # bad axis
    with pytest.raises(ValueError, match='No axis named 2'):
        f(axis=2)
    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        # any([]) is False and all([]) is True under skipna.
        if opname == 'any':
            assert not r0.any()
            assert not r1.any()
        else:
            assert r0.all()
            assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
                       has_bool_only=False):
    """
    Check that API for boolean operator opname works as advertised on frame
    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns and some missing values
    float_string_frame : DataFrame
        DataFrame with both float and string columns
    has_bool_only : bool, default False
        Whether the method "opname" has the kwarg "bool_only"
    """
    # The op must run on a mixed-type frame (with an extra bool column
    # injected), on both axes.
    mixed = float_string_frame
    mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
    mixed_op = getattr(mixed, opname)
    mixed_op(axis=0)
    mixed_op(axis=1)
    if has_bool_only:
        # And accept the bool_only keyword on both frame kinds.
        mixed_op(axis=0, bool_only=True)
        mixed_op(axis=1, bool_only=True)
        bool_op = getattr(bool_frame_with_na, opname)
        bool_op(axis=0, bool_only=False)
        bool_op(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
    @td.skip_if_no_scipy
    def test_corr_pearson(self, float_frame):
        # Pearson corr with NaNs seeded via (legacy) chained assignment.
        float_frame['A'][:5] = np.nan
        float_frame['B'][5:10] = np.nan
        self._check_method(float_frame, 'pearson')
    @td.skip_if_no_scipy
    def test_corr_kendall(self, float_frame):
        # Kendall corr with NaNs seeded via (legacy) chained assignment.
        float_frame['A'][:5] = np.nan
        float_frame['B'][5:10] = np.nan
        self._check_method(float_frame, 'kendall')
    @td.skip_if_no_scipy
    def test_corr_spearman(self, float_frame):
        # Spearman corr with NaNs seeded via (legacy) chained assignment.
        float_frame['A'][:5] = np.nan
        float_frame['B'][5:10] = np.nan
        self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
    @td.skip_if_no_scipy
    def test_corr_non_numeric(self, float_frame, float_string_frame):
        float_frame['A'][:5] = np.nan
        float_frame['B'][5:10] = np.nan
        # exclude non-numeric types
        result = float_string_frame.corr()
        expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
        tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
    def test_corrwith(self, datetime_frame):
        # corrwith over rows/columns, drop behavior, and non-time-series data.
        a = datetime_frame
        noise = Series(np.random.randn(len(a)), index=a.index)
        b = datetime_frame.add(noise, axis=0)
        # make sure order does not matter
        b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
        del b['B']
        colcorr = a.corrwith(b, axis=0)
        tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
        rowcorr = a.corrwith(b, axis=1)
        tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
        # drop=True removes labels missing on either side.
        dropped = a.corrwith(b, axis=0, drop=True)
        tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
        assert 'B' not in dropped
        dropped = a.corrwith(b, axis=1, drop=True)
        assert a.index[-1] not in dropped.index
        # non time-series data
        index = ['a', 'b', 'c', 'd', 'e']
        columns = ['one', 'two', 'three', 'four']
        df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
        df2 = DataFrame(np.random.randn(4, 4),
                        index=index[:4], columns=columns)
        correls = df1.corrwith(df2, axis=1)
        for row in index[:4]:
            tm.assert_almost_equal(correls[row],
                                   df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
    def test_bool_describe_in_mixed_frame(self):
        # describe() on a mixed frame keeps only numeric columns by default.
        df = DataFrame({
            'string_data': ['a', 'b', 'c', 'd', 'e'],
            'bool_data': [True, True, False, False, False],
            'int_data': [10, 20, 30, 40, 50],
        })
        # Integer data are included in .describe() output,
        # Boolean and string data are not.
        result = df.describe()
        expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
                                           10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        tm.assert_frame_equal(result, expected)
        # Top value is a boolean value that is False
        result = df.describe(include=['bool'])
        expected = DataFrame({'bool_data': [5, 2, False, 3]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
    def test_describe_bool_frame(self):
        # GH 13891: describe() treats bool columns like categoricals
        # (count/unique/top/freq) and drops them when numeric columns exist.
        df = pd.DataFrame({
            'bool_data_1': [False, False, True, True],
            'bool_data_2': [False, True, True, True]
        })
        result = df.describe()
        expected = DataFrame({'bool_data_1': [4, 2, True, 2],
                              'bool_data_2': [4, 2, True, 3]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
        df = pd.DataFrame({
            'bool_data': [False, False, True, True, False],
            'int_data': [0, 1, 2, 3, 4]
        })
        result = df.describe()
        expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
                                           2, 3, 4]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        tm.assert_frame_equal(result, expected)
        df = pd.DataFrame({
            'bool_data': [False, False, True, True],
            'str_data': ['a', 'b', 'c', 'a']
        })
        result = df.describe()
        expected = DataFrame({'bool_data': [4, 2, True, 2],
                              'str_data': [4, 3, 'a', 2]},
                             index=['count', 'unique', 'top', 'freq'])
        tm.assert_frame_equal(result, expected)
    def test_describe_categorical(self):
        # describe() on categorical data behaves like string data.
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                                   right=False, labels=cat_labels)
        cat = df
        # Categoricals should not show up together with numerical columns
        result = cat.describe()
        assert len(result.columns) == 1
        # In a frame, describe() for the cat should be the same as for string
        # arrays (count, unique, top, freq)
        cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
                          ordered=True)
        s = Series(cat)
        result = s.describe()
        expected = Series([4, 2, "b", 3],
                          index=['count', 'unique', 'top', 'freq'])
        tm.assert_series_equal(result, expected)
        cat = Series(Categorical(["a", "b", "c", "c"]))
        df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
        result = df3.describe()
        tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
    def test_describe_categorical_columns(self):
        # GH 11558: a CategoricalIndex on the columns axis survives describe(),
        # keeping the full category set while dropping the non-numeric column.
        columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
                                      ordered=True, name='XXX')
        df = DataFrame({'int1': [10, 20, 30, 40, 50],
                        'int2': [10, 20, 30, 40, 50],
                        'obj': ['A', 0, None, 'X', 1]},
                       columns=columns)
        result = df.describe()
        exp_columns = pd.CategoricalIndex(['int1', 'int2'],
                                          categories=['int1', 'int2', 'obj'],
                                          ordered=True, name='XXX')
        expected = DataFrame({'int1': [5, 30, df.int1.std(),
                                       10, 20, 30, 40, 50],
                              'int2': [5, 30, df.int2.std(),
                                       10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'],
                             columns=exp_columns)
        tm.assert_frame_equal(result, expected)
        tm.assert_categorical_equal(result.columns.values,
                                    expected.columns.values)
    def test_describe_datetime_columns(self):
        # A DatetimeIndex on the columns axis keeps its freq/tz metadata
        # through describe().
        columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                                   freq='MS', tz='US/Eastern', name='XXX')
        df = DataFrame({0: [10, 20, 30, 40, 50],
                        1: [10, 20, 30, 40, 50],
                        2: ['A', 0, None, 'X', 1]})
        df.columns = columns
        result = df.describe()
        exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
                                       freq='MS', tz='US/Eastern', name='XXX')
        expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
                                  10, 20, 30, 40, 50],
                              1: [5, 30, df.iloc[:, 1].std(),
                                  10, 20, 30, 40, 50]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        expected.columns = exp_columns
        tm.assert_frame_equal(result, expected)
        assert result.columns.freq == 'MS'
        assert result.columns.tz == expected.columns.tz
    def test_describe_timedelta_values(self):
        # GH 6145: describe() on timedelta columns yields Timedelta stats,
        # and the frame repr formats them as day/time strings.
        t1 = pd.timedelta_range('1 days', freq='D', periods=5)
        t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
        df = pd.DataFrame({'t1': t1, 't2': t2})
        expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
                                     df.iloc[:, 0].std(),
                                     pd.Timedelta('1 days'),
                                     pd.Timedelta('2 days'),
                                     pd.Timedelta('3 days'),
                                     pd.Timedelta('4 days'),
                                     pd.Timedelta('5 days')],
                              't2': [5, pd.Timedelta('3 hours'),
                                     df.iloc[:, 1].std(),
                                     pd.Timedelta('1 hours'),
                                     pd.Timedelta('2 hours'),
                                     pd.Timedelta('3 hours'),
                                     pd.Timedelta('4 hours'),
                                     pd.Timedelta('5 hours')]},
                             index=['count', 'mean', 'std', 'min', '25%',
                                    '50%', '75%', 'max'])
        result = df.describe()
        tm.assert_frame_equal(result, expected)
        exp_repr = ("                          t1                      t2\n"
                    "count                      5                       5\n"
                    "mean         3 days 00:00:00         0 days 03:00:00\n"
                    "std    1 days 13:56:50.394919  0 days 01:34:52.099788\n"
                    "min          1 days 00:00:00         0 days 01:00:00\n"
                    "25%          2 days 00:00:00         0 days 02:00:00\n"
                    "50%          3 days 00:00:00         0 days 03:00:00\n"
                    "75%          4 days 00:00:00         0 days 04:00:00\n"
                    "max          5 days 00:00:00         0 days 05:00:00")
        assert repr(result) == exp_repr
    def test_describe_tz_values(self, tz_naive_fixture):
        # GH 21332: describe(include='all') on a frame mixing numeric and
        # tz-aware datetime columns produces a combined stat index.
        tz = tz_naive_fixture
        s1 = Series(range(5))
        start = Timestamp(2018, 1, 1)
        end = Timestamp(2018, 1, 5)
        s2 = Series(date_range(start, end, tz=tz))
        df = pd.DataFrame({'s1': s1, 's2': s2})
        expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
                                     2, 1.581139, 0, 1, 2, 3, 4],
                              's2': [5, 5, s2.value_counts().index[0], 1,
                                     start.tz_localize(tz),
                                     end.tz_localize(tz), np.nan, np.nan,
                                     np.nan, np.nan, np.nan, np.nan, np.nan]},
                             index=['count', 'unique', 'top', 'freq', 'first',
                                    'last', 'mean', 'std', 'min', '25%', '50%',
                                    '75%', 'max']
                             )
        result = df.describe(include='all')
        tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
    @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
                                        'std', 'skew', 'min', 'max'])
    def test_stat_operators_attempt_obj_array(self, method):
        # GH#676: reductions on object-dtype frames should coerce to float
        # and match the float64 result (exactly for sum/prod).
        data = {
            'a': [-0.00049987540199591344, -0.0016467257772919831,
                  0.00067695870775883013],
            'b': [-0, -0, 0.0],
            'c': [0.00031111847529610595, 0.0014902627951905339,
                  -0.00094099200035979691]
        }
        df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
        df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                         2: [np.nan, 4]}, dtype=object)
        for df in [df1, df2]:
            assert df.values.dtype == np.object_
            result = getattr(df, method)(1)
            expected = getattr(df.astype('f8'), method)(1)
            if method in ['sum', 'prod']:
                tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludeds_datetimes(self, tz):
        # https://github.com/pandas-dev/pandas/issues/24752
        # Our long-term desired behavior is unclear, but the behavior in
        # 0.24.0rc1 was buggy.
        # A datetime-only frame currently reduces to an empty Series.
        df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
        result = df.mean()
        expected = pd.Series()
        tm.assert_series_equal(result, expected)
    def test_var_std(self, datetime_frame):
        # var/std honour ddof and nanops.nanvar never goes negative,
        # with or without bottleneck.
        result = datetime_frame.std(ddof=4)
        expected = datetime_frame.apply(lambda x: x.std(ddof=4))
        tm.assert_almost_equal(result, expected)
        result = datetime_frame.var(ddof=4)
        expected = datetime_frame.apply(lambda x: x.var(ddof=4))
        tm.assert_almost_equal(result, expected)
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nanvar(arr, axis=0)
        assert not (result < 0).any()
        with pd.option_context('use_bottleneck', False):
            result = nanops.nanvar(arr, axis=0)
            assert not (result < 0).any()
    @pytest.mark.parametrize(
        "meth", ['sem', 'var', 'std'])
    def test_numeric_only_flag(self, meth):
        # GH 9201: numeric_only=True drops string columns; numeric_only=False
        # raises when strings cannot be coerced.
        df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a number in str format
        df1.loc[0, 'foo'] = '100'
        df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
        # set one entry to a non-number str
        df2.loc[0, 'foo'] = 'a'
        result = getattr(df1, meth)(axis=1, numeric_only=True)
        expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        result = getattr(df2, meth)(axis=1, numeric_only=True)
        expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
        tm.assert_series_equal(expected, result)
        # df1 has all numbers, df2 has a letter inside
        msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
        with pytest.raises(TypeError, match=msg):
            getattr(df1, meth)(axis=1, numeric_only=False)
        msg = "could not convert string to float: 'a'"
        with pytest.raises(TypeError, match=msg):
            getattr(df2, meth)(axis=1, numeric_only=False)
    def test_sem(self, datetime_frame):
        # Standard error of the mean == std(ddof)/sqrt(n); nanops.nansem
        # never goes negative, with or without bottleneck.
        result = datetime_frame.sem(ddof=4)
        expected = datetime_frame.apply(
            lambda x: x.std(ddof=4) / np.sqrt(len(x)))
        tm.assert_almost_equal(result, expected)
        arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
        result = nanops.nansem(arr, axis=0)
        assert not (result < 0).any()
        with pd.option_context('use_bottleneck', False):
            result = nanops.nansem(arr, axis=0)
            assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
    """Frame-wide kurtosis matches the level-0 cross-section of a
    level-wise kurt on a MultiIndexed frame; result naming follows the
    grouping level."""
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       codes=[[0, 0, 0, 0, 0, 0],
                              [0, 1, 2, 0, 1, 2],
                              [0, 1, 0, 1, 0, 1]])
    df = DataFrame(np.random.randn(6, 3), index=index)
    kurt = df.kurt()
    # single level-0 value 'bar', so the cross-section covers all rows
    kurt2 = df.kurt(level=0).xs('bar')
    tm.assert_series_equal(kurt, kurt2, check_names=False)
    assert kurt.name is None
    assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
    (True, {'A': [12],
            'B': [10.0],
            'C': [1.0],
            'D': ['a'],
            'E': Categorical(['a'], categories=['a']),
            'F': to_datetime(['2000-1-2']),
            'G': to_timedelta(['1 days'])}),
    (False, {'A': [12],
             'B': [10.0],
             'C': [np.nan],
             'D': np.array([np.nan], dtype=object),
             'E': Categorical([np.nan], categories=['a']),
             'F': [pd.NaT],
             'G': to_timedelta([pd.NaT])}),
    (True, {'H': [8, 9, np.nan, np.nan],
            'I': [8, 9, np.nan, np.nan],
            'J': [1, np.nan, np.nan, np.nan],
            'K': Categorical(['a', np.nan, np.nan, np.nan],
                             categories=['a']),
            'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
            'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
            'N': [0, 1, 2, 3]}),
    (False, {'H': [8, 9, np.nan, np.nan],
             'I': [8, 9, np.nan, np.nan],
             'J': [1, np.nan, np.nan, np.nan],
             'K': Categorical([np.nan, 'a', np.nan, np.nan],
                              categories=['a']),
             'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
             'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
             'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
    """mode(dropna=...) per dtype: with dropna=True NaN is ignored, with
    dropna=False NaN competes as a value; only the columns named in
    `expected` are exercised for each case."""
    df = DataFrame({"A": [12, 12, 19, 11],
                    "B": [10, 10, np.nan, 3],
                    "C": [1, np.nan, np.nan, np.nan],
                    "D": [np.nan, np.nan, 'a', np.nan],
                    "E": Categorical([np.nan, np.nan, 'a', np.nan]),
                    "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                    "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                    "H": [8, 8, 9, 9],
                    "I": [9, 9, 8, 8],
                    "J": [1, 1, np.nan, np.nan],
                    "K": Categorical(['a', np.nan, 'a', np.nan]),
                    "L": to_datetime(['2000-1-2', '2000-1-2',
                                      'NaT', 'NaT']),
                    "M": to_timedelta(['1 days', 'nan',
                                       '1 days', 'nan']),
                    "N": np.arange(4, dtype='int64')})
    # restrict to the parametrized columns, sorted for stable ordering
    result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
    expected = DataFrame(expected)
    tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
    """mode(dropna=False) emits a UserWarning when NaN makes the result
    unsortable; the values themselves are still correct."""
    # Check for the warning that is raised when the mode
    # results cannot be sorted
    df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
    expected = DataFrame({'A': ['a', np.nan]})
    with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
        result = df.mode(dropna=False)
        # impose a deterministic order before comparing
        result = result.sort_values(by='A').reset_index(drop=True)
    tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
    """Reductions (min/max/abs) over timedelta64 columns, including mixed
    frames where numeric columns are excluded from row-wise results, and
    dtype preservation after consolidation (GH 3106)."""
    df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                        B=date_range('2012-1-2', periods=3, freq='D'),
                        C=Timestamp('20120101') -
                        timedelta(minutes=5, seconds=5)))
    # A - C is a growing positive timedelta, A - B is a constant -1 day
    diffs = DataFrame(dict(A=df['A'] - df['C'],
                           B=df['A'] - df['B']))
    # min
    result = diffs.min()
    assert result[0] == diffs.loc[0, 'A']
    assert result[1] == diffs.loc[0, 'B']
    result = diffs.min(axis=1)
    assert (result == diffs.loc[0, 'B']).all()
    # max
    result = diffs.max()
    assert result[0] == diffs.loc[2, 'A']
    assert result[1] == diffs.loc[2, 'B']
    result = diffs.max(axis=1)
    assert (result == diffs['A']).all()
    # abs
    result = diffs.abs()
    result2 = abs(diffs)
    expected = DataFrame(dict(A=df['A'] - df['C'],
                              B=df['B'] - df['A']))
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result2, expected)
    # mixed frame
    mixed = diffs.copy()
    mixed['C'] = 'foo'
    mixed['D'] = 1
    mixed['E'] = 1.
    mixed['F'] = Timestamp('20130101')
    # results in an object array
    result = mixed.min()
    expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
                       pd.Timedelta(timedelta(days=-1)),
                       'foo', 1, 1.0,
                       Timestamp('20130101')],
                      index=mixed.columns)
    tm.assert_series_equal(result, expected)
    # excludes numeric
    result = mixed.min(axis=1)
    expected = Series([1, 1, 1.], index=[0, 1, 2])
    tm.assert_series_equal(result, expected)
    # works when only those columns are selected
    result = mixed[['A', 'B']].min(1)
    expected = Series([timedelta(days=-1)] * 3)
    tm.assert_series_equal(result, expected)
    result = mixed[['A', 'B']].min()
    expected = Series([timedelta(seconds=5 * 60 + 5),
                       timedelta(days=-1)], index=['A', 'B'])
    tm.assert_series_equal(result, expected)
    # GH 3106
    df = DataFrame({'time': date_range('20130102', periods=5),
                    'time2': date_range('20130105', periods=5)})
    df['off1'] = df['time2'] - df['time']
    assert df['off1'].dtype == 'timedelta64[ns]'
    df['off2'] = df['time'] - df['time2']
    # consolidation must not coerce the timedelta columns
    df._consolidate_inplace()
    assert df['off1'].dtype == 'timedelta64[ns]'
    assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
    ('sum', 0),
    ('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
    """sum/prod NaN handling via min_count.

    ``unit`` is the reduction identity (0 for sum, 1 for prod), so every
    fully-valid column reduces to ``unit`` and an all-NaN column reduces
    to ``unit`` only when min_count is satisfied, NaN otherwise.
    """
    idx = ['a', 'b', 'c']
    df = pd.DataFrame({"a": [unit, unit],
                       "b": [unit, np.nan],
                       "c": [np.nan, np.nan]})
    # The default (min_count=0): all-NaN column reduces to the identity.
    # BUG FIX: the original stored the bound method without calling it
    # and never compared against `expected`.
    result = getattr(df, method)()
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)
    # min_count=1: the all-NaN column now yields NaN
    result = getattr(df, method)(min_count=1)
    expected = pd.Series([unit, unit, np.nan], index=idx)
    tm.assert_series_equal(result, expected)
    # min_count=0
    result = getattr(df, method)(min_count=0)
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)
    # dropping the first row leaves 'b' with no valid values either
    result = getattr(df.iloc[1:], method)(min_count=1)
    expected = pd.Series([unit, np.nan, np.nan], index=idx)
    tm.assert_series_equal(result, expected)
    # min_count > 1: A has 10 valid values, B exactly 5
    df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
    # BUG FIX: the original built `expected` from `result` itself, a
    # tautology that could never fail; state the expectation explicitly.
    result = getattr(df, method)(min_count=5)
    expected = pd.Series([unit, unit], index=['A', 'B'], dtype='float64')
    tm.assert_series_equal(result, expected)
    result = getattr(df, method)(min_count=6)
    expected = pd.Series([unit, np.nan], index=['A', 'B'])
    tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
    """Timedelta sum honors min_count: all-NaT columns give 0 by default
    and NaT once min_count=1."""
    # prod isn't defined on timedeltas
    idx = ['a', 'b', 'c']
    df = pd.DataFrame({"a": [0, 0],
                       "b": [0, np.nan],
                       "c": [np.nan, np.nan]})
    df2 = df.apply(pd.to_timedelta)
    # 0 by default
    result = df2.sum()
    expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
    tm.assert_series_equal(result, expected)
    # min_count=0
    result = df2.sum(min_count=0)
    tm.assert_series_equal(result, expected)
    # min_count=1
    result = df2.sum(min_count=1)
    expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
    tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
    """Summing a frame of timedelta objects runs without raising."""
    int_values = float_frame.values.astype(int)
    int_frame = DataFrame(int_values, index=float_frame.index,
                          columns=float_frame.columns)
    # multiplying by a timedelta yields timedelta cells; sum must work
    (int_frame * timedelta(1)).sum()
def test_sum_bool(self, float_frame):
    """Boolean frames can be summed along both axes (bug report)."""
    mask = np.isnan(float_frame)
    for axis in (1, 0):
        mask.sum(axis)
def test_mean_corner(self, float_frame, float_string_frame):
    """mean on mixed frames drops object columns, and boolean columns
    participate as 0/1."""
    # unit test when have object data
    the_mean = float_string_frame.mean(axis=0)
    the_sum = float_string_frame.sum(axis=0, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    # the object column must have been excluded
    assert len(the_mean.index) < len(float_string_frame.columns)
    # xs sum mixed type, just want to know it works...
    the_mean = float_string_frame.mean(axis=1)
    the_sum = float_string_frame.sum(axis=1, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    # take mean of boolean column
    float_frame['bool'] = float_frame['A'] > 0
    means = float_frame.mean(0)
    assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
    """Row-wise statistics on a mixed-dtype frame must not raise."""
    for stat in ('std', 'var', 'mean', 'skew'):
        getattr(float_string_frame, stat)(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
    """cumsum with NaN holes matches per-Series cumsum on both axes and
    preserves the frame's shape."""
    datetime_frame.loc[5:10, 0] = np.nan
    datetime_frame.loc[10:15, 1] = np.nan
    datetime_frame.loc[15:, 2] = np.nan
    # axis = 0
    cumsum = datetime_frame.cumsum()
    expected = datetime_frame.apply(Series.cumsum)
    tm.assert_frame_equal(cumsum, expected)
    # axis = 1
    cumsum = datetime_frame.cumsum(axis=1)
    expected = datetime_frame.apply(Series.cumsum, axis=1)
    tm.assert_frame_equal(cumsum, expected)
    # works
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cumsum()  # noqa
    # fix issue
    cumsum_xs = datetime_frame.cumsum(axis=1)
    assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
    """cumprod with NaN holes matches per-Series cumprod on both axes,
    preserves shape, and works for int and int32 frames."""
    datetime_frame.loc[5:10, 0] = np.nan
    datetime_frame.loc[10:15, 1] = np.nan
    datetime_frame.loc[15:, 2] = np.nan
    # axis = 0
    cumprod = datetime_frame.cumprod()
    expected = datetime_frame.apply(Series.cumprod)
    tm.assert_frame_equal(cumprod, expected)
    # axis = 1
    cumprod = datetime_frame.cumprod(axis=1)
    expected = datetime_frame.apply(Series.cumprod, axis=1)
    tm.assert_frame_equal(cumprod, expected)
    # fix issue
    cumprod_xs = datetime_frame.cumprod(axis=1)
    assert np.shape(cumprod_xs) == np.shape(datetime_frame)
    # ints
    df = datetime_frame.fillna(0).astype(int)
    df.cumprod(0)
    df.cumprod(1)
    # ints32
    df = datetime_frame.fillna(0).astype(np.int32)
    df.cumprod(0)
    df.cumprod(1)
def test_cummin(self, datetime_frame):
    """cummin with NaN holes matches per-Series cummin on both axes and
    keeps the frame's shape."""
    datetime_frame.loc[5:10, 0] = np.nan
    datetime_frame.loc[10:15, 1] = np.nan
    datetime_frame.loc[15:, 2] = np.nan
    # axis = 0
    tm.assert_frame_equal(datetime_frame.cummin(),
                          datetime_frame.apply(Series.cummin))
    # axis = 1
    tm.assert_frame_equal(datetime_frame.cummin(axis=1),
                          datetime_frame.apply(Series.cummin, axis=1))
    # it works on a plain integer frame
    DataFrame({'A': np.arange(20)}, index=np.arange(20)).cummin()
    # shape is preserved
    assert np.shape(datetime_frame.cummin(axis=1)) == \
        np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
    """cummax with NaN holes matches per-Series cummax on both axes and
    keeps the frame's shape."""
    datetime_frame.loc[5:10, 0] = np.nan
    datetime_frame.loc[10:15, 1] = np.nan
    datetime_frame.loc[15:, 2] = np.nan
    # axis = 0
    tm.assert_frame_equal(datetime_frame.cummax(),
                          datetime_frame.apply(Series.cummax))
    # axis = 1
    tm.assert_frame_equal(datetime_frame.cummax(axis=1),
                          datetime_frame.apply(Series.cummax, axis=1))
    # it works on a plain integer frame
    DataFrame({'A': np.arange(20)}, index=np.arange(20)).cummax()
    # shape is preserved
    assert np.shape(datetime_frame.cummax(axis=1)) == \
        np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
    """count on degenerate frames (no rows, no columns, or both) returns
    zero-valued Series rather than raising."""
    # corner case: fully empty frame
    frame = DataFrame()
    for axis in (1, 0):
        counted = frame.count(axis)
        assert isinstance(counted, Series)
    # GH#423: rows but no columns
    df = DataFrame(index=list(range(10)))
    tm.assert_series_equal(df.count(1), Series(0, index=df.index))
    # columns but no rows
    df = DataFrame(columns=list(range(10)))
    tm.assert_series_equal(df.count(0), Series(0, index=df.columns))
    # nothing at all
    df = DataFrame()
    tm.assert_series_equal(df.count(), Series(0, index=[]))
def test_count_objects(self, float_string_frame):
    """count agrees between two frames built from the same series dict."""
    left = DataFrame(float_string_frame._series)
    right = DataFrame(float_string_frame._series)
    tm.assert_series_equal(left.count(), right.count())
    tm.assert_series_equal(left.count(1), right.count(1))
def test_pct_change(self):
    """pct_change with fill_method='pad' equals a manual ffill/shift
    computation along both axes (GH#11150)."""
    pnl = DataFrame([np.arange(0, 40, 10),
                     np.arange(0, 40, 10),
                     np.arange(0, 40, 10)]).astype(np.float64)
    pnl.iat[1, 0] = np.nan
    pnl.iat[1, 1] = np.nan
    pnl.iat[2, 3] = 60
    for axis in range(2):
        # reference computation: forward-fill, then relative change
        expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
            axis=axis) - 1
        result = pnl.pct_change(axis=axis, fill_method='pad')
        tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
    """idxmin matches per-Series idxmin for both axes and skipna values;
    an invalid axis raises ValueError."""
    frame = float_frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmin(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmin, axis=axis,
                                    skipna=skipna)
                tm.assert_series_equal(result, expected)
    msg = ("No axis named 2 for object type"
           " <class 'pandas.core.frame.DataFrame'>")
    with pytest.raises(ValueError, match=msg):
        frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
    """idxmax matches per-Series idxmax for both axes and skipna values;
    an invalid axis raises ValueError."""
    frame = float_frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmax(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmax, axis=axis,
                                    skipna=skipna)
                tm.assert_series_equal(result, expected)
    msg = ("No axis named 2 for object type"
           " <class 'pandas.core.frame.DataFrame'>")
    with pytest.raises(ValueError, match=msg):
        frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
    """any/all agree with the numpy equivalents (calc) and accept the
    bool_only flag on mixed frames (api) — via the shared helpers."""
    assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
                        has_skipna=True)
    assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
                       has_bool_only=True)
def test_any_all_extra(self):
    """Row-wise any/all with and without bool_only, plus axis=None
    reducing to a single scalar."""
    df = DataFrame({
        'A': [True, False, False],
        'B': [True, True, False],
        'C': [True, True, True],
    }, index=['a', 'b', 'c'])
    result = df[['A', 'B']].any(1)
    expected = Series([True, True, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)
    result = df[['A', 'B']].any(1, bool_only=True)
    tm.assert_series_equal(result, expected)
    result = df.all(1)
    expected = Series([True, False, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)
    result = df.all(1, bool_only=True)
    tm.assert_series_equal(result, expected)
    # Axis is None
    result = df.all(axis=None).item()
    assert result is False
    result = df.any(axis=None).item()
    assert result is True
    result = df[['C']].all(axis=None).item()
    assert result is True
def test_any_datetime(self):
    """any(axis=1) treats NaT as falsy alongside NaN floats (GH 23070)."""
    df = DataFrame({
        "A": [1, np.nan, 3, np.nan],
        "B": [pd.Timestamp('1960-02-15'),
              pd.Timestamp('1960-02-16'),
              pd.NaT,
              pd.NaT],
    })
    # last row is NaN + NaT -> nothing truthy
    expected = Series([True, True, True, False])
    tm.assert_series_equal(df.any(1), expected)
def test_any_all_bool_only(self):
    """bool_only=True restricts all() to genuinely boolean columns
    (GH 25101): with none present the result is empty."""
    # GH 25101
    df = DataFrame({"col1": [1, 2, 3],
                    "col2": [4, 5, 6],
                    "col3": [None, None, None]})
    result = df.all(bool_only=True)
    expected = Series(dtype=np.bool)
    tm.assert_series_equal(result, expected)
    df = DataFrame({"col1": [1, 2, 3],
                    "col2": [4, 5, 6],
                    "col3": [None, None, None],
                    "col4": [False, False, True]})
    result = df.all(bool_only=True)
    expected = Series({"col4": False})
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
    (np.any, {}, False),
    (np.all, {}, True),
    (np.any, {'A': []}, False),
    (np.all, {'A': []}, True),
    (np.any, {'A': [False, False]}, False),
    (np.all, {'A': [False, False]}, False),
    (np.any, {'A': [True, False]}, True),
    (np.all, {'A': [True, False]}, False),
    (np.any, {'A': [True, True]}, True),
    (np.all, {'A': [True, True]}, True),
    (np.any, {'A': [False], 'B': [False]}, False),
    (np.all, {'A': [False], 'B': [False]}, False),
    (np.any, {'A': [False, False], 'B': [False, True]}, True),
    (np.all, {'A': [False, False], 'B': [False, True]}, False),
    # other types
    (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
    (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
    (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
    (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
    (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
    (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
    (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
    # # Mix
    # GH 21484
    # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
    #           'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
    """np.any/np.all over a whole DataFrame reduce to a numpy bool scalar
    and agree with the axis=None method form (GH 19976)."""
    # GH 19976
    data = DataFrame(data)
    result = func(data)
    assert isinstance(result, np.bool_)
    assert result.item() is expected
    # method version
    result = getattr(DataFrame(data), func.__name__)(axis=None)
    assert isinstance(result, np.bool_)
    assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
    """Aggregating by level with axis=None is ambiguous and must raise."""
    df = DataFrame(
        {"A": 1},
        index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
                                      names=['out', 'in'])
    )
    xpr = "Must specify 'axis' when aggregating by level."
    with pytest.raises(ValueError, match=xpr):
        getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
    """Frame-level isin matches row-by-row Series.isin (GH 4211)."""
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    candidates = ['a', 'b', 'c']
    row_by_row = DataFrame([df.loc[label].isin(candidates)
                            for label in df.index])
    tm.assert_frame_equal(df.isin(candidates), row_by_row)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
    """isin against any flavor of empty collection is all-False
    (GH 16991)."""
    # GH 16991
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    expected = DataFrame(False, df.index, df.columns)
    result = df.isin(empty)
    tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
    """Dict-valued isin restricts matching per column, including when
    column labels are duplicated."""
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    d = {'A': ['a']}
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    tm.assert_frame_equal(result, expected)
    # non unique columns
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    df.columns = ['A', 'A']
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
    """isin rejects a bare string argument with TypeError (GH 4763)."""
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    for scalar in ('a', 'aaa'):
        with pytest.raises(TypeError):
            df.isin(scalar)
def test_isin_df(self):
    """Frame-vs-frame isin aligns on both index and columns; columns
    absent from the other frame become all-False."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
    expected = DataFrame(False, df1.index, df1.columns)
    result = df1.isin(df2)
    expected['A'].loc[[1, 3]] = True
    expected['B'].loc[[0, 2]] = True
    tm.assert_frame_equal(result, expected)
    # partial overlapping columns
    df2.columns = ['A', 'C']
    result = df1.isin(df2)
    expected['B'] = False
    tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
    """isin matches whole tuples stored in an object column (GH 16394)."""
    df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
    df['C'] = list(zip(df['A'], df['B']))
    expected = Series([True, False, False], name="C")
    tm.assert_series_equal(df['C'].isin([(1, 'a')]), expected)
def test_isin_df_dupe_values(self):
    """isin against a frame with duplicated columns and/or index labels
    must raise ValueError."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    # just cols duped
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['B', 'B'])
    with pytest.raises(ValueError):
        df1.isin(df2)
    # just index duped
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['A', 'B'], index=[0, 0, 1, 1])
    with pytest.raises(ValueError):
        df1.isin(df2)
    # cols and index:
    df2.columns = ['B', 'B']
    with pytest.raises(ValueError):
        df1.isin(df2)
def test_isin_dupe_self(self):
    """A frame with duplicated columns of its own can still be the left
    side of isin; matching is positional per duplicated column."""
    other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
    df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
    result = df.isin(other)
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected.loc[0] = True
    expected.iloc[1, 1] = True
    tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
    """Series-valued isin aligns on the index: a row matches only where
    the series holds that row's value."""
    df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                      index=['a', 'b', 'c', 'd'])
    s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected['A'].loc['a'] = True
    expected.loc['d'] = True
    result = df.isin(s)
    tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
    """isin between a MultiIndexed frame and a regular-index frame is
    all-False until the other frame shares the MultiIndex."""
    idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                  (0, 'b', 'bar'), (0, 'b', 'baz'),
                                  (2, 'a', 'foo'), (2, 'a', 'bar'),
                                  (2, 'c', 'bar'), (2, 'c', 'baz'),
                                  (1, 'b', 'foo'), (1, 'b', 'bar'),
                                  (1, 'c', 'bar'), (1, 'c', 'baz')])
    df1 = DataFrame({'A': np.ones(12),
                     'B': np.zeros(12)}, index=idx)
    df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                     'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
    # against regular index
    expected = DataFrame(False, index=df1.index, columns=df1.columns)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)
    df2.index = idx
    # df1 is all ones/zeros, so matches are exactly df2's truthiness
    # (inverted for the zeros column)
    expected = df2.values.astype(np.bool)
    expected[:, 1] = ~expected[:, 1]
    expected = DataFrame(expected, columns=['A', 'B'], index=idx)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
    """isin against empty frames is all-False for both datetime and
    timedelta columns (GH 15473)."""
    left_frames = [
        DataFrame({'date':
                   pd.to_datetime(['2014-01-01', '2014-01-02'])}),
        DataFrame({'date':
                   [pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]}),
    ]
    expected = DataFrame({'date': [False, False]})
    for left in left_frames:
        # empty column and completely empty frame behave alike
        for right in (DataFrame({'date': []}), DataFrame()):
            tm.assert_frame_equal(left.isin(right), expected)
# ---------------------------------------------------------------------
# Rounding
def test_round(self):
    """DataFrame.round with integer, dict and Series `decimals`;
    non-integer decimals values raise TypeError (GH 2665, GH 11986)."""
    # GH 2665
    # Test that rounding an empty DataFrame does nothing
    df = DataFrame()
    tm.assert_frame_equal(df, df.round())
    # Here's the test frame we'll be working with
    df = DataFrame({'col1': [1.123, 2.123, 3.123],
                    'col2': [1.234, 2.234, 3.234]})
    # Default round to integer (i.e. decimals=0)
    expected_rounded = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(df.round(), expected_rounded)
    # Round with an integer
    decimals = 2
    expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
                                  'col2': [1.23, 2.23, 3.23]})
    tm.assert_frame_equal(df.round(decimals), expected_rounded)
    # This should also work with np.round (since np.round dispatches to
    # df.round)
    tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
    # Round with a list
    round_list = [1, 2]
    with pytest.raises(TypeError):
        df.round(round_list)
    # Round with a dictionary
    expected_rounded = DataFrame(
        {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
    round_dict = {'col1': 1, 'col2': 2}
    tm.assert_frame_equal(df.round(round_dict), expected_rounded)
    # Incomplete dict
    expected_partially_rounded = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
    partial_round_dict = {'col2': 1}
    tm.assert_frame_equal(df.round(partial_round_dict),
                          expected_partially_rounded)
    # Dict with unknown elements
    wrong_round_dict = {'col3': 2, 'col2': 1}
    tm.assert_frame_equal(df.round(wrong_round_dict),
                          expected_partially_rounded)
    # float input to `decimals`
    non_int_round_dict = {'col1': 1, 'col2': 0.5}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)
    # String input
    non_int_round_dict = {'col1': 1, 'col2': 'foo'}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)
    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)
    # List input
    non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)
    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)
    # NOTE: the original repeated the Series-based TypeError check above
    # two more times verbatim ("Non integer Series inputs"); those exact
    # copy-paste duplicates were removed.
    # Negative numbers
    negative_round_dict = {'col1': -1, 'col2': -2}
    big_df = df * 100
    expected_neg_rounded = DataFrame(
        {'col1': [110., 210, 310], 'col2': [100., 200, 300]})
    tm.assert_frame_equal(big_df.round(negative_round_dict),
                          expected_neg_rounded)
    # nan in Series round
    nan_round_Series = Series({'col1': np.nan, 'col2': 1})
    # TODO(wesm): unused?
    expected_nan_round = DataFrame({  # noqa
        'col1': [1.123, 2.123, 3.123],
        'col2': [1.2, 2.2, 3.2]})
    with pytest.raises(TypeError):
        df.round(nan_round_Series)
    # Make sure this doesn't break existing Series.round
    tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
    # named columns
    # GH 11986
    decimals = 2
    expected_rounded = DataFrame(
        {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
    df.columns.name = "cols"
    expected_rounded.columns.name = "cols"
    tm.assert_frame_equal(df.round(decimals), expected_rounded)
    # interaction of named columns & series
    tm.assert_series_equal(df['col1'].round(decimals),
                           expected_rounded['col1'])
    tm.assert_series_equal(df.round(decimals)['col1'],
                           expected_rounded['col1'])
def test_numpy_round(self):
    """np.round dispatches to DataFrame.round; the 'out' parameter is
    rejected (GH 12600)."""
    df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
    rounded = np.round(df, decimals=0)
    tm.assert_frame_equal(rounded, DataFrame([[2., 1.], [0., 7.]]))
    msg = "the 'out' parameter is not supported"
    with pytest.raises(ValueError, match=msg):
        np.round(df, decimals=0, out=df)
def test_numpy_round_nan(self):
    """Rounding a frame containing NaN emits no warnings (gh-14197)."""
    df = Series([1.53, np.nan, 0.06]).to_frame()
    with tm.assert_produces_warning(None):
        rounded = df.round()
    tm.assert_frame_equal(rounded, Series([2., np.nan, 0.]).to_frame())
def test_round_mixed_type(self):
    """round leaves non-float columns untouched, both with an integer
    and with dict decimals (GH 11885)."""
    df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
                    'col2': ['1', 'a', 'c', 'f'],
                    'col3': date_range('20111111', periods=4)})
    round_0 = DataFrame({'col1': [1., 2., 3., 4.],
                         'col2': ['1', 'a', 'c', 'f'],
                         'col3': date_range('20111111', periods=4)})
    tm.assert_frame_equal(df.round(), round_0)
    tm.assert_frame_equal(df.round(1), df)
    tm.assert_frame_equal(df.round({'col1': 1}), df)
    tm.assert_frame_equal(df.round({'col1': 0}), round_0)
    tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
    # dict entries naming non-float columns are ignored
    tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
    """round keeps the index on duplicated-column frames and rejects a
    decimals Series with a non-unique index (GH 11611)."""
    df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
                      index=['first', 'second', 'third'])
    dfs = pd.concat((df, df), axis=1)
    rounded = dfs.round()
    tm.assert_index_equal(rounded.index, dfs.index)
    decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
    msg = "Index of decimals must be unique"
    with pytest.raises(ValueError, match=msg):
        df.round(decimals)
def test_built_in_round(self):
    """The builtin round() works on a DataFrame via __round__ (GH 11763)."""
    df = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
    # default rounds to integer (decimals=0)
    expected = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(round(df), expected)
def test_round_nonunique_categorical(self):
    """round preserves shape and values under a non-unique
    CategoricalIndex (GH21809)."""
    cat_idx = pd.CategoricalIndex(['low'] * 3 + ['hi'] * 3)
    base = pd.DataFrame(np.random.rand(6, 3), columns=list('abc'))
    expected = base.round(3)
    expected.index = cat_idx
    with_cat_index = base.copy().set_index(cat_idx)
    assert with_cat_index.shape == (6, 3)
    rounded = with_cat_index.round(3)
    assert rounded.shape == (6, 3)
    tm.assert_frame_equal(rounded, expected)
# ---------------------------------------------------------------------
# Clip
def test_clip(self, float_frame):
    """clip_upper/clip_lower (deprecated, hence the FutureWarning) and
    two-sided clip bound values without mutating the input frame."""
    median = float_frame.median().median()
    original = float_frame.copy()
    with tm.assert_produces_warning(FutureWarning):
        capped = float_frame.clip_upper(median)
    assert not (capped.values > median).any()
    with tm.assert_produces_warning(FutureWarning):
        floored = float_frame.clip_lower(median)
    assert not (floored.values < median).any()
    # equal bounds collapse everything to the bound itself
    double = float_frame.clip(upper=median, lower=median)
    assert not (double.values != median).any()
    # Verify that float_frame was not changed inplace
    assert (float_frame.values == original.values).all()
def test_inplace_clip(self, float_frame):
    """inplace=True variants of clip_upper/clip_lower/clip mutate the
    frame in place (GH 15388)."""
    # GH 15388
    median = float_frame.median().median()
    frame_copy = float_frame.copy()
    with tm.assert_produces_warning(FutureWarning):
        frame_copy.clip_upper(median, inplace=True)
    assert not (frame_copy.values > median).any()
    frame_copy = float_frame.copy()
    with tm.assert_produces_warning(FutureWarning):
        frame_copy.clip_lower(median, inplace=True)
    assert not (frame_copy.values < median).any()
    frame_copy = float_frame.copy()
    frame_copy.clip(upper=median, lower=median, inplace=True)
    assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
    """clip bounds values correctly regardless of the order in which the
    bounds are given (GH 2747)."""
    df = DataFrame(np.random.randn(1000, 2))
    for lb, ub in [(-1, 1), (1, -1)]:
        clipped = df.clip(lb, ub)
        lo, hi = min(lb, ub), max(ub, lb)
        below = df.values <= lo
        above = df.values >= hi
        inside = ~below & ~above
        assert (clipped.values[below] == lo).all()
        assert (clipped.values[above] == hi).all()
        assert (clipped.values[inside] == df.values[inside]).all()
def test_clip_mixed_numeric(self):
    """clip on mixed int/float frames, and dtype preservation per column
    after clipping (GH 24162)."""
    # TODO(jreback)
    # clip on mixed integer or floats
    # with integer clippers coerces to float
    df = DataFrame({'A': [1, 2, 3],
                    'B': [1., np.nan, 3.]})
    result = df.clip(1, 2)
    expected = DataFrame({'A': [1, 2, 2],
                          'B': [1., np.nan, 2.]})
    tm.assert_frame_equal(result, expected, check_like=True)
    # GH 24162, clipping now preserves numeric types per column
    df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]],
                   columns=['foo', 'bar', 'baz'])
    expected = df.dtypes
    result = df.clip(upper=3).dtypes
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
    """Series-valued bounds clip each column element-wise along axis=0,
    with and without inplace (GH 6966)."""
    # GH 6966
    df = DataFrame(np.random.randn(1000, 2))
    lb = Series(np.random.randn(1000))
    ub = lb + 1
    original = df.copy()
    clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
    if inplace:
        # inplace clip returns None; the mutated frame is the result
        clipped_df = df
    for i in range(2):
        lb_mask = original.iloc[:, i] <= lb
        ub_mask = original.iloc[:, i] >= ub
        mask = ~lb_mask & ~ub_mask
        result = clipped_df.loc[lb_mask, i]
        tm.assert_series_equal(result, lb[lb_mask], check_names=False)
        assert result.name == i
        result = clipped_df.loc[ub_mask, i]
        tm.assert_series_equal(result, ub[ub_mask], check_names=False)
        assert result.name == i
        tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
    (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
    (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, simple_frame,
                                inplace, lower, axis, res):
    """List/array bounds broadcast along the given axis, with and
    without inplace (GH 15390)."""
    # GH 15390
    original = simple_frame.copy(deep=True)
    result = original.clip(lower=lower, upper=[5, 6, 7],
                           axis=axis, inplace=inplace)
    expected = pd.DataFrame(res,
                            columns=original.columns,
                            index=original.index)
    if inplace:
        # inplace clip returns None; the mutated frame is the result
        result = original
    tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
    """Frame-valued bounds clip element-wise for any axis setting."""
    df = DataFrame(np.random.randn(1000, 2))
    lower = DataFrame(np.random.randn(1000, 2))
    upper = lower + 1
    clipped = df.clip(lower, upper, axis=axis)
    at_lower = df <= lower
    at_upper = df >= upper
    untouched = ~at_lower & ~at_upper
    tm.assert_frame_equal(clipped[at_lower], lower[at_lower])
    tm.assert_frame_equal(clipped[at_upper], upper[at_upper])
    tm.assert_frame_equal(clipped[untouched], df[untouched])
    def test_clip_against_unordered_columns(self):
        # GH 20911: DataFrame bounds must be aligned on columns before
        # clipping, so a bound frame with shuffled columns must give the same
        # result as one explicitly reindexed to df1's column order.
        df1 = DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D'])
        df2 = DataFrame(np.random.randn(1000, 4), columns=['D', 'A', 'B', 'C'])
        df3 = DataFrame(df2.values - 1, columns=['B', 'D', 'C', 'A'])
        result_upper = df1.clip(lower=0, upper=df2)
        expected_upper = df1.clip(lower=0, upper=df2[df1.columns])
        result_lower = df1.clip(lower=df3, upper=3)
        expected_lower = df1.clip(lower=df3[df1.columns], upper=3)
        result_lower_upper = df1.clip(lower=df3, upper=df2)
        expected_lower_upper = df1.clip(lower=df3[df1.columns],
                                        upper=df2[df1.columns])
        tm.assert_frame_equal(result_upper, expected_upper)
        tm.assert_frame_equal(result_lower, expected_lower)
        tm.assert_frame_equal(result_lower_upper, expected_lower_upper)
    def test_clip_with_na_args(self, float_frame):
        """Should process np.nan argument as None """
        # GH 17276: scalar np.nan bounds mean "no bound" and leave the
        # frame untouched
        tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
        tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan),
                              float_frame)
        # GH 19992: list-like bounds containing NaN propagate NaN in the
        # positions they broadcast to (axis=0: per row, axis=1: per column)
        df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
                        'col_2': [7, 8, 9]})
        result = df.clip(lower=[4, 5, np.nan], axis=0)
        expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
                              'col_2': [7, 8, np.nan]})
        tm.assert_frame_equal(result, expected)
        result = df.clip(lower=[4, 5, np.nan], axis=1)
        expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
                              'col_2': [np.nan, np.nan, np.nan]})
        tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
expected = a.dot(a.iloc[0])
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError, match='Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(np.random.randn(3, 4),
index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(np.random.randn(5, 3),
index=lrange(5), columns=[1, 2, 3])
with pytest.raises(ValueError, match='aligned'):
df.dot(df2)
    def test_matmul(self):
        # matmul test is for GH 10259
        a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                      columns=['p', 'q', 'r', 's'])
        b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                      columns=['one', 'two'])

        # DataFrame @ DataFrame
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)

        # DataFrame @ Series
        result = operator.matmul(a, b.one)
        expected = Series(np.dot(a.values, b.one.values),
                          index=['a', 'b', 'c'])
        tm.assert_series_equal(result, expected)

        # np.array @ DataFrame: still returns a DataFrame, with a default
        # integer index and b's columns
        result = operator.matmul(a.values, b)
        assert isinstance(result, DataFrame)
        assert result.columns.equals(b.columns)
        assert result.index.equals(pd.Index(range(3)))
        expected = np.dot(a.values, b.values)
        tm.assert_almost_equal(result.values, expected)

        # nested list @ DataFrame (__rmatmul__)
        result = operator.matmul(a.values.tolist(), b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_almost_equal(result.values, expected.values)

        # mixed dtype DataFrame @ DataFrame
        a['q'] = a.q.round().astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)

        # different dtypes DataFrame @ DataFrame
        a = a.astype(int)
        result = operator.matmul(a, b)
        expected = DataFrame(np.dot(a.values, b.values),
                             index=['a', 'b', 'c'],
                             columns=['one', 'two'])
        tm.assert_frame_equal(result, expected)

        # unaligned operands raise
        df = DataFrame(np.random.randn(3, 4),
                       index=[1, 2, 3], columns=lrange(4))
        df2 = DataFrame(np.random.randn(5, 3),
                        index=lrange(5), columns=[1, 2, 3])
        with pytest.raises(ValueError, match='aligned'):
            operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
    """Frame with duplicate index labels, for nlargest/nsmallest dup handling."""
    return pd.DataFrame({'a': [1, 2, 3, 4, 4],
                         'b': [1, 1, 1, 1, 1],
                         'c': [0, 1, 2, 5, 4]},
                        index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
    """Frame mixing numeric columns with an object (string) column 'b'."""
    return pd.DataFrame({'a': np.random.permutation(10),
                         'b': list(ascii_lowercase[:10]),
                         'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
    """Frame with one column per major dtype, for dtype-support checks."""
    return pd.DataFrame(
        {'group': [1, 1, 2],
         'int': [1, 2, 3],
         'float': [4., 5., 6.],
         'string': list('abc'),
         'category_string': pd.Series(list('abc')).astype('category'),
         'category_int': [7, 8, 9],
         'datetime': pd.date_range('20130101', periods=3),
         'datetimetz': pd.date_range('20130101',
                                     periods=3,
                                     tz='US/Eastern'),
         'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
        columns=['group', 'int', 'float', 'string',
                 'category_string', 'category_int',
                 'datetime', 'datetimetz',
                 'timedelta'])
class TestNLargestNSmallest(object):
    """Tests for DataFrame.nlargest / DataFrame.nsmallest."""

    # Error raised when nlargest/nsmallest is used on an unsupported dtype.
    dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
                                "use method {method!r} with this dtype")

    # ----------------------------------------------------------------------
    # Top / bottom
    @pytest.mark.parametrize('order', [
        ['a'],
        ['c'],
        ['a', 'b'],
        ['a', 'c'],
        ['b', 'a'],
        ['b', 'c'],
        ['a', 'b', 'c'],
        ['c', 'a', 'b'],
        ['c', 'b', 'a'],
        ['b', 'c', 'a'],
        ['b', 'a', 'c'],
        # dups!
        ['b', 'c', 'c']])
    @pytest.mark.parametrize('n', range(1, 11))
    def test_n(self, df_strings, nselect_method, n, order):
        # GH 10393: result equals sort_values(...).head(n); the object
        # column 'b' is unsupported and must raise TypeError
        df = df_strings
        if 'b' in order:
            error_msg = self.dtype_error_msg_template.format(
                column='b', method=nselect_method, dtype='object')
            with pytest.raises(TypeError, match=error_msg):
                getattr(df, nselect_method)(n, order)
        else:
            ascending = nselect_method == 'nsmallest'
            result = getattr(df, nselect_method)(n, order)
            expected = df.sort_values(order, ascending=ascending).head(n)
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('columns', [
        ['group', 'category_string'], ['group', 'string']])
    def test_n_error(self, df_main_dtypes, nselect_method, columns):
        # selecting by a category/string column raises TypeError
        df = df_main_dtypes
        col = columns[1]
        error_msg = self.dtype_error_msg_template.format(
            column=col, method=nselect_method, dtype=df[col].dtype)
        # escape some characters that may be in the repr
        error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
                     .replace("[", "\\[").replace("]", "\\]"))
        with pytest.raises(TypeError, match=error_msg):
            getattr(df, nselect_method)(2, columns)

    def test_n_all_dtypes(self, df_main_dtypes):
        # smoke test: all remaining dtypes are accepted without raising
        df = df_main_dtypes
        df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
        df.nlargest(2, list(set(df) - {'category_string', 'string'}))

    @pytest.mark.parametrize('method,expected', [
        ('nlargest',
         pd.DataFrame({'a': [2, 2, 2, 1], 'b': [3, 2, 1, 3]},
                      index=[2, 1, 0, 3])),
        ('nsmallest',
         pd.DataFrame({'a': [1, 1, 1, 2], 'b': [1, 2, 3, 1]},
                      index=[5, 4, 3, 0]))])
    def test_duplicates_on_starter_columns(self, method, expected):
        # regression test for #22752
        df = pd.DataFrame({
            'a': [2, 2, 2, 1, 1, 1],
            'b': [1, 2, 3, 3, 2, 1]
        })
        result = getattr(df, method)(4, columns=['a', 'b'])
        tm.assert_frame_equal(result, expected)

    def test_n_identical_values(self):
        # GH 15297: all-equal ranking column keeps the original row order
        df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})

        result = df.nlargest(3, 'a')
        expected = pd.DataFrame(
            {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
        )
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(3, 'a')
        expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('order', [
        ['a', 'b', 'c'],
        ['c', 'b', 'a'],
        ['a'],
        ['b'],
        ['a', 'b'],
        ['c', 'b']])
    @pytest.mark.parametrize('n', range(1, 6))
    def test_n_duplicate_index(self, df_duplicates, n, order):
        # GH 13412: duplicate index labels must not break the selection
        df = df_duplicates
        result = df.nsmallest(n, order)
        expected = df.sort_values(order).head(n)
        tm.assert_frame_equal(result, expected)
        result = df.nlargest(n, order)
        expected = df.sort_values(order, ascending=False).head(n)
        tm.assert_frame_equal(result, expected)

    def test_duplicate_keep_all_ties(self):
        # GH 16818: keep='all' returns every row tied with the cutoff value
        df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
                           'b': [10, 9, 8, 7, 5, 50, 10, 20]})
        result = df.nlargest(4, 'a', keep='all')
        expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
                                       5: 3, 6: 3, 7: 3},
                                 'b': {0: 10, 1: 9, 2: 8, 4: 5,
                                       5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(2, 'a', keep='all')
        expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
                                 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

    def test_series_broadcasting(self):
        # smoke test for numpy warnings
        # GH 16378, GH 16306: comparisons/clip against NaN-containing Series
        # must not emit numpy warnings (the FutureWarning expected here comes
        # from the clip_lower call itself)
        df = DataFrame([1.0, 1.0, 1.0])
        df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
        s = Series([1, 1, 1])
        s_nan = Series([np.nan, np.nan, 1])

        with tm.assert_produces_warning(None):
            with tm.assert_produces_warning(FutureWarning):
                df_nan.clip_lower(s, axis=0)
            for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
                getattr(df, op)(s_nan, axis=0)

    def test_series_nat_conversion(self):
        # GH 18521
        # Check rank does not mutate DataFrame
        df = DataFrame(np.random.randn(10, 3), dtype='float64')
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)

    def test_multiindex_column_lookup(self):
        # Check whether tuples are correctly treated as multi-level lookups.
        # GH 23033
        df = pd.DataFrame(
            columns=pd.MultiIndex.from_product([['x'], ['a', 'b']]),
            data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]])

        # nsmallest
        result = df.nsmallest(3, ('x', 'a'))
        expected = df.iloc[[2, 0, 3]]
        tm.assert_frame_equal(result, expected)

        # nlargest
        result = df.nlargest(3, ('x', 'b'))
        expected = df.iloc[[3, 2, 1]]
        tm.assert_frame_equal(result, expected)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: ogouvert
Variational Inference algorithm for Ordinal Non-Negative Matrix Factorization (OrdNMF)
- DCPF MODEL:
W ~ Gamma(alphaW,betaW) ## UxK (size of W)
H ~ Gamma(alphaH,betaH) ## IxK
C ~ OrdNMF(W*H) ## UxI
- VARIATIONAL INFERENCE:
p(W,H,C,N) \approx q(C|N)q(N)q(W)q(H)
where:
q(W) = Gamma()
q(H) = Gamma()
q(C|N) = Mult()
q(N) = ZTP()
"""
#%% Ordinal NMF
import numpy as np
import scipy.special as special
import scipy.sparse as sparse
import os
import time
import cPickle as pickle
import sys
import pandas as pd
class OrdNMF():
    """Ordinal NMF trained by variational inference (see module docstring).

    Factorizes ordinal observations Y (UxI, values in 0..T) through
    gamma-distributed factors W (UxK) and H (IxK).
    """

    def __init__(self, K,
                 alphaW=1., alphaH=1., betaW=1., betaH=1.):
        """
        K (int) - number of latent factors
        alphaW (float, >0) - shape parameter of the prior of W
        alphaH (float, >0) - shape parameter of the prior of H
        betaW (float, >0) - rate parameter of the prior of W
        betaH (float, >0) - rate parameter of the prior of H
        """
        self.K = K
        self.alphaW = alphaW
        self.alphaH = alphaH
        self.betaW = betaW
        self.betaH = betaH
        self.score = {}
        self.classname = 'ONMF_implicit'
        # Save arg (constructor arguments, for reproducibility)
        saved_args_init = locals()
        saved_args_init.pop('self')
        self.saved_args_init = saved_args_init

    def fit(self, Y, T,
            seed=None,
            opt_hyper=['beta'],
            approx=False,
            precision=10**(-5), max_iter=10**5, min_iter=0,
            verbose=False,
            save=True, save_dir='', prefix=None, suffix=None):
        """
        ------- INPUT VARIABLES -------
        Y (sparse matrix of size UxI) - Observed data, values from 0 to T
        T - maximum value in Y
        ------- OPTIONAL VARIABLES -------
        seed (int)
        opt_hyper (list of str)
            'beta' - update the scale parameters of the gamma prior of W and H
                betaW of size U, betaH of size I
            'betaH' - update the scale parameters of the gamma prior of H
                betaH is a scalar
        approx (bool) - if True, the variable N is approximated by a dirac located in 1
        precision (float) - stopping criterion on the ELBO
        max_iter (int) - maximum iteration number
        min_iter (int) - minimum iteration number
        save (bool) - Saving the final class object
        save_dir (str) - Path of the saved file
        prefix, suffix (str) - prefix and suffix to use in the name of the saved file
        ------- SAVED VARIABLES -------
        Ew, Elogw : Expectations: Ew = E[W] and Elogw = E[log(W)]
        Eh, Elogh : idem for variable H
        Elbo : Evolution of the ELBO
        """
        self.seed = seed
        np.random.seed(seed)
        self.T = T
        self.opt_hyper = opt_hyper
        self.approx = approx
        self.verbose = verbose
        self.precision = precision
        # Save
        self.save = save
        self.save_dir = save_dir
        self.prefix = prefix
        self.suffix = suffix
        # NOTE(review): this rebinds the *method* `filename` to the resulting
        # string, so fit() cannot be called twice on the same instance.
        self.filename = self.filename(prefix, suffix)
        # Save arg (fit arguments except the data itself)
        saved_args_fit = locals()
        saved_args_fit.pop('self')
        saved_args_fit.pop('Y')
        self.saved_args_fit = saved_args_fit
        # Timer
        start_time = time.time()
        # Shape
        U, I = Y.shape
        u, i = Y.nonzero()
        # Init - matrix companion
        #delta = np.ones(T+1); delta[0]=0;
        delta = self.init_delta(Y)
        # H[l] = sum_{l'>=l} delta[l']; theta0 = H[0]; G = theta0 - H
        H = (np.triu(np.ones((T+1, T+1))).dot(delta[:, np.newaxis]))[:, 0]
        theta0 = H[0]
        G = theta0 - H
        Gy = transform_Y(Y, G)
        # Init - W & H
        Ew = np.random.gamma(1., 1., (U, self.K))
        Eh = np.random.gamma(1., 1., (I, self.K))
        # s_wh = sum_{u,i,k} Ew[u,k]*Eh[i,k] (scalar)
        s_wh = np.dot(np.sum(Ew, 0, keepdims=True), np.sum(Eh, 0, keepdims=True).T)[0, 0]
        # Local
        Sw, Sh, En, elboLoc = self.q_loc(Y, delta, Ew, Eh)
        self.Elbo = [-float("inf")]
        self.info = []
        for n in range(max_iter):
            # Time
            if verbose:
                print('ITERATION #%d' % n)
                start_t = _writeline_and_time('\tUpdates...')
            # Hyper parameter
            if np.isin('beta', opt_hyper):
                self.betaW = self.alphaW/Ew.mean(axis=1, keepdims=True)
                self.betaH = self.alphaH/Eh.mean(axis=1, keepdims=True)
            if np.isin('betaH', opt_hyper):
                self.betaH = self.alphaH / np.mean(Eh)
            # Updates Delta
            lbd = np.sum(Ew[u, :]*Eh[i, :], 1)
            S_lbd = s_wh
            for l in range(T, 0, -1):  # {T,...,1}
                S_lbd = S_lbd - np.sum(lbd[Y.data == l+1])
                delta[l] = np.sum(En[Y.data == l])/S_lbd
            H = (np.triu(np.ones((T+1, T+1))).dot(delta[:, np.newaxis]))[:, 0]
            theta0 = H[0]
            G = theta0 - H
            Gy = transform_Y(Y, G)
            # Global updates (gamma posteriors of W and H)
            Ew, Elogw, elboW = q_Gamma(self.alphaW, Sw,
                                       self.betaW, theta0*np.sum(Eh, 0, keepdims=True) - Gy.dot(Eh))
            Eh, Elogh, elboH = q_Gamma(self.alphaH, Sh,
                                       self.betaH, theta0*np.sum(Ew, 0, keepdims=True) - Gy.T.dot(Ew))
            s_wh = np.dot(np.sum(Ew, 0, keepdims=True), np.sum(Eh, 0, keepdims=True).T)[0, 0]
            # Local updates (use geometric means exp(E[log .]))
            Sw, Sh, En, elboLoc = self.q_loc(Y, delta, np.exp(Elogw), np.exp(Elogh))
            # Elbo update
            elbo = elboLoc - theta0*s_wh + np.sum(Ew*Gy.dot(Eh)) + elboW + elboH
            self.rate = (elbo-self.Elbo[-1])/np.abs(self.Elbo[-1])
            if verbose:
                print('\r\tUpdates: time=%.2f' % (time.time() - start_t))
                print('\tRate:' + str(self.rate))
            # the ELBO must be monotonically increasing
            if elbo < self.Elbo[-1]:
                self.Elbo.append(elbo)
                raise ValueError('Elbo diminue!')
            if np.isnan(elbo):
                #pass
                raise ValueError('elbo NAN')
            elif self.rate < precision and n >= min_iter:
                # converged
                self.Elbo.append(elbo)
                break
            self.Elbo.append(elbo)
            self.info.append(delta.copy())
        self.delta = delta
        # theta[l] = sum_{l'>l} delta[l'] (strict upper triangle)
        self.theta = (np.triu(np.ones((T+1, T+1)), 1).dot(delta[:, np.newaxis]))[:, 0]
        self.Ew = Ew.copy()
        self.Eh = Eh.copy()
        self.En = En.copy()
        self.Elogw = Elogw.copy()
        self.Elogh = Elogh.copy()
        self.duration = time.time()-start_time
        # Save
        if self.save:
            self.save_model()

    def init_delta(self, Y):
        """ Initialization of delta w.r.t. the histogram values of Y """
        hist_values = np.bincount(Y.data)
        # NOTE(review): slot 0 is overwritten with the number of stored
        # entries (bincount of sparse .data has no meaningful zero count)
        hist_values[0] = Y.nnz
        cum_hist = np.cumsum(hist_values, dtype=float)
        delta = hist_values/cum_hist
        delta[0] = 0
        return delta

    def q_loc(self, Y, delta, W, H):
        """
        q(C,N) = q(N)q(C|N)
        q(C|N) = Multinomial
        q(N) = ZTP
        OUTPUT:
        en - data of the sparse matrix En
        Sw = \sum_i E[c_{uik}]
        Sh = \sum_u E[c_{uik}]
        """
        # Product
        u, i = Y.nonzero()
        Lbd = np.sum(W[u, :]*H[i, :], 1)
        delta_y = transform_Y(Y, delta).data
        # En: mean of the zero-truncated Poisson; the NaN fix-up handles the
        # 0/0 limit when Lbd*delta_y -> 0 (mean -> 1)
        if self.approx == False:
            en = Lbd*delta_y/(1.-np.exp(-Lbd*delta_y))  # delta_y/(1.-np.exp(-Lbd*delta_y))
            en[np.isnan(en)] = 1.
        else:
            en = np.ones_like(Lbd)
        # Sum C
        R = sparse.csr_matrix((en/Lbd, (u, i)), shape=Y.shape)  # UxI
        Sw = W*(R.dot(H))
        Sh = H*(R.T.dot(W))
        # ELBO
        elbo = np.sum(np.log(np.expm1(Lbd*delta_y)))
        return Sw, Sh, en, elbo

    def filename(self, prefix, suffix):
        # Build the save-file name encoding the main hyperparameters.
        if prefix is not None:
            prefix = prefix+'_'
        else:
            prefix = ''
        if suffix is not None:
            suffix = '_'+suffix
        else:
            suffix = ''
        return prefix + self.classname + \
            '_K%d' % (self.K) + \
            '_T%d' % (self.T) + \
            '_alpha%.2f_%.2f' % (self.alphaW, self.alphaH) + \
            '_beta%.2f_%.2f' % (self.betaW, self.betaH) + \
            '_opthyper_' + '_'.join(sorted(self.opt_hyper)) + \
            '_approxN_' + str(self.approx) + \
            '_tol%.1e' % (self.precision) + \
            '_seed' + str(self.seed) + suffix

    def save_model(self):
        # Pickle the whole fitted object under save_dir/filename.
        with open(os.path.join(self.save_dir, self.filename), 'wb') as handle:
            pickle.dump(self, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def copy_attributes(self, oobj):
        # Shallow-copy all attributes from another instance onto self.
        self.__dict__ = oobj.__dict__.copy()
def stat_gamma(shape, rate):
    """
    Statistics of a gamma distribution x ~ Gamma(shape, rate).
    INPUT: shape and rate parameters
    OUTPUT: E[x], E[log(x)], H the entropy
    """
    digamma_shape = special.digamma(shape)
    log_rate = np.log(rate)
    mean = shape/rate
    mean_log = digamma_shape - log_rate
    entropy = (shape - log_rate + special.gammaln(shape)
               + (1 - shape)*digamma_shape)
    return mean, mean_log, entropy
def gamma_elbo(shape, rate, Ex, Elogx):
    """ Part of the ELBO linked to the gamma prior """
    cross_term = (shape - 1)*Elogx - rate*Ex
    log_partition = shape*np.log(rate) - special.gammaln(shape)
    return cross_term + log_partition
def q_Gamma(shape, _shape, rate, _rate):
    """ Posterior Gamma(shape+_shape, rate+_rate): statistics and ELBO term """
    post_shape = shape + _shape
    post_rate = rate + _rate
    E, Elog, entropy = stat_gamma(post_shape, post_rate)
    prior_term = gamma_elbo(shape, rate, E, Elog)
    elbo = prior_term.sum() + entropy.sum()
    return E, Elog, elbo
def transform_Y(Y, values):  # 1->values[1]; 2->values[2]; ...
    """Map each integer code l stored in sparse Y onto values[l] (as floats)."""
    mapped = Y.copy().astype(float)
    for level in range(1, len(values)):
        mapped.data[Y.data == level] = values[level]
    return mapped
def _writeline_and_time(s):
sys.stdout.write(s)
sys.stdout.flush()
return time.time() |
import unittest
class StackTraceTests(unittest.TestCase):
    """Deliberately failing tests, used to exercise stack-trace reporting."""

    def test_bad_import(self):
        # Walks instance_method_a -> instance_method_b -> class_static ->
        # global_func, which raises ImportError several frames deep.
        obj = Utility()
        obj.instance_method_a()

    def test_not_equal(self):
        # Plain assertion failure (1 != 2), intentional.
        self.assertEqual(1, 2)
def global_func():
    """Raise ImportError from inside a nested local function."""
    def local_func():
        import not_a_module  # trigger exception
    local_func()
class Utility(object):
    """Call chain that makes global_func's ImportError surface many frames deep."""

    @staticmethod
    def class_static():
        global_func()

    def instance_method_b(self):
        Utility.class_static()

    def instance_method_a(self):
        self.instance_method_b()
if __name__ == '__main__':
    # Run the (intentionally failing) test cases.
    unittest.main()
|
__author__ = "Andrea Tramacere"
try:
from sherpa.models.model import ArithmeticModel, modelCacher1d, RegriddableModel1D
from sherpa.models.parameter import Parameter
from sherpa import data
from sherpa.fit import Fit
from sherpa.stats import Chi2
from sherpa.optmethods import LevMar
from sherpa.fit import Fit
from sherpa.stats import Chi2
from sherpa import data as sherpa_data
sherpa_installed = True
except:
raise ImportError('to use sherpa plugin you need to install sherpa: https://sherpa.readthedocs.io/en/latest/install.html')
import numpy as np
from .plot_sedfit import PlotSED
from .minimizer import Minimizer
__all__=['JetsetSherpaModel','plot_sherpa_model']
class JetsetSherpaModel(RegriddableModel1D):
    """Automatic sherpa model wrapper around a jetset model.

    Exposes each (optionally filtered) jetset parameter as a sherpa
    Parameter so sherpa's fit machinery can drive the jetset evaluation.
    """

    def __init__(self, jetset_model, par_list=None, clone=False):
        # clone=True detaches this wrapper from the caller's model instance
        if clone is True:
            self._jetset_model = jetset_model.clone()
        else:
            self._jetset_model = jetset_model
        self._jp_list = []        # sherpa Parameter objects, in creation order
        self._jp_par_array = []   # matching jetset parameter objects
        self._jp_list_names = []  # jetset names already wrapped
        setattr(self, '_jetset_ncalls', 0)  # counts calls to calc()
        keep = True
        #print('-->, ', par_list)
        for p in self._jetset_model.parameters.par_array:
            # keep every parameter unless an explicit par_list filter is given
            if par_list is None:
                keep = True
            else:
                keep = p in par_list
            #print('-->, ',p, p.name, keep)
            if p is not None and keep is True:
                # rename when a case-variant of the name was already taken
                # (sherpa internal naming convention, per the message below)
                if p.name.lower() in self._jp_list_names or p.name.upper() in self._jp_list_names:
                    name = p.name + '_sh'
                    print('jetset model name', p.name, 'renamed to ', name, 'due to sherpa internal naming convention')
                else:
                    name = p.name
                # an explicit fit range, when set, overrides the value bounds
                if p.fit_range_min is not None:
                    val_min = p.fit_range_min
                else:
                    val_min = p.val_min
                if p.fit_range_max is not None:
                    val_max = p.fit_range_max
                else:
                    val_max = p.val_max
                sh_p = Parameter(self._jetset_model.name, name, p.val, min=val_min, max=val_max, units=p.units)
                setattr(self, sh_p.name, sh_p)
                p._sherpa_ref = sh_p
                # NaN bounds fall back to sherpa's hard limits
                if np.isnan(sh_p.max):
                    sh_p.max = sh_p.hard_max
                if np.isnan(sh_p.min):
                    sh_p.min = sh_p.hard_min
                self._jp_list.append(sh_p)
                self._jp_par_array.append(p)
                self._jp_list_names.append(p.name)
        RegriddableModel1D.__init__(self, jetset_model.name, (p._sherpa_ref for p in self._jp_par_array))

    def calc(self, pars, x):
        """Sherpa hook: push current sherpa parameter values into the jetset
        model and evaluate it at frequencies x."""
        for ID, p in enumerate(self._jp_list):
            j_p = self._jp_par_array[ID]
            j_p.val = p.val
        self._jetset_ncalls += 1
        return self._jetset_model.eval(get_model=True, nu=x)

    def plot_model(self, fit_range, model_range=[1E10, 1E30], nu_grid_size=200, plot_obj=None, sed_data=None):
        # NOTE(review): mutable default for model_range is shared across
        # calls; harmless as long as it is only read — confirm.
        self._jetset_model.set_nu_grid(model_range[0], model_range[1], nu_grid_size)
        self._jetset_model.eval()
        plot_obj = self._jetset_model.plot_model(plot_obj=plot_obj, sed_data=sed_data)
        plot_obj.add_model_residual_plot(data=sed_data, model=self._jetset_model,
                                         fit_range=np.log10([fit_range[0], fit_range[1]]))
def plot_sherpa_model(sherpa_model, fit_range=None, model_range=[1E10, 1E30], nu_grid_size=200, sed_data=None,
                      add_res=False, plot_obj=None, label=None, line_style=None):
    """Evaluate `sherpa_model` on a log-spaced frequency grid and plot it as an SED.

    fit_range, when given, bounds the grid (and, with add_res=True, enables the
    residual panel); otherwise model_range is used. Returns the PlotSED object.
    NOTE(review): add_res=True requires sed_data; passing None would fail here.
    """
    if fit_range is not None:
        x = np.logspace(np.log10(fit_range[0]), np.log10(fit_range[1]), nu_grid_size)
    else:
        x = np.logspace(np.log10(model_range[0]), np.log10(model_range[1]), nu_grid_size)
    y = sherpa_model(x)
    if plot_obj is None:
        plot_obj = PlotSED(frame='obs', density=False)
    if sed_data is not None:
        plot_obj.add_data_plot(sed_data=sed_data)
    plot_obj.add_xy_plot(np.log10(x), np.log10(y), label=label, line_style=line_style)
    # residuals are evaluated at the observed data frequencies, not the grid
    if add_res is True and fit_range is not None:
        nufnu_res = sherpa_model(sed_data.data['nu_data'])
        y_res = (sed_data.data['nuFnu_data'] - nufnu_res) / sed_data.data['dnuFnu_data']
        x_res = sed_data.data['nu_data']
        plot_obj.add_xy_residual_plot(x=np.log10(x_res), y=y_res, fit_range=np.log10([fit_range[0], fit_range[1]]))
    return plot_obj
class SherpaMinimizer(Minimizer):
    """Minimizer backend that delegates fitting to sherpa's Fit machinery."""

    def __init__(self, model, method=LevMar(), stat=Chi2()):
        # NOTE(review): if the sherpa import failed, the module-level
        # try/except already raised ImportError, so this branch is dead code.
        if sherpa_installed is True:
            pass
        else:
            raise ImportError('sherpa not installed, \n to use sherpa plugin you need to install sherpa: https://sherpa.readthedocs.io/en/latest/install.html')
        super(SherpaMinimizer, self).__init__(model)
        self._method = method   # sherpa optimization method (default LevMar)
        self._stat = stat       # sherpa fit statistic (default Chi2)
        self._sherpa_model = None
        self._sherpa_data = None
        self.pbar = None

    def _create_sherpa_model(self):
        # Wrap only the free parameters of the jetset fit model.
        self._sherpa_model = JetsetSherpaModel(jetset_model=self.model.fit_model, par_list=self.model.fit_par_free)

    def _create_sherpa_data(self):
        self._sherpa_data = sherpa_data.Data1D("sed", self.model.data['x'], self.model.data['y'], staterror=self.model.data['dy'])

    @property
    def sherpa_fitter(self):
        # Set by _fit(); accessing it before a fit raises AttributeError.
        return self._sherpa_fitter

    @property
    def calls(self):
        # Number of model evaluations done by sherpa (None before setup).
        if self._sherpa_model is not None:
            return self._sherpa_model._jetset_ncalls
        else:
            return None

    @calls.setter
    def calls(self, n):
        if self._sherpa_model is not None:
            self._sherpa_model._jetset_ncalls = n

    def _fit(self, max_ev,):
        # NOTE(review): max_ev is accepted for interface compatibility but is
        # not forwarded to sherpa — confirm whether it should bound the fit.
        self._create_sherpa_model()
        self._create_sherpa_data()
        self._sherpa_model._jetset_ncalls = 0
        self._sherpa_fitter = Fit(self._sherpa_data, self._sherpa_model, method=self._method, stat=self._stat)
        self.mesg = self._sherpa_fitter.fit()
        self.covar = self.mesg.covar
        self.pout = [p for p in self.mesg.parvals]
        self.p = [p for p in self.mesg.parvals]

    def _set_fit_errors(self):
        # 1-sigma errors from the diagonal of the covariance matrix.
        self.errors = [np.sqrt(np.fabs(self.covar[pi, pi])) for pi in range(len(self.model.fit_par_free))]
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import argparse
import tqdm
# Adapted from
# https://gist.github.com/serycjon/c9ad58ecc3176d87c49b69b598f4d6c6
import tensorflow as tf
def parse_arguments():
    """Parse the command line; --event (path to a TF event file) is required."""
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('--event', help='event file', required=True)
    return cli.parse_args()
def main(args):
    """Copy a TF event file, dropping summary values tagged with 'rollouts'.

    The filtered events are written to '<input dir>_filtered'. Returns 0.
    NOTE(review): uses TF1-era APIs (FileWriter, train.summary_iterator).
    """
    out_path = os.path.dirname(args.event) + '_filtered'
    writer = tf.summary.FileWriter(out_path)
    total = None  # event count unknown; tqdm shows a counter-only bar
    for event in tqdm.tqdm(tf.train.summary_iterator(args.event), total=total):
        event_type = event.WhichOneof('what')
        if event_type != 'summary':
            # non-summary events pass through untouched
            writer.add_event(event)
        else:
            wall_time = event.wall_time
            step = event.step
            # keep only values whose tag does not mention 'rollouts'
            filtered_values = [value for value in event.summary.value if 'rollouts' not in value.tag]
            summary = tf.Summary(value=filtered_values)
            # rebuild the event with the original timestamp and step
            filtered_event = tf.summary.Event(summary=summary,
                                              wall_time=wall_time,
                                              step=step)
            writer.add_event(filtered_event)
    writer.close()
    return 0
if __name__ == '__main__':
    # Script entry point: parse --event and exit with main()'s return code.
    args = parse_arguments()
    sys.exit(main(args))
# Adaptive Card Design Schema for a sample form.
# To learn more about designing and working with buttons and cards,
# checkout https://developer.webex.com/docs/api/guides/cards
# Adaptive Card payload shown while the bot is processing a request.
BUSY_CARD_CONTENT = {
    "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
    "type": "AdaptiveCard",
    "version": "1.2",
    "body": [
        {
            # Two equal-width columns: an image on the left, text on the right.
            "type": "ColumnSet",
            "columns": [
                {
                    "type": "Column",
                    "width": 1,
                    "items": [
                        {
                            "type": "Image",
                            "url": "https://i.postimg.cc/2jMv5kqt/AS89975.jpg",
                            "size": "Stretch"
                        }
                    ]
                },
                {
                    "type": "Column",
                    "width": 1,
                    "items": [
                        {
                            # Headline
                            "type": "TextBlock",
                            "text": "Working on it....",
                            "color": "Dark",
                            "weight": "Bolder",
                            "wrap": True,
                            "size": "default",
                            "horizontalAlignment": "Center"
                        },
                        {
                            # Body copy
                            "type": "TextBlock",
                            "text": "I am busy working on your request. Please continue to look busy while I do your work.",
                            "color": "Dark",
                            "height": "stretch",
                            "wrap": True
                        }
                    ]
                }
            ]
        }
    ]
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of "Linux Studio Installer" project
#
# Author: Roman Gladyshev <remicollab@gmail.com>
# License: MIT License
#
# SPDX-License-Identifier: MIT
# License text is available in the LICENSE file and online:
# http://www.opensource.org/licenses/MIT
#
# Copyright (c) 2020 remico
"""Allows to run the application directly with 'python -m studioinstaller',
just like a regular python module
"""
from .app import main
if __name__ == '__main__':
    # Invoked via `python -m studioinstaller`; delegate to the app entry point.
    main()
|
# A newline character between the strings makes print() start a new line.
greeting = "Hello World!"
print("\n".join([greeting] * 3))
#!/usr/bin/env python
# -*- coding: utf8 -*-
from astropy.io import fits
import numpy as np
import os
import time
import sys
from scipy.fftpack import fft,ifft,fft2,ifft2
def bignfft(pm, first, last, dimx, dimy, bxdim, bydim):
    # Out-of-core 3-D FFT of an image sequence, staged through .npy files in
    # work/:
    #   pm == -1 (forward): "apoNNN.npy" -> spatial fft2 -> "fftNNN.npy",
    #                       then temporal fft applied tile by tile in place
    #   pm == +1 (inverse): "fftNNN.npy" -> spatial ifft2 -> "FNNN.npy",
    #                       then temporal ifft applied tile by tile in place
    # bxdim/bydim give the spatial tile size used for the temporal pass.
    str0 = "work/"
    xdim = dimx
    ydim = dimy
    x_inf = 0
    y_inf = 0
    # force even spatial dimensions (odd trailing row/column is dropped)
    if xdim%2 != 0: xdim = xdim - 1
    if ydim%2 != 0: ydim = ydim - 1
    # --------------------------------------------------------
    # Reads separate images and computes their 2d-Fourier transform
    for i in range(first, last+1):
        dcn = str(i).zfill(3)  # zero-padded frame number used in file names
        if pm == -1:
            im = np.zeros([dimy, dimx], dtype=float)
        else:
            im = np.zeros([ydim, xdim], dtype="complex64")
        print "Reading images..."
        if pm == -1:
            im = np.load(str0+"apo"+dcn+".npy")
            # forward transform, normalized by the pixel count
            fim = fft2(im[y_inf:y_inf+ydim, x_inf:x_inf+xdim], overwrite_x=True)/(dimx*dimy)
            print "... now writing its 2d-Fourier transform in "+ str0
            np.save(str0+"fft"+dcn, fim)
        elif pm == 1:
            im = np.load(str0+"fft"+dcn+".npy")
            # inverse transform undoes the forward normalization
            fim = ifft2(im[y_inf:y_inf+ydim, x_inf:x_inf+xdim], overwrite_x=True)*(dimx*dimy)
            print "... now writing its 2d-Fourier transform in "+ str0
            np.save(str0+"F"+dcn, fim)
        del(fim)
        del(im)
    # Creates 3d subarrays (nu_x,nu_y,t) with dimensions in spatial freq. small
    # enough to enter the 3d subarray in the cpu, and computes the fft of
    # 1d temporal arrays corresponding to separated x, y pixels
    a = xdim % bxdim
    b = ydim % bydim
    n = int(xdim / bxdim)
    m = int(ydim / bydim)
    # dix/diy hold each tile's x/y size; the last tile may be smaller
    if a != 0:
        dix = np.zeros([n+1], dtype=int)+bxdim
        dix[n] = a
    else:
        dix = np.zeros([n], dtype=int)+bxdim
    if b != 0:
        diy = np.zeros([m+1], dtype=int)+bydim
        diy[m] = b
    else:
        diy = np.zeros([m], dtype=int)+bydim
    nelx = len(dix)
    nely = len(diy)
    print "Number of subarrays-->", str(nelx*nely)
    fim = np.zeros([ydim, xdim], dtype="complex64")
    for jbox in range(nely):
        for ibox in range(nelx):
            # gather this spatial tile across all frames into a (t, y, x) cube
            box3d = np.zeros([last-first+1, diy[jbox], dix[ibox]], dtype="complex64")
            print "Now reading in the Fourier domain the sub-array-->", ibox, jbox
            for i in range(first, last+1):
                dcn = str(i).zfill(3)
                if pm == -1:
                    fim = np.load(str0+"fft"+dcn+".npy")
                if pm == 1:
                    fim = np.load(str0+"F"+dcn+".npy")
                box3d[i-first, :, :] = fim[jbox*bydim:jbox*bydim+diy[jbox],
                                           ibox*bxdim:ibox*bxdim+dix[ibox]]
            # temporal transform, one pixel's time series at a time
            for y in range(diy[jbox]):
                for x in range(dix[ibox]):
                    tt = box3d[:, y, x]
                    if pm == -1:
                        box3d[:, y, x] = fft(tt, overwrite_x=True)/(last-first+1)
                    if pm == 1:
                        box3d[:, y, x] = ifft(tt, overwrite_x=True)*(last-first+1)
            print "... writin' its t-Fourier transform "+str0
            # scatter the transformed tile back into every frame file
            for i in range(first, last+1):
                dcn = str(i).zfill(3)
                if pm == -1:
                    fim = np.load(str0+"fft"+dcn+".npy")
                if pm == 1:
                    fim = np.load(str0+"F"+dcn+".npy")
                fim[jbox*bydim:jbox*bydim+diy[jbox],
                    ibox*bxdim:ibox*bxdim+dix[ibox]] = box3d[i-first, :, :]
                if pm == -1:
                    np.save(str0+"fft"+dcn, fim)
                if pm == 1:
                    np.save(str0+"F"+dcn, fim)
    print " "
    print "=== END of FFT ==="
|
from abstract.apps import AbstractConfig
from .loader import LetsPartyLoader
from .elastic_models import ElasticLetsPartyModel, lets_party_idx
class LetsPartyConfig(AbstractConfig):
    """App config for the 'lets_party' dataset (party/candidate finance reports)."""
    name = "lets_party"
    verbose_name = "Фінансові звіти партій та кандидатів"
    short_name = "Звіти"
    loader_class = LetsPartyLoader

    @property
    def data_model(self):
        # Imported lazily to avoid a circular import with .models
        from .models import LetsPartyModel

        return LetsPartyModel

    @property
    def sitemap(self):
        # Imported lazily for the same circular-import reason as data_model
        from .sitemaps import LetsPartySitemap

        return LetsPartySitemap

    elastic_model = ElasticLetsPartyModel
    elastic_index = lets_party_idx
|
from sparknlp_jsl.annotator import NerConverterInternal
class NerToChunkConverterLicensed:
    """Factory for a licensed NER-to-chunk converter with default columns."""

    @staticmethod
    def get_default_model():
        """Build a NerConverterInternal reading sentence/token/ner and writing 'entities'."""
        converter = NerConverterInternal()
        converter = converter.setInputCols(["sentence", "token", "ner"])
        return converter.setOutputCol("entities")
|
"""Checks the validity of MachO binary signatures
MachO binaries sometimes include a LC_CODE_SIGNATURE load command
and corresponding section in the __LINKEDIT segment that together
work to "sign" the binary. This script is used to check the validity
of this signature.
Usage:
./code-signature-check.py my_binary 800 300 0 800
Arguments:
binary - The MachO binary to be tested
offset - The offset from the start of the binary to where the code signature section begins
size - The size of the code signature section in the binary
code_offset - The point in the binary to begin hashing
code_size - The length starting from code_offset to hash
"""
import argparse
import collections
import hashlib
import itertools
import struct
import sys
import typing
class CodeDirectoryVersion:
    """CodeDirectory 'version' thresholds; each adds fields to the layout.

    The matching field additions are visible in the CodeDirectoryV* structs
    below (scatterOffset, teamOffset, spare3+codeLimit64, execSeg*).
    """
    SUPPORTSSCATTER = 0x20100
    SUPPORTSTEAMID = 0x20200
    SUPPORTSCODELIMIT64 = 0x20300
    SUPPORTSEXECSEG = 0x20400
class CodeDirectory:
    """Parses a raw CodeDirectory blob into the struct matching its version."""

    @staticmethod
    def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20100', 'CodeDirectoryV20200', 'CodeDirectoryV20300', 'CodeDirectoryV20400']:
        # Peek at the version (third big-endian u32) to choose the layout.
        _magic, _length, version = struct.unpack_from(">III", buf, 0)
        subtype = {
            CodeDirectoryVersion.SUPPORTSSCATTER: CodeDirectoryV20100,
            CodeDirectoryVersion.SUPPORTSTEAMID: CodeDirectoryV20200,
            CodeDirectoryVersion.SUPPORTSCODELIMIT64: CodeDirectoryV20300,
            CodeDirectoryVersion.SUPPORTSEXECSEG: CodeDirectoryV20400,
        }.get(version, CodeDirectoryBase)  # unknown versions use the base layout
        return subtype._make(struct.unpack_from(subtype._format(), buf, 0))
class CodeDirectoryBase(typing.NamedTuple):
    """CodeDirectory header fields common to all versions.

    Field order matches the big-endian layout of _format(): nine u32s,
    four u8s (hashSize..pageSize), then one u32 (spare2).
    """
    magic: int
    length: int
    version: int
    flags: int
    hashOffset: int
    identOffset: int
    nSpecialSlots: int
    nCodeSlots: int
    codeLimit: int
    hashSize: int
    hashType: int
    platform: int
    pageSize: int
    spare2: int

    @staticmethod
    def _format() -> str:
        # ">": big-endian; 9 x u32, 4 x u8, 1 x u32 = 14 fields, as above.
        return ">IIIIIIIIIBBBBI"
class CodeDirectoryV20100(typing.NamedTuple):
    """CodeDirectory for version 0x20100 (SUPPORTSSCATTER): base + scatterOffset."""
    magic: int
    length: int
    version: int
    flags: int
    hashOffset: int
    identOffset: int
    nSpecialSlots: int
    nCodeSlots: int
    codeLimit: int
    hashSize: int
    hashType: int
    platform: int
    pageSize: int
    spare2: int
    scatterOffset: int  # added in 0x20100

    @staticmethod
    def _format() -> str:
        # Base layout plus one u32 (scatterOffset).
        return CodeDirectoryBase._format() + "I"
class CodeDirectoryV20200(typing.NamedTuple):
    """CodeDirectory layout for version >= 0x20200 (adds teamOffset)."""
    magic: int
    length: int
    version: int
    flags: int
    hashOffset: int
    identOffset: int
    nSpecialSlots: int
    nCodeSlots: int
    codeLimit: int
    hashSize: int
    hashType: int
    platform: int
    pageSize: int
    spare2: int
    scatterOffset: int
    teamOffset: int

    @staticmethod
    def _format() -> str:
        # V20100 layout plus one big-endian uint32 (teamOffset).
        return CodeDirectoryV20100._format() + "I"
class CodeDirectoryV20300(typing.NamedTuple):
    """CodeDirectory layout for version >= 0x20300 (adds spare3 and codeLimit64)."""
    magic: int
    length: int
    version: int
    flags: int
    hashOffset: int
    identOffset: int
    nSpecialSlots: int
    nCodeSlots: int
    codeLimit: int
    hashSize: int
    hashType: int
    platform: int
    pageSize: int
    spare2: int
    scatterOffset: int
    teamOffset: int
    spare3: int
    codeLimit64: int

    @staticmethod
    def _format() -> str:
        # V20200 layout plus a uint32 (spare3) and a uint64 (codeLimit64).
        return CodeDirectoryV20200._format() + "IQ"
class CodeDirectoryV20400(typing.NamedTuple):
    """CodeDirectory layout for version >= 0x20400 (adds executable-segment fields)."""
    magic: int
    length: int
    version: int
    flags: int
    hashOffset: int
    identOffset: int
    nSpecialSlots: int
    nCodeSlots: int
    codeLimit: int
    hashSize: int
    hashType: int
    platform: int
    pageSize: int
    spare2: int
    scatterOffset: int
    teamOffset: int
    spare3: int
    codeLimit64: int
    execSegBase: int
    execSegLimit: int
    execSegFlags: int

    @staticmethod
    def _format() -> str:
        # V20300 layout plus three uint64s (execSegBase/execSegLimit/execSegFlags).
        return CodeDirectoryV20300._format() + "QQQ"
class CodeDirectoryBlobIndex(typing.NamedTuple):
    """One (type, offset) entry from a SuperBlob's index table."""
    type_: int
    offset: int

    @staticmethod
    def make(buf: memoryview) -> 'CodeDirectoryBlobIndex':
        """Decode a blob index from the start of *buf*."""
        fields = struct.unpack_from(CodeDirectoryBlobIndex._layout(), buf, 0)
        return CodeDirectoryBlobIndex._make(fields)

    @staticmethod
    def bytesize() -> int:
        """Size in bytes of one serialized blob index."""
        return struct.calcsize(CodeDirectoryBlobIndex._layout())

    @staticmethod
    def _layout() -> str:
        # Two big-endian 32-bit words.
        return ">II"
class CodeDirectorySuperBlob(typing.NamedTuple):
    """SuperBlob header plus its decoded index entries."""
    magic: int
    length: int
    count: int
    blob_indices: typing.List[CodeDirectoryBlobIndex]

    @staticmethod
    def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
        """Decode the SuperBlob header and its *count* blob indices from *buf*."""
        header_layout = ">III"
        magic, length, count = struct.unpack_from(header_layout, buf, 0)
        base = struct.calcsize(header_layout)
        step = CodeDirectoryBlobIndex.bytesize()
        indices = [
            CodeDirectoryBlobIndex.make(buf[base + i * step:])
            for i in range(count)
        ]
        return CodeDirectorySuperBlob(magic, length, count, indices)
def unpack_null_terminated_string(buf: memoryview) -> str:
    """Decode the bytes of *buf* up to (not including) the first NUL byte.

    If no NUL is present, the whole buffer is decoded.
    """
    raw = bytes(buf)
    end = raw.find(0)
    if end >= 0:
        raw = raw[:end]
    return raw.decode()
def main():
    """Parse a code-signature SuperBlob and verify each code-page hash.

    Reads the signature region at (offset, size) from the binary, then hashes
    the code region page-by-page and compares against the slots stored in each
    CodeDirectory. Exits with a non-zero status on the first mismatch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('binary', type=argparse.FileType('rb'), help='The file to analyze')
    parser.add_argument('offset', type=int, help='Offset to start of Code Directory data')
    parser.add_argument('size', type=int, help='Size of Code Directory data')
    parser.add_argument('code_offset', type=int, help='Offset to start of code pages to hash')
    parser.add_argument('code_size', type=int, help='Size of the code pages to hash')
    args = parser.parse_args()

    args.binary.seek(args.offset)
    super_blob_mem = memoryview(args.binary.read(args.size))
    super_blob = CodeDirectorySuperBlob.make(super_blob_mem)
    print(super_blob)

    for blob_index in super_blob.blob_indices:
        code_directory_offset = blob_index.offset
        code_directory = CodeDirectory.make(super_blob_mem[code_directory_offset:])
        print(code_directory)

        ident_offset = code_directory_offset + code_directory.identOffset
        print("Code Directory ID: " + unpack_null_terminated_string(super_blob_mem[ident_offset:]))

        # Use the digest the directory declares (CS_HASHTYPE_SHA1 = 1,
        # CS_HASHTYPE_SHA256 = 2 per xnu cs_blobs.h). SHA-256 was previously
        # hard-coded, so SHA-1-signed binaries always failed verification;
        # unknown types keep the old SHA-256 default.
        hash_name = {1: 'sha1', 2: 'sha256'}.get(code_directory.hashType, 'sha256')

        code_offset = args.code_offset
        code_end = code_offset + args.code_size
        page_size = 1 << code_directory.pageSize
        args.binary.seek(code_offset)
        hashes_offset = code_directory_offset + code_directory.hashOffset
        for _ in range(code_directory.nCodeSlots):
            hash_bytes = bytes(super_blob_mem[hashes_offset:hashes_offset + code_directory.hashSize])
            hashes_offset += code_directory.hashSize

            hasher = hashlib.new(hash_name)
            # The final page may be shorter than a full page.
            read_size = min(page_size, code_end - code_offset)
            hasher.update(args.binary.read(read_size))
            calculated_hash_bytes = hasher.digest()
            code_offset += read_size

            print("%s <> %s" % (hash_bytes.hex(), calculated_hash_bytes.hex()))
            if hash_bytes != calculated_hash_bytes:
                sys.exit(-1)


if __name__ == '__main__':
    main()
|
def sticks_game_l2():
    '''
    Level 2 Game Rules
    Allow each user to input a username.
    Ensure that the user is able to enter how many sticks to remove (1, 2, or 3)
    The user should ONLY be able to remove 1, 2, or 3 sticks.
    Ensure that player turns are kept track of.
    When the game is down to a small number of sticks, ensure that the user's moves are legal. (with one stick left, the user should not be able to remove three sticks)
    The game should end when the number of sticks reduces to zero.
    Ensure that the correct winner is announced at the end of the game.
    '''
    sticks_left = 15
    p1 = input("Player 1, enter a name please: ")
    p2 = input("Player 2, enter a name please: ")
    p1_turn = True
    while sticks_left > 0:
        turn_name = p1 if p1_turn else p2
        # A move is legal only if it takes 1-3 sticks AND no more than remain.
        # The previous version silently clamped out-of-range input and let a
        # player take more sticks than were left, both of which violate the
        # rules above — instead, re-prompt until the move is legal.
        max_take = min(3, sticks_left)
        while True:
            try:
                remove = int(input('%s would you like to remove 1, 2, or 3 sticks? -->' % turn_name))
            except ValueError:
                print("Please enter a whole number.")
                continue
            if 1 <= remove <= max_take:
                break
            print("You may only remove between 1 and %d sticks." % max_take)
        sticks_left -= remove
        print(sticks_left)
        p1_turn = not p1_turn
    # The loop just flipped the turn, so p1_turn now names the player who did
    # NOT take the last stick — the winner under these rules.
    winner = p1 if p1_turn else p2
    print("Congratulations %s, you won!" % winner)
|
#
# RAMFS.
#
##################################################
# import
##################################################
import uos
##################################################
# function
##################################################
class RAMFlashDev:
    """In-RAM block device exposing the read/write/erase interface SPIFFS expects."""

    def __init__(self):
        # 256 KiB backing store, pre-allocated.
        self.fs_size = 256 * 1024
        self.fs_data = bytearray(256 * 1024)
        self.erase_block = 32 * 1024
        self.log_block_size = 64 * 1024
        self.log_page_size = 4 * 1024

    def read(self, buf, size, addr):
        """Fill *buf* with len(buf) bytes starting at *addr*."""
        i = 0
        while i < len(buf):
            buf[i] = self.fs_data[addr + i]
            i += 1

    def write(self, buf, size, addr):
        """Copy the contents of *buf* into the backing store at *addr*."""
        for i, value in enumerate(buf):
            self.fs_data[addr + i] = value

    def erase(self, size, addr):
        """Erase *size* bytes at *addr* (flash semantics: erased bytes read 0xFF)."""
        self.fs_data[addr:addr + size] = b'\xff' * size
##################################################
# main
##################################################
# Create a RAM-backed flash device, format it with SPIFFS and mount it.
blkdev = RAMFlashDev()
vfs = uos.VfsSpiffs(blkdev)
vfs.mkfs(vfs)
uos.mount(vfs, '/ramdisk')

# Round-trip a small text file through the mounted RAM filesystem.
text_str = "hello maixpy"
f = open("/ramdisk/test.txt", "w")
print("write:", text_str)
f.write(text_str)
f.close()

f = open("/ramdisk/test.txt", "r")
text = f.read()
print("read:", text)
f.close()
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper script for starting/stopping/reloading Glance server programs.
Thanks for some of the code, Swifties ;)
"""
import argparse
import fcntl
import os
import resource
import signal
import subprocess
import sys
import tempfile
import time
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_utils import units
from glance.common import config
from glance.i18n import _
CONF = cfg.CONF
ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart',
'reload', 'force-reload']
ALL_SERVERS = ['api', 'scrubber']
RELOAD_SERVERS = ['glance-api']
GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-scrubber']
MAX_DESCRIPTORS = 32768
MAX_MEMORY = 2 * units.Gi # 2 GB
USAGE = """%(prog)s [options] <SERVER> <COMMAND> [CONFPATH]
Where <SERVER> is one of:
all, {0}
And command is one of:
{1}
And CONFPATH is the optional configuration file to use.""".format(
', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS))
exitcode = 0
def gated_by(predicate):
    """Decorator factory: the wrapped function only runs when *predicate* is truthy.

    When *predicate* is falsy the decorated function becomes a no-op that
    returns None. The predicate is evaluated once, at decoration time.
    """
    def wrap(f):
        def wrapped_f(*args):
            if not predicate:
                return None
            return f(*args)
        return wrapped_f
    return wrap
def pid_files(server, pid_file):
    """Yield (path, pid) for *server*'s pid file, if one exists.

    An explicit *pid_file* takes precedence; otherwise the conventional
    /var/run/glance/<server>.pid location is checked. Yields nothing when
    no pid file is present.
    """
    if pid_file:
        candidate = os.path.abspath(pid_file)
    else:
        candidate = '/var/run/glance/%s.pid' % server
    if os.path.exists(candidate):
        yield candidate, int(open(candidate).read().strip())
def do_start(verb, pid_file, server, args):
    """Start (or Restart/Respawn) *server* as a detached daemon.

    *verb* is 'Start', 'Restart' or 'Respawn'; it is used in messages and to
    decide whether to check for an already-running instance. Returns the
    child pid via launch(), or None when the server is already running.
    """
    if verb != 'Respawn' and pid_file == CONF.pid_file:
        # Only the default pid-file path is checked for a live instance.
        for pid_file, pid in pid_files(server, pid_file):
            if os.path.exists('/proc/%s' % pid):
                print(_("%(serv)s appears to already be running: %(pid)s") %
                      {'serv': server, 'pid': pid_file})
                return
            else:
                print(_("Removing stale pid file %s") % pid_file)
                os.unlink(pid_file)

    try:
        # Raise limits for the daemon; requires privileges.
        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
        resource.setrlimit(resource.RLIMIT_DATA,
                           (MAX_MEMORY, MAX_MEMORY))
    except ValueError:
        print(_('Unable to increase file descriptor limit. '
                'Running as non-root?'))
    os.environ['PYTHON_EGG_CACHE'] = '/tmp'

    def write_pid_file(pid_file, pid):
        # Record the child's pid for later status/stop commands.
        with open(pid_file, 'w') as fp:
            fp.write('%d\n' % pid)

    def redirect_to_null(fds):
        # Point the given descriptors at /dev/null.
        with open(os.devnull, 'r+b') as nullfile:
            for desc in fds:  # close fds
                try:
                    os.dup2(nullfile.fileno(), desc)
                except OSError:
                    pass

    def redirect_to_syslog(fds, server):
        # Pipe the given descriptors into logger(1), tagged with the server name.
        log_cmd = 'logger'
        log_cmd_params = '-t "%s[%d]"' % (server, os.getpid())
        process = subprocess.Popen([log_cmd, log_cmd_params],
                                   stdin=subprocess.PIPE)
        for desc in fds:  # pipe to logger command
            try:
                os.dup2(process.stdin.fileno(), desc)
            except OSError:
                pass

    def redirect_stdio(server, capture_output):
        # Detach stdin always; stdout/stderr go to syslog or /dev/null.
        input = [sys.stdin.fileno()]
        output = [sys.stdout.fileno(), sys.stderr.fileno()]
        redirect_to_null(input)
        if capture_output:
            redirect_to_syslog(output, server)
        else:
            redirect_to_null(output)

    @gated_by(CONF.capture_output)
    def close_stdio_on_exec():
        fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()]
        for desc in fds:  # set close on exec flag
            fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    def launch(pid_file, conf_file=None, capture_output=False, await_time=0):
        # NOTE(review): 'server' and 'verb' are closed over from do_start's scope.
        args = [server]
        if conf_file:
            args += ['--config-file', conf_file]
            msg = (_('%(verb)sing %(serv)s with %(conf)s') %
                   {'verb': verb, 'serv': server, 'conf': conf_file})
        else:
            msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server})
        print(msg)

        close_stdio_on_exec()

        pid = os.fork()
        if pid == 0:
            # Child: detach from the controlling terminal and exec the server
            # binary (resolved via PATH).
            os.setsid()
            redirect_stdio(server, capture_output)
            try:
                os.execlp('%s' % server, *args)
            except OSError as e:
                msg = (_('unable to launch %(serv)s. Got error: %(e)s') %
                       {'serv': server, 'e': e})
                sys.exit(msg)
            sys.exit(0)
        else:
            # Parent: record the child pid and optionally wait on it.
            write_pid_file(pid_file, pid)
            await_child(pid, await_time)
            return pid

    @gated_by(CONF.await_child)
    def await_child(pid, await_time):
        # Poll non-blockingly until the child exits or await_time elapses,
        # propagating the child's exit status through the global exitcode.
        bail_time = time.time() + await_time
        while time.time() < bail_time:
            reported_pid, status = os.waitpid(pid, os.WNOHANG)
            if reported_pid == pid:
                global exitcode
                exitcode = os.WEXITSTATUS(status)
                break
            time.sleep(0.05)

    conf_file = None
    if args and os.path.exists(args[0]):
        conf_file = os.path.abspath(os.path.expanduser(args[0]))

    return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child)
def do_check_status(pid_file, server):
    """Report whether *server* appears to be running, based on its pid file."""
    if not os.path.exists(pid_file):
        print(_("%s is stopped") % server)
        return
    with open(pid_file, 'r') as pidfile:
        pid = pidfile.read().strip()
    print(_("%(serv)s (pid %(pid)s) is running...") %
          {'serv': server, 'pid': pid})
def get_pid_file(server, pid_file):
    """Return a usable pid-file path for *server*.

    Uses *pid_file* when given, otherwise /var/run/glance/<server>.pid.
    Falls back to a pid file in a fresh temp directory when the target
    directory is not writable (e.g. running as non-root).
    """
    pid_file = (os.path.abspath(pid_file) if pid_file else
                '/var/run/glance/%s.pid' % server)
    # The original unpacked os.path.split into locals named 'dir' and 'file',
    # shadowing two builtins (and never using 'file'); use dirname instead.
    pid_dir = os.path.dirname(pid_file)
    if not os.path.exists(pid_dir):
        try:
            os.makedirs(pid_dir)
        except OSError:
            # Creation failure is handled below via the writability check.
            pass

    if not os.access(pid_dir, os.W_OK):
        fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server)
        msg = (_('Unable to create pid file %(pid)s. Running as non-root?\n'
                 'Falling back to a temp file, you can stop %(service)s '
                 'service using:\n'
                 '    %(file)s %(server)s stop --pid-file %(fb)s') %
               {'pid': pid_file,
                'service': server,
                'file': __file__,
                'server': server,
                'fb': fallback})
        print(msg)
        pid_file = fallback
    return pid_file
def do_reload(pid_file, server):
    """Send SIGHUP to *server* so it reloads its configuration.

    Exits with a message when the server does not support reload or is
    not running.
    """
    if server not in RELOAD_SERVERS:
        sys.exit(_('Reload of %(serv)s not supported') % {'serv': server})
    if not os.path.exists(pid_file):
        sys.exit(_('Server %(serv)s is stopped') % {'serv': server})
    with open(pid_file, 'r') as pidfile:
        pid = int(pidfile.read().strip())

    sig = signal.SIGHUP
    try:
        print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)')
              % {'serv': server, 'pid': pid, 'sig': sig})
        os.kill(pid, sig)
    except OSError:
        print(_("Process %d not running") % pid)
def do_stop(server, args, graceful=False):
    """Stop *server* by signalling the pid recorded in its pid file.

    With graceful=True, servers that support graceful shutdown receive
    SIGHUP instead of SIGTERM. After signalling, waits up to 15 seconds
    for each process to disappear from /proc.
    """
    if graceful and server in GRACEFUL_SHUTDOWN_SERVERS:
        sig = signal.SIGHUP
    else:
        sig = signal.SIGTERM

    did_anything = False
    # BUG FIX: pid_files() is a generator and we iterate it twice (signal,
    # then wait). Previously the second loop always saw an exhausted
    # generator and the 15-second wait never ran; materialize it first.
    pfiles = list(pid_files(server, CONF.pid_file))
    for pid_file, pid in pfiles:
        did_anything = True
        try:
            os.unlink(pid_file)
        except OSError:
            pass
        try:
            print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)')
                  % {'serv': server, 'pid': pid, 'sig': sig})
            os.kill(pid, sig)
        except OSError:
            print(_("Process %d not running") % pid)
    for pid_file, pid in pfiles:
        for _junk in range(150):  # 15 seconds
            if not os.path.exists('/proc/%s' % pid):
                break
            time.sleep(0.1)
        else:
            print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;'
                    ' giving up') % {'pid': pid, 'file': pid_file})
    if not did_anything:
        print(_('%s is already stopped') % server)
def add_command_parsers(subparsers):
    """Register one subparser per server name, each accepting the commands."""
    cmd_parser = argparse.ArgumentParser(add_help=False)
    cmd_subparsers = cmd_parser.add_subparsers(dest='command')
    for cmd in ALL_COMMANDS:
        cmd_subparsers.add_parser(cmd).add_argument(
            'args', nargs=argparse.REMAINDER)
    for server in ALL_SERVERS:
        full_name = 'glance-' + server
        # Both the short ("api") and full ("glance-api") spellings are accepted.
        for alias in (server, full_name):
            sub = subparsers.add_parser(alias, parents=[cmd_parser])
            sub.set_defaults(servers=[full_name])
    subparsers.add_parser('all', parents=[cmd_parser]).set_defaults(
        servers=['glance-' + s for s in ALL_SERVERS])
def main():
    """Entry point: register CLI options, then dispatch the requested command."""
    global exitcode

    opts = [
        cfg.SubCommandOpt('server',
                          title='Server types',
                          help='Available server types',
                          handler=add_command_parsers),
        cfg.StrOpt('pid-file',
                   metavar='PATH',
                   help='File to use as pid file. Default: '
                        '/var/run/glance/$server.pid.'),
        cfg.IntOpt('await-child',
                   metavar='DELAY',
                   default=0,
                   help='Period to wait for service death '
                        'in order to report exit code '
                        '(default is to not wait at all).'),
        cfg.BoolOpt('capture-output',
                    default=False,
                    help='Capture stdout/err in syslog '
                         'instead of discarding it.'),
        cfg.BoolOpt('respawn',
                    default=False,
                    help='Restart service on unexpected death.'),
    ]
    CONF.register_cli_opts(opts)
    config.parse_args(usage=USAGE)

    @gated_by(CONF.await_child)
    @gated_by(CONF.respawn)
    def mutually_exclusive():
        # Runs (and aborts) only when both options were supplied.
        sys.stderr.write('--await-child and --respawn are mutually exclusive')
        sys.exit(1)

    mutually_exclusive()

    @gated_by(CONF.respawn)
    def anticipate_respawn(children):
        # Babysit the started children: respawn any that die unexpectedly,
        # unless the pid file was removed (deliberate stop) or rewritten
        # within the last second (bouncing).
        while children:
            pid, status = os.wait()
            if pid in children:
                (pid_file, server, args) = children.pop(pid)
                running = os.path.exists(pid_file)
                one_second_ago = time.time() - 1
                bouncing = (running and
                            os.path.getmtime(pid_file) >= one_second_ago)
                if running and not bouncing:
                    args = (pid_file, server, args)
                    new_pid = do_start('Respawn', *args)
                    children[new_pid] = args
                else:
                    rsn = 'bouncing' if bouncing else 'deliberately stopped'
                    print(_('Suppressed respawn as %(serv)s was %(rsn)s.')
                          % {'serv': server, 'rsn': rsn})

    if CONF.server.command == 'start':
        children = {}
        for server in CONF.server.servers:
            pid_file = get_pid_file(server, CONF.pid_file)
            args = (pid_file, server, CONF.server.args)
            pid = do_start('Start', *args)
            children[pid] = args

        anticipate_respawn(children)

    if CONF.server.command == 'status':
        for server in CONF.server.servers:
            pid_file = get_pid_file(server, CONF.pid_file)
            do_check_status(pid_file, server)

    if CONF.server.command == 'stop':
        for server in CONF.server.servers:
            do_stop(server, CONF.server.args)

    if CONF.server.command == 'shutdown':
        # Graceful stop: SIGHUP for servers that support it.
        for server in CONF.server.servers:
            do_stop(server, CONF.server.args, graceful=True)

    if CONF.server.command == 'restart':
        for server in CONF.server.servers:
            do_stop(server, CONF.server.args)
        for server in CONF.server.servers:
            pid_file = get_pid_file(server, CONF.pid_file)
            do_start('Restart', pid_file, server, CONF.server.args)

    if CONF.server.command in ('reload', 'force-reload'):
        for server in CONF.server.servers:
            pid_file = get_pid_file(server, CONF.pid_file)
            do_reload(pid_file, server)

    sys.exit(exitcode)
|
from typing import Dict
from moz_library.books import Books
from moz_library.rental_books import RentalBooks
from moz_library.reserved_books import ReservedBooks
from moz_library.rental_book_filter import RentalBookFilter
from moz_library.rental_book_expired_filter import RentalBookExpiredFilter
from moz_library.rental_book_expire_filter import RentalBookExpireFilter
from moz_library.reserved_book_filter import ReservedBookFilter
from moz_library.reserved_book_prepared_filter import ReservedBookPreparedFilter
class BookFilter:
    """Dispatches a Books collection to the filter matching the given type."""

    TYPE_RENTAL_NORMAL = "rental_normal"
    TYPE_RENTAL_EXPIRED = "rental_expired"
    TYPE_RENTAL_EXPIRE = "rental_expire"
    TYPE_RESERVED_NORMAL = "reserved_normal"
    TYPE_RESERVED_PREPARED = "reserved_prepared"

    @staticmethod
    def do(books: Books, type: str = TYPE_RENTAL_NORMAL, param: Dict = None) -> Books:
        """Filter *books* according to *type*.

        :param books: collection to filter (RentalBooks or ReservedBooks)
        :param type: one of the TYPE_* constants
        :param param: filter-specific options; defaults to an empty dict
        :return: the filtered collection, annotated with filter_type/filter_param
        """
        # BUG FIX: the default was a mutable dict literal (param={}), which is
        # shared between all calls and can leak state across invocations.
        if param is None:
            param = {}
        new_books = books
        if isinstance(books, RentalBooks):
            filter = BookFilter._create_rental_book_filter(type, param)
            new_books = filter.do(books)
        elif isinstance(books, ReservedBooks):
            filter = BookFilter._create_reserved_book_filter(type, param)
            new_books = filter.do(books)
        new_books.filter_type = type
        new_books.filter_param = param
        return new_books

    @staticmethod
    def _create_rental_book_filter(type: str, param: Dict):
        """Return the rental-book filter for *type* (None for unknown types)."""
        if type == BookFilter.TYPE_RENTAL_NORMAL:
            # Sort-only filter
            return RentalBookFilter(param)
        elif type == BookFilter.TYPE_RENTAL_EXPIRED:
            # Keep only books whose loan period has expired
            return RentalBookExpiredFilter(param)
        elif type == BookFilter.TYPE_RENTAL_EXPIRE:
            # Keep only books that are close to expiring
            return RentalBookExpireFilter(param)

    @staticmethod
    def _create_reserved_book_filter(type: str, param: Dict):
        """Return the reserved-book filter for *type* (None for unknown types)."""
        if type == BookFilter.TYPE_RESERVED_NORMAL:
            # Sort-only filter
            return ReservedBookFilter(param)
        elif type == BookFilter.TYPE_RESERVED_PREPARED:
            # Keep only books that are almost ready for pickup
            return ReservedBookPreparedFilter(param)
|
import warnings
from typing import List, Optional, Type, Union
import htmlgenerator as hg
from django import forms
from .helpers import ErrorList, HelpText, Label
from .widgets import BaseWidget, HiddenInput, TextInput
DEFAULT_FORM_CONTEXTNAME = "__bread_form"
DEFAULT_FORMSET_CONTEXTNAME = "__bread_formset_form"
class FormFieldMarker(hg.BaseElement):
    # Internal helper class to mark form fields inside a render tree so that
    # the fields can be automatically extracted from it in order to generate
    # a django form class, see bread.forms.forms
    def __init__(self, fieldname, field):
        # Name of the django form field this marker wraps; the field element
        # itself becomes this element's single child.
        self.fieldname = fieldname
        super().__init__(field)
def generate_widget_element(
    fieldname: str = None,  # required to derive the widget from a django form field
    form: Union[
        forms.Form, hg.Lazy, str
    ] = DEFAULT_FORM_CONTEXTNAME,  # required to derive the widget from a django form field
    no_wrapper: bool = False,  # wrapper produces less dense layout, from carbon styles
    no_label: bool = False,
    show_hidden_initial: bool = False,  # required in special cases to add an initial value
    #
    #
    # --------------------------------------------------------------------------
    # parameters which are normally not required, when using a django form field
    # but can be filled in to create form fields independently from django form fields or
    # manually overriding values from the form field
    widgetclass: Optional[
        Type[BaseWidget]
    ] = None,  # normally be taken from the django form field, will be carbon-ized
    label: Union[
        str, hg.BaseElement
    ] = None,  # normally be taken from the django form field, will be carbon-ized
    help_text: Union[
        str, hg.BaseElement
    ] = None,  # normally be taken from the django form field, will be carbon-ized
    errors: Optional[
        List[str]
    ] = None,  # normally be taken from the django form field, will be carbon-ized
    inputelement_attrs: Optional[
        dict
    ] = None,  # normally be taken from the django form field, will be carbon-ized
    **attributes,
):
    """
    Function to produce a carbon design based form field widget which is
    compatible with Django forms and based on htmlgenerator.

    Returns a FormFieldMarker wrapping the generated widget element so the
    field can later be extracted from the render tree.
    """
    hidden = None
    if show_hidden_initial:
        # Render a second, hidden copy of the field that carries the initial
        # value (mirrors django's show_hidden_initial behaviour).
        hidden = generate_widget_element(
            fieldname=fieldname,
            form=form,
            inputelement_attrs=inputelement_attrs,
            widgetclass=HiddenInput,
            no_wrapper=True,
            no_label=True,
            show_hidden_initial=False,
            **attributes,
        )
    inputelement_attrs = inputelement_attrs or {}
    boundfield = None

    # warnings for deprecated API usage
    if "widgetattributes" in attributes:
        warnings.warn(
            "FormField does no longer support the parameter 'widgetattributes'. "
            "The parameter 'inputelement_attrs' serves the same purpose'"
        )
    if "elementattributes" in attributes:
        warnings.warn(
            "FormField does no longer support the parameter 'elementattributes'. "
            "attributes can now be directly passed as kwargs."
        )

    # check if this field will be used with a django form if yes, derive the
    # according values lazyly from the context
    if fieldname is not None and form is not None:
        if isinstance(form, str):
            # A string form argument is a context variable name.
            form = hg.C(form)
        label = label or form[fieldname].label
        help_text = help_text or form.fields[fieldname].help_text
        errors = errors or form[fieldname].errors

        # do this to preserve the original inputelement_attrs in the
        # buildattribs scope
        orig_inputattribs = inputelement_attrs

        def buildattribs(context):
            # Resolved lazily at render time so the bound field reflects the
            # form instance present in the render context.
            realform = hg.resolve_lazy(form, context)
            id = None
            if realform[fieldname].auto_id and "id" not in orig_inputattribs:
                id = (
                    realform[fieldname].html_initial_id
                    if show_hidden_initial
                    else realform[fieldname].auto_id
                )
            return {
                "id": id,
                "name": realform[fieldname].html_initial_name
                if show_hidden_initial
                else realform[fieldname].html_name,
                "value": realform[fieldname].value(),
                **realform[fieldname].build_widget_attrs({}),
                **realform[fieldname].field.widget.attrs,
                **orig_inputattribs,
            }

        inputelement_attrs = hg.F(buildattribs)
        labelfor = form[fieldname].id_for_label
        boundfield = form[fieldname]
    else:
        labelfor = inputelement_attrs.get("id")

    # helper elements
    label = Label(
        label,
        required=inputelement_attrs.get("required"),
        disabled=inputelement_attrs.get("disabled"),
        _for=labelfor,
    )
    help_text = HelpText(help_text, disabled=inputelement_attrs.get("disabled"))
    errors = ErrorList(errors)

    # instantiate field (might create a lazy element when using _guess_widget)
    widgetclass = _guess_widget(fieldname, form, widgetclass)
    ret = widgetclass(
        label=None if no_label else label,
        help_text=help_text,
        errors=errors,
        inputelement_attrs=inputelement_attrs,
        boundfield=boundfield,
        **attributes,
    )
    if show_hidden_initial:
        ret = hg.BaseElement(ret, hidden)
    if not no_wrapper:
        # Hidden widgets are rendered without the field wrapper.
        ret = hg.If(
            hg.F(
                lambda c: isinstance(
                    hg.resolve_lazy(boundfield, c).field.widget, forms.HiddenInput
                )
            ),
            ret,
            ret.with_fieldwrapper(),
        )
    return FormFieldMarker(fieldname, ret)
# Using this alias we can prevent a huge refactoring across multiple repos.
# This is slightly inconsistent with the default naming scheme of python,
# where camel case denotes a class rather than a function.
# TODO: maybe refactor FormField to be formfield
FormField = generate_widget_element
def _guess_widget(fieldname, form, suggested_widgetclass):
    """Return a lazy element that resolves the widget class for a form field.

    Resolution happens at render time, when the real form instance is
    available in the context.
    """
    # Map django widget/field classes to registered bread widget classes.
    widget_map = {}
    for cls in _all_subclasses(BaseWidget):
        widget_map.setdefault(cls.django_widget, []).append(cls)

    def resolve(context):
        realform = hg.resolve_lazy(form, context)
        widgetclass = type(realform[fieldname].field.widget)
        fieldclass = type(realform[fieldname].field)
        # 1. Hidden widgets always win.
        if issubclass(widgetclass, forms.HiddenInput):
            return HiddenInput
        # 2. An explicitly requested widget class comes next.
        if suggested_widgetclass is not None:
            return suggested_widgetclass
        # 3. Automatic mapping, by field class first, then by widget class.
        for key in (fieldclass, widgetclass):
            if key in widget_map:
                return widget_map[key][0]
        # 4. Nothing matched: warn and fall back to a plain text input.
        warnings.warn(
            f"Form field {type(realform).__name__}.{fieldname} ({fieldclass}) uses widget {widgetclass} but "
            "bread has no implementation, default to TextInput"
        )
        return TextInput

    return hg.F(resolve)
def _all_subclasses(cls):
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in _all_subclasses(c)]
)
|
from pymysql import connect
# DB connection helper
def get_connection():
    """Return a new pymysql connection to the raspi database.

    Connection settings can be overridden via RASPI_DB_* environment
    variables; the previous hard-coded values remain as defaults.
    """
    # SECURITY: credentials were hard-coded in source; prefer configuring
    # RASPI_DB_PASSWORD (etc.) in the environment instead of relying on
    # the committed default.
    import os
    return connect(host=os.environ.get('RASPI_DB_HOST', '192.168.25.60'),
                   user=os.environ.get('RASPI_DB_USER', 'pi'),
                   password=os.environ.get('RASPI_DB_PASSWORD', 'Wkddmsgk0613!'),
                   db=os.environ.get('RASPI_DB_NAME', 'raspi'),
                   charset='utf8')
|
"""
Created on Tue Jan 26 19:32:08 2021
@author: Lucas Valentim
"""
# Yield strength (Fty)
# Ultimate strength (Ftu)
# Modulus of elasticity (E)
# Poisson's ratio (v)
# Mechanical property table keyed by material name (Portuguese names kept as
# runtime keys). Fty/Ftu/E values are presumably in MPa and v is the
# dimensionless Poisson's ratio — units are not stated here; confirm against
# the data source.
mec_prop={
    'aço c 1010 laminação quente':{
        'Fty':165,
        'Ftu':296,
        'E':200100,
        'v':0.32},
    'aço c 1010 laminação fria':{
        'Fty':414,
        'Ftu':496,
        'E':200100,
        'v':0.32},
    'aço c 1015 laminação quente':{
        'Fty':228,
        'Ftu':379,
        'E':200100,
        'v':0.32},
    'aço c 1015 normalizado':{
        'Fty':241,
        'Ftu':345,
        'E':200100,
        'v':0.32},
    'aço c 1025 laminação quente':{
        'Fty':310,
        'Ftu':462,
        'E':200100,
        'v':0.32},
    'aço c 1025 normalizado':{
        'Fty':331,
        'Ftu':448,
        'E':200100,
        'v':0.32},
    'aço c 1025 laminação fria':{
        'Fty':483,
        'Ftu':586,
        'E':200100,
        'v':0.32},
    'aço inox 304':{
        'Fty':517,
        'Ftu':724,
        'E':186300,
        'v':0.27},
    'aço inox 301':{
        'Fty':517,
        'Ftu':862,
        'E':186300,
        'v':0.27},
    'aço aisi 4130 normalizado MIL-T-6736':{
        'Fty':517,
        'Ftu':655,
        'E':200100,
        'v':0.32},
    'aço aisi 4130 laminação fria MIL-T-6736':{
        'Fty':621,
        'Ftu':690,
        'E':200100,
        'v':0.32},
    'aluminio 6061-t4':{
        'Fty':110,
        'Ftu':207,
        'E':68310,
        'v':0.33},
    'aluminio 6061-t6':{
        'Fty':241,
        'Ftu':290,
        'E':68310,
        'v':0.33},
    'aluminio 6061-t6511 extrusado':{
        'Fty':241,
        'Ftu':290,
        'E':68310,
        'v':0.33},
    'aluminio 2024-t3':{
        'Fty':310,
        'Ftu':455,
        'E':72450,
        'v':0.33},
    'aluminio 2024-t42':{
        'Fty':262,
        'Ftu':427,
        'E':72450,
        'v':0.33},
    'aluminio 7075-t6':{
        'Fty':455,
        'Ftu':531,
        'E':71760,
        'v':0.33},
    'aluminio 7075-t73':{
        'Fty':386,
        'Ftu':455,
        'E':71760,
        'v':0.33},
    'policloreto de vinila':{
        'Fty':41,
        'Ftu':51,
        'E':2898,
        'v':0.41},
    'acrilonitrila butadieno estireno':{
        'Fty':35,
        'Ftu':41,
        'E':2001,
        'v':0.0},
    'papelão':{
        'Fty':0,
        'Ftu':14,
        'E':0,
        'v':0.0},
    'tubo de metal eletrico anelado':{
        'Fty':296,
        'Ftu':393,
        'E':200100,
        'v':0.32},
    'tubo de metal eletrico laminado':{
        'Fty':310,
        'Ftu':414,
        'E':200100,
        'v':0.32}
    }
'''
b=p['Fty']/p['Ftu']
print("Razão da força do material: ",'{:.5f}'.format(b))
B=(9.5833*b**4)+(-33.528*b**3)+(44.929*b**2)+(-28.479*b)+8.6475
print("Fator de ruptura: ",'{:.5f}'.format(B))
''' |
__version__ = '2.31.2'
|
__all__ = ["PortReservation", \
"PortManager", \
"PortServer"]
import sys
import socket
import select
import os
import re
import random
from browser.status import *
from base.log import VLOG
from base.bind import Bind
class PortReservation(object):
def __init__(self, on_free_func, port):
self.on_free_func = on_free_func
# port type: integer
self.port = port
def __del__(self):
if callable(self.on_free_func):
return self.on_free_func.Run()
return
def Leak(self):
VLOG(0, "Port leaked: " + str(self.port))
self.on_free_func = None
return
class PortManager(object):
    """Hands out free localhost ports in [min_port, max_port], probing each
    candidate with a temporary bind()."""

    def __init__(self, min_port, max_port):
        self.min_port = min_port
        self.max_port = max_port
        # Ports currently handed out (integers).
        self.taken = []

    # return status and port<string> and reservation<PortReservation>
    def ReservePort(self):
        """Find a bindable, untaken port starting from a random point."""
        start = random.randint(self.min_port, self.max_port)
        wrapped = False
        try_port = start
        while try_port != start or wrapped == False:
            if try_port > self.max_port:
                wrapped = True
                if self.min_port == self.max_port:
                    break
                try_port = self.min_port
            if try_port in self.taken:
                try_port = try_port + 1
                continue
            # BUG FIX: the probe socket was never closed after a successful
            # bind, leaking the descriptor and keeping the port bound so the
            # caller could not actually use it. Always close the probe.
            sock = socket.socket()
            try:
                sock.bind(('localhost', try_port))
            except socket.error:
                try_port = try_port + 1
                continue
            finally:
                sock.close()
            self.taken.append(try_port)
            reservation = PortReservation(Bind(self.ReleasePort, [try_port]), try_port)
            return Status(kOk), str(try_port), reservation
        return Status(kUnknownError, "unable to find open port"), "", PortReservation(None, None)

    def ReleasePort(self, port):
        """Return *port* (int) to the pool."""
        self.taken.remove(port)
class PortServer(object):
    """Client for an external port server reached over a Linux
    abstract-namespace unix socket (path must start with NUL)."""

    def __init__(self, path):
        # Ports handed back via ReleasePort(), available for reuse.
        # BUG FIX: this was written as "self,free = []", a tuple-unpacking
        # typo that raised ValueError instead of creating the attribute.
        self.free = []
        self.path = path
        if not (len(path) != 0 and path.startswith('\0')):
            VLOG(3, "path must be for Linux abstract namespace")

    # return status and a valid port<string>
    def RequestPort(self):
        """Ask the port server for a port.

        The client sends its PID + \n, and the server responds with a
        port + \n, which is valid for the lifetime of the referred process.
        """
        port = ""
        if 'linux2' != sys.platform:
            return Status(kUnknownError, "not implemented for this platform"), port
        try:
            sock_fd = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock_fd.setblocking(0)
        except socket.error:
            return Status(kUnknownError, "unable to create socket"), port
        try:
            sock_fd.settimeout(10)
        except socket.error:
            return Status(kUnknownError, "unable to set socket timeout"), port
        try:
            sock_fd.connect(self.path)
        except socket.error:
            return Status(kUnknownError, "unable to connect"), port
        try:
            request = str(os.getpid()) + '\n'
            VLOG(0, "PORTSERVER REQUEST " + request)
            sock_fd.send(request)
            response = ""
            # BUG FIX: select.select() requires sequences; passing the raw
            # socket and None raised TypeError, and the returned 3-tuple is
            # always truthy — check the readable list instead.
            readable, _, _ = select.select([sock_fd], [], [], 10)
            if readable:
                response = sock_fd.recv(1024)
            if not response:
                return Status(kUnknownError, "failed to receive portserver response"), port
            VLOG(0, "PORTSERVER RESPONSE " + response)
            # parse portserver response
            matchObj = re.search(r'([0-9]+)\n', response)
            if not matchObj:
                return Status(kUnknownError, "failed to parse portserver response"), port
            port = matchObj.groups()[0]
            return Status(kOk), port
        except socket.timeout:
            # Raised when a timeout occurs on a socket which has had timeouts
            # enabled via a prior call to settimeout().
            return Status(kUnknownError, "socket timeout"), port
        except socket.error:
            return Status(kUnknownError, "unable to send portserver request"), port

    def ReleasePort(self, port):
        """Make *port* available for reuse by ReservePort()."""
        self.free.append(port)

    # return status and port<string> and reservation<PortReservation>
    def ReservePort(self):
        """Reserve a port, preferring previously released ones.

        BUG FIX: a port taken from the free list was previously discarded by
        an unconditional RequestPort() call immediately afterwards.
        """
        port = ""
        port_reservation = PortReservation(None, None)
        if self.free:
            port = self.free.pop(0)
            status = Status(kOk)
        else:
            status, port = self.RequestPort()
            if status.IsError():
                return status, port, port_reservation
        port_reservation = PortReservation(Bind(self.ReleasePort, [port]), port)
        return status, port, port_reservation
|
# -----------------------------------------------------------------------------
# Copyright (C) 2018 Nicolas P. Rougier
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
# NaN propagates through arithmetic: the result is nan.
print(0 * np.nan)
# IEEE 754: nan compares unequal to everything, including itself -> False.
print(np.nan == np.nan)
# Any ordered comparison involving nan is False.
print(np.inf > np.nan)
# nan - nan is nan, not 0.
print(np.nan - np.nan)
# Binary floating point cannot represent 0.1 exactly, so 3 * 0.1 != 0.3 -> False !!!
print(0.3 == 3 * 0.1)
print("0.1 really is {:0.56f}".format(0.1))
|
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from .models import ForbiddenUsername
class UsernameValidator(RegexValidator):
    """Restricts usernames to lowercase letters, digits and hyphens."""
    regex = '^[a-z0-9-]+$'
    # Fixed grammar of the user-facing message ("contains" -> "contain").
    message = 'Username can only contain a-z, -, 0-9'
def emailUniqueValidator(email):
    """Raise ValidationError when another user already has *email*.

    Empty/blank emails are allowed (no uniqueness check).
    """
    # exists() issues a cheap EXISTS query instead of counting all rows.
    if email and User.objects.filter(email=email).exists():
        raise ValidationError(u'A user with that email address already exists.')
def blacklistUsername(username):
    """Raise ValidationError when *username* is on the forbidden list."""
    # exists() issues a cheap EXISTS query instead of counting all rows.
    if ForbiddenUsername.objects.filter(username=username).exists():
        raise ValidationError(u'You can\'t use that username')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ContactFlowModuleArgs', 'ContactFlowModule']
@pulumi.input_type
class ContactFlowModuleArgs:
    # Generated by the Pulumi Terraform Bridge (tfgen); manual edits here are
    # normally overwritten on regeneration.
    def __init__(__self__, *,
                 instance_id: pulumi.Input[str],
                 content: Optional[pulumi.Input[str]] = None,
                 content_hash: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 filename: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ContactFlowModule resource.
        :param pulumi.Input[str] instance_id: Specifies the identifier of the hosting Amazon Connect Instance.
        :param pulumi.Input[str] content: Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        :param pulumi.Input[str] description: Specifies the description of the Contact Flow Module.
        :param pulumi.Input[str] filename: The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        :param pulumi.Input[str] name: Specifies the name of the Contact Flow Module.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        # Only instance_id is required; every optional argument is stored
        # only when the caller actually supplied it.
        pulumi.set(__self__, "instance_id", instance_id)
        if content is not None:
            pulumi.set(__self__, "content", content)
        if content_hash is not None:
            pulumi.set(__self__, "content_hash", content_hash)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if filename is not None:
            pulumi.set(__self__, "filename", filename)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        Specifies the identifier of the hosting Amazon Connect Instance.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter
    def content(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        """
        return pulumi.get(self, "content")
    @content.setter
    def content(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content", value)
    @property
    @pulumi.getter(name="contentHash")
    def content_hash(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented in the generated schema; presumably a
        # hash of `content` used to detect drift — confirm against the
        # upstream provider documentation.
        return pulumi.get(self, "content_hash")
    @content_hash.setter
    def content_hash(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content_hash", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the description of the Contact Flow Module.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def filename(self) -> Optional[pulumi.Input[str]]:
        """
        The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        """
        return pulumi.get(self, "filename")
    @filename.setter
    def filename(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filename", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Contact Flow Module.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _ContactFlowModuleState:
    # Generated state type used by ContactFlowModule.get(); all fields are
    # optional because state lookups may be partial.
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 contact_flow_module_id: Optional[pulumi.Input[str]] = None,
                 content: Optional[pulumi.Input[str]] = None,
                 content_hash: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 filename: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering ContactFlowModule resources.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the Contact Flow Module.
        :param pulumi.Input[str] contact_flow_module_id: The identifier of the Contact Flow Module.
        :param pulumi.Input[str] content: Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        :param pulumi.Input[str] description: Specifies the description of the Contact Flow Module.
        :param pulumi.Input[str] filename: The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        :param pulumi.Input[str] instance_id: Specifies the identifier of the hosting Amazon Connect Instance.
        :param pulumi.Input[str] name: Specifies the name of the Contact Flow Module.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if contact_flow_module_id is not None:
            pulumi.set(__self__, "contact_flow_module_id", contact_flow_module_id)
        if content is not None:
            pulumi.set(__self__, "content", content)
        if content_hash is not None:
            pulumi.set(__self__, "content_hash", content_hash)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if filename is not None:
            pulumi.set(__self__, "filename", filename)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the Contact Flow Module.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="contactFlowModuleId")
    def contact_flow_module_id(self) -> Optional[pulumi.Input[str]]:
        """
        The identifier of the Contact Flow Module.
        """
        return pulumi.get(self, "contact_flow_module_id")
    @contact_flow_module_id.setter
    def contact_flow_module_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "contact_flow_module_id", value)
    @property
    @pulumi.getter
    def content(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        """
        return pulumi.get(self, "content")
    @content.setter
    def content(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content", value)
    @property
    @pulumi.getter(name="contentHash")
    def content_hash(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented in the generated schema; presumably a
        # hash of `content` used to detect drift — confirm against the
        # upstream provider documentation.
        return pulumi.get(self, "content_hash")
    @content_hash.setter
    def content_hash(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "content_hash", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the description of the Contact Flow Module.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def filename(self) -> Optional[pulumi.Input[str]]:
        """
        The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        """
        return pulumi.get(self, "filename")
    @filename.setter
    def filename(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filename", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier of the hosting Amazon Connect Instance.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Contact Flow Module.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
class ContactFlowModule(pulumi.CustomResource):
    # Generated resource class; the two @overload __init__ signatures are
    # typing-only stubs, dispatched by the real __init__ below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 content: Optional[pulumi.Input[str]] = None,
                 content_hash: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 filename: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides an Amazon Connect Contact Flow Module resource. For more information see
        [Amazon Connect: Getting Started](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-get-started.html)
        This resource embeds or references Contact Flows Modules specified in Amazon Connect Contact Flow Language. For more information see
        [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html)
        !> **WARN:** Contact Flow Modules exported from the Console [See Contact Flow import/export which is the same for Contact Flow Modules](https://docs.aws.amazon.com/connect/latest/adminguide/contact-flow-import-export.html) are not in the Amazon Connect Contact Flow Language and can not be used with this resource. Instead, the recommendation is to use the AWS CLI [`describe-contact-flow-module`](https://docs.aws.amazon.com/cli/latest/reference/connect/describe-contact-flow-module.html).
        See example below which uses `jq` to extract the `Content` attribute and saves it to a local file.
        ## Example Usage
        ### Basic
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.connect.ContactFlowModule("example",
            content=\"\"\" {
        		"Version": "2019-10-30",
        		"StartAction": "12345678-1234-1234-1234-123456789012",
        		"Actions": [
        			{
        				"Identifier": "12345678-1234-1234-1234-123456789012",
        				"Parameters": {
        					"Text": "Hello contact flow module"
        				},
        				"Transitions": {
        					"NextAction": "abcdef-abcd-abcd-abcd-abcdefghijkl",
        					"Errors": [],
        					"Conditions": []
        				},
        				"Type": "MessageParticipant"
        			},
        			{
        				"Identifier": "abcdef-abcd-abcd-abcd-abcdefghijkl",
        				"Type": "DisconnectParticipant",
        				"Parameters": {},
        				"Transitions": {}
        			}
        		],
        		"Settings": {
        			"InputParameters": [],
        			"OutputParameters": [],
        			"Transitions": [
        				{
        					"DisplayName": "Success",
        					"ReferenceName": "Success",
        					"Description": ""
        				},
        				{
        					"DisplayName": "Error",
        					"ReferenceName": "Error",
        					"Description": ""
        				}
        			]
        		}
        	}
            }
            \"\"\",
            description="Example Contact Flow Module Description",
            instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111",
            tags={
                "Application": "Terraform",
                "Method": "Create",
                "Name": "Example Contact Flow Module",
            })
        ```
        ## Import
        Amazon Connect Contact Flow Modules can be imported using the `instance_id` and `contact_flow_module_id` separated by a colon (`:`), e.g.,
        ```sh
        $ pulumi import aws:connect/contactFlowModule:ContactFlowModule example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] content: Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        :param pulumi.Input[str] description: Specifies the description of the Contact Flow Module.
        :param pulumi.Input[str] filename: The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        :param pulumi.Input[str] instance_id: Specifies the identifier of the hosting Amazon Connect Instance.
        :param pulumi.Input[str] name: Specifies the name of the Contact Flow Module.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ContactFlowModuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Amazon Connect Contact Flow Module resource. For more information see
        [Amazon Connect: Getting Started](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-get-started.html)
        This resource embeds or references Contact Flows Modules specified in Amazon Connect Contact Flow Language. For more information see
        [Amazon Connect Flow language](https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html)
        !> **WARN:** Contact Flow Modules exported from the Console [See Contact Flow import/export which is the same for Contact Flow Modules](https://docs.aws.amazon.com/connect/latest/adminguide/contact-flow-import-export.html) are not in the Amazon Connect Contact Flow Language and can not be used with this resource. Instead, the recommendation is to use the AWS CLI [`describe-contact-flow-module`](https://docs.aws.amazon.com/cli/latest/reference/connect/describe-contact-flow-module.html).
        See example below which uses `jq` to extract the `Content` attribute and saves it to a local file.
        ## Example Usage
        ### Basic
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.connect.ContactFlowModule("example",
            content=\"\"\" {
        		"Version": "2019-10-30",
        		"StartAction": "12345678-1234-1234-1234-123456789012",
        		"Actions": [
        			{
        				"Identifier": "12345678-1234-1234-1234-123456789012",
        				"Parameters": {
        					"Text": "Hello contact flow module"
        				},
        				"Transitions": {
        					"NextAction": "abcdef-abcd-abcd-abcd-abcdefghijkl",
        					"Errors": [],
        					"Conditions": []
        				},
        				"Type": "MessageParticipant"
        			},
        			{
        				"Identifier": "abcdef-abcd-abcd-abcd-abcdefghijkl",
        				"Type": "DisconnectParticipant",
        				"Parameters": {},
        				"Transitions": {}
        			}
        		],
        		"Settings": {
        			"InputParameters": [],
        			"OutputParameters": [],
        			"Transitions": [
        				{
        					"DisplayName": "Success",
        					"ReferenceName": "Success",
        					"Description": ""
        				},
        				{
        					"DisplayName": "Error",
        					"ReferenceName": "Error",
        					"Description": ""
        				}
        			]
        		}
        	}
            }
            \"\"\",
            description="Example Contact Flow Module Description",
            instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111",
            tags={
                "Application": "Terraform",
                "Method": "Create",
                "Name": "Example Contact Flow Module",
            })
        ```
        ## Import
        Amazon Connect Contact Flow Modules can be imported using the `instance_id` and `contact_flow_module_id` separated by a colon (`:`), e.g.,
        ```sh
        $ pulumi import aws:connect/contactFlowModule:ContactFlowModule example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5
        ```
        :param str resource_name: The name of the resource.
        :param ContactFlowModuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: either a ContactFlowModuleArgs
        # bundle or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ContactFlowModuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 content: Optional[pulumi.Input[str]] = None,
                 content_hash: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 filename: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No existing id: creating a new resource, so __props__ must not
            # be caller-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ContactFlowModuleArgs.__new__(ContactFlowModuleArgs)
            __props__.__dict__["content"] = content
            __props__.__dict__["content_hash"] = content_hash
            __props__.__dict__["description"] = description
            __props__.__dict__["filename"] = filename
            # instance_id is required unless looking up by URN.
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            __props__.__dict__["name"] = name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tags_all"] = tags_all
            # Output-only properties are initialized to None; the engine
            # fills them in after creation.
            __props__.__dict__["arn"] = None
            __props__.__dict__["contact_flow_module_id"] = None
        super(ContactFlowModule, __self__).__init__(
            'aws:connect/contactFlowModule:ContactFlowModule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            contact_flow_module_id: Optional[pulumi.Input[str]] = None,
            content: Optional[pulumi.Input[str]] = None,
            content_hash: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            filename: Optional[pulumi.Input[str]] = None,
            instance_id: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ContactFlowModule':
        """
        Get an existing ContactFlowModule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the Contact Flow Module.
        :param pulumi.Input[str] contact_flow_module_id: The identifier of the Contact Flow Module.
        :param pulumi.Input[str] content: Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        :param pulumi.Input[str] description: Specifies the description of the Contact Flow Module.
        :param pulumi.Input[str] filename: The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        :param pulumi.Input[str] instance_id: Specifies the identifier of the hosting Amazon Connect Instance.
        :param pulumi.Input[str] name: Specifies the name of the Contact Flow Module.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ContactFlowModuleState.__new__(_ContactFlowModuleState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["contact_flow_module_id"] = contact_flow_module_id
        __props__.__dict__["content"] = content
        __props__.__dict__["content_hash"] = content_hash
        __props__.__dict__["description"] = description
        __props__.__dict__["filename"] = filename
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["name"] = name
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return ContactFlowModule(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the Contact Flow Module.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="contactFlowModuleId")
    def contact_flow_module_id(self) -> pulumi.Output[str]:
        """
        The identifier of the Contact Flow Module.
        """
        return pulumi.get(self, "contact_flow_module_id")
    @property
    @pulumi.getter
    def content(self) -> pulumi.Output[str]:
        """
        Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used.
        """
        return pulumi.get(self, "content")
    @property
    @pulumi.getter(name="contentHash")
    def content_hash(self) -> pulumi.Output[Optional[str]]:
        # NOTE(review): undocumented in the generated schema; presumably a
        # hash of `content` used to detect drift — confirm against the
        # upstream provider documentation.
        return pulumi.get(self, "content_hash")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the description of the Contact Flow Module.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def filename(self) -> pulumi.Output[Optional[str]]:
        """
        The path to the Contact Flow Module source within the local filesystem. Conflicts with `content`.
        """
        return pulumi.get(self, "filename")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[str]:
        """
        Specifies the identifier of the hosting Amazon Connect Instance.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the Contact Flow Module.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags to apply to the Contact Flow Module. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")
|
#!python
from numpy import *
import matplotlib.pyplot as plt
import os, string, sys
import numpy as np
import math
import csv
import numbers
from sklearn.decomposition import PCA
from sklearn import preprocessing
from matplotlib.patches import FancyArrowPatch
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import seaborn as sns
from sklearn.metrics import euclidean_distances
from sklearn import manifold
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
from sklearn.feature_selection import SelectFromModel
from collections import OrderedDict
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# ex. of how to run code:
# python the_comparator.py RF_portfolio.txt performance.csv
# each row in performance.csv is an instance, the columns are performance values for various solvers
# questions can be sent to Alex Georges: ageorges@ucsd.edu
###################################################################
# Section 1A: Grabs Data
###################################################################
# Command-line inputs: predicted-portfolio file and actual-performance CSV.
PORTFOLIO_PREDICTED=sys.argv[1]
PORTFOLIO_ACTUAL=sys.argv[2]
# Predicted portfolio: one row per instance; column 0 is the instance name,
# remaining columns are solver names in predicted rank order.
with open("%s" %(PORTFOLIO_PREDICTED)) as f:
    reader = csv.reader(f, delimiter=",")
    data = list(reader)
portfolio_predicted = [line for line in data] #does include instance name as 0th column
instance_names_predicted = [line[0] for line in data]
solver_number = len(portfolio_predicted[0])-1
# Actual performance: header row is solver names; each following row is an
# instance name plus one performance value per solver.
# NOTE: this rebinds `data` — from here on `data` is the ACTUAL file's rows.
with open("%s" %(PORTFOLIO_ACTUAL)) as f:
    reader = csv.reader(f, delimiter=",")
    data = list(reader)
solvers = [line for line in data[0][1:]] #does include instance name as 0th column
instance_names_actual = [line[0] for line in data[1:]]
performance_actual = [line[1:] for line in data[1:]]
performance_actual=[[float(j) for j in i] for i in performance_actual]
performance_actual = np.array(performance_actual)
# Build the "actual" portfolio: for each instance, rank solvers by ascending
# performance value and keep the top solver_number of them.
a=len(instance_names_actual)
portfolio_actual = [[] for i in range(a)]
for i in range(a):
    indices = (performance_actual[i]).argsort()[:int(solver_number)]
    name = str(instance_names_actual[i])
    portfolio_actual[i] += name,
    for index in indices:
        portfolio_actual[i] += solvers[index],
# Match predicted and actual portfolios by instance name (O(P*A) scan).
matched_names=[]
portfolio_predicted_matched=[] #not necessary. just including this for symmetry.
portfolio_actual_matched=[]
for line in portfolio_predicted:
    # NOTE(review): this rebinds `a` (previously the actual-instance count)
    # to len(line) and never uses it afterwards — looks like dead code.
    a=len(line)
    for LINE in portfolio_actual:
        if line[0]==LINE[0]:
            #print('actual: %s' %(LINE))
            #print('predicted: %s'%(line))
            matched_names.append(line[0])
            portfolio_predicted_matched.append(line)
            portfolio_actual_matched.append(LINE)
# Translate solver names to column indices in the ACTUAL file's header row
# (data[0]); indices for every matched instance are concatenated flat.
indices_predicted=[]
indices_actual=[]
for line in portfolio_predicted_matched:
    for solver in line[1:]:
        indices_predicted.append(data[0].index(solver))
for line in portfolio_actual_matched:
    for solver in line[1:]:
        indices_actual.append(data[0].index(solver))
# Collect performance values for the selected solvers.
# NOTE(review): for every matched instance, values are appended at the
# indices of ALL instances' solvers (the full flat indices_* lists), not just
# that instance's own solvers — confirm this cross-product is intended.
values_predicted=[]
values_actual=[]
for name in matched_names:
    for line in data:
        if line[0]==name:
            for index in indices_predicted:
                values_predicted.append(line[index])
            for index in indices_actual:
                values_actual.append(line[index])
# Element-wise absolute difference between predicted and actual values.
values_diff = [abs(float(i)-float(j)) for i, j in zip(values_predicted,values_actual)]
print(values_diff)
import os
import re
import sys
import logging
from ipaddress import IPv4Network, IPv4Address
import urllib.parse
import string
from utils import constant_time_is_equal, normalise_environment
from flask import Flask, request, Response, render_template
from random import choices
import urllib3
# Templates (access-denied.html) live next to this file rather than in a
# templates/ directory.
app = Flask(__name__, template_folder=os.path.dirname(__file__))
# Configuration is read from environment variables, parsed into a nested
# structure by normalise_environment.
env = normalise_environment(os.environ)
# All requested URLs are eventually routed to to the same load balancer, which
# uses the host header to route requests to the correct application. So as
# long as we pass the application's host header, which urllib3 does
# automatically from the URL, to resolve the IP address of the origin server,
# we can use _any_ hostname that resolves to this load balancer. So if we use
# the _same_ hostname for all requests...
# - we allow onward persistent connections to the load balancer that are
# reused for all requests;
# - we avoid requests going back through the CDN, which is good for both
# latency, and (hopefully) debuggability since there are fewer hops;
# - we avoid routing requests to arbitrary targets on the internet as part of
# a defense-in-depth/least-privilege strategy.
# Choose a plain or TLS connection pool based on the configured origin protocol.
PoolClass = \
    urllib3.HTTPConnectionPool if env['ORIGIN_PROTO'] == 'http' else \
    urllib3.HTTPSConnectionPool
# maxsize bounds the number of persistent connections kept open to the origin.
http = PoolClass(env['ORIGIN_HOSTNAME'], maxsize=1000)
logging.basicConfig(stream=sys.stdout, level=env['LOG_LEVEL'])
logger = logging.getLogger(__name__)
# Alphabet used to generate a request ID when no X-B3-TraceId header arrives.
request_id_alphabet = string.ascii_letters + string.digits
def render_access_denied(client_ip, forwarded_url, request_id):
    """Render the 403 access-denied page for a rejected request."""
    body = render_template(
        'access-denied.html',
        client_ip=client_ip,
        email_name=env['EMAIL_NAME'],
        email=env['EMAIL'],
        request_id=request_id,
        forwarded_url=forwarded_url,
    )
    return body, 403
@app.route('/', methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS', 'HEAD'])
def handle_request():
    """Validate a CDN-forwarded request against the configured routes and,
    if any route's checks all pass, proxy it to the origin connection pool.

    Per-route checks (all computed positionally, indexed by route): hostname
    regex, client IP range (taken from a configured X-Forwarded-For index),
    optional shared-secret header, and optional basic auth. Failure renders
    a 403 page; basic-auth negotiation may instead return 401 (to request
    credentials) or a plain 'ok' (successful authentication request).
    """
    # Reuse an upstream Zipkin trace id when present so log lines correlate
    # across services; otherwise mint a short random id.
    request_id = request.headers.get('X-B3-TraceId') or ''.join(choices(request_id_alphabet, k=8))
    logger.info('[%s] Start', request_id)
    # Must have X-CF-Forwarded-Url to match route
    try:
        forwarded_url = request.headers['X-CF-Forwarded-Url']
    except KeyError:
        logger.error('[%s] Missing X-CF-Forwarded-Url header', request_id)
        return render_access_denied('Unknown', 'Unknown', request_id)
    logger.info('[%s] Forwarded URL: %s', request_id, forwarded_url)
    parsed_url = urllib.parse.urlsplit(forwarded_url)
    # Find x-forwarded-for
    try:
        x_forwarded_for = request.headers['X-Forwarded-For']
    except KeyError:
        logger.error('[%s] X-Forwarded-For header is missing', request_id)
        return render_access_denied('Unknown', forwarded_url, request_id)
    logger.debug('[%s] X-Forwarded-For: %s', request_id, x_forwarded_for)
    # Returns None (implicitly) when the configured index is out of range,
    # which later makes the IP check fail for that route.
    def get_client_ip(route):
        try:
            return x_forwarded_for.split(',')[int(route['IP_DETERMINED_BY_X_FORWARDED_FOR_INDEX'])].strip()
        except IndexError:
            logger.debug('[%s] Not enough addresses in x-forwarded-for %s', request_id, x_forwarded_for)
    routes = env['ROUTES']
    # One entry per route, parallel to `routes`, for each check below.
    hostname_ok = [
        re.match(route['HOSTNAME_REGEX'], parsed_url.hostname)
        for route in routes
    ]
    client_ips = [
        get_client_ip(route)
        for route in routes
    ]
    ip_ok = [
        any(client_ips[i] and IPv4Address(client_ips[i]) in IPv4Network(ip_range) for ip_range in route['IP_RANGES'])
        for i, route in enumerate(routes)
    ]
    shared_secrets = [
        route.get('SHARED_SECRET_HEADER', [])
        for route in routes
    ]
    # Constant-time comparison avoids leaking secret prefixes via timing.
    shared_secret_ok = [
        [
            (
                shared_secret['NAME'] in request.headers
                and constant_time_is_equal(shared_secret['VALUE'].encode(), request.headers[shared_secret['NAME']].encode())
            )
            for shared_secret in shared_secrets[i]
        ]
        for i, _ in enumerate(routes)
    ]
    # In general, any matching basic auth credentials are accepted. However,
    # on authentication paths, only those with that path are accepted, and
    # on failure, a 401 is returned to request the correct credentials
    basic_auths = [
        route.get('BASIC_AUTH', [])
        for route in routes
    ]
    basic_auths_ok = [
        [
            request.authorization and
            constant_time_is_equal(basic_auth['USERNAME'].encode(), request.authorization.username.encode()) and
            constant_time_is_equal(basic_auth['PASSWORD'].encode(), request.authorization.password.encode())
            for basic_auth in basic_auths[i]
        ]
        for i, _ in enumerate(routes)
    ]
    on_auth_path_and_ok = [
        [
            basic_auths_ok[i][j]
            for j, basic_auth in enumerate(basic_auths[i])
            if parsed_url.path == basic_auth['AUTHENTICATE_PATH']
        ]
        for i, _ in enumerate(routes)
    ]
    any_on_auth_path_and_ok = any([
        any(on_auth_path_and_ok[i])
        for i, _ in enumerate(routes)
    ])
    # 401 only when the request is on some route's authenticate path, passes
    # that route's other checks, and every credential attempt failed.
    should_request_auth = not any_on_auth_path_and_ok and any(
        (
            hostname_ok[i] and
            ip_ok[i] and
            (not shared_secrets[i] or any(shared_secret_ok[i])) and
            len(on_auth_path_and_ok[i]) and
            all(not ok for ok in on_auth_path_and_ok[i])
        )
        for i, _ in enumerate(routes)
    )
    should_respond_ok_to_auth_request = any(
        (
            hostname_ok[i] and
            ip_ok[i] and
            (not shared_secrets[i] or any(shared_secret_ok[i])) and
            len(on_auth_path_and_ok[i]) and
            any(on_auth_path_and_ok[i])
        )
        for i, _ in enumerate(routes)
    )
    any_route_with_all_checks_passed = any(
        (
            hostname_ok[i] and
            ip_ok[i] and
            (not shared_secrets[i] or any(shared_secret_ok[i])) and
            (not basic_auths[i] or any(basic_auths_ok[i]))
        )
        for i, _ in enumerate(routes)
    )
    # There is no perfect answer as to which IP to present to the client in
    # the light of multiple routes with different indexes of the
    # x-forwarded-for header. However, in real cases it is likely that if the
    # host matches, then that will be the correct one. If 'Unknown' is then
    # shown to the user, it suggests something has been misconfigured
    client_ip = next(
        (client_ips[i] for i, _ in enumerate(routes) if hostname_ok[i])
        , 'Unknown')
    # Never forward secret headers (or hop-by-hop/routing headers) upstream.
    headers_to_remove = tuple(set(
        shared_secret['NAME'].lower()
        for i, _ in enumerate(routes)
        for shared_secret in shared_secrets[i]
    )) + ('host', 'x-cf-forwarded-url', 'connection')
    if should_request_auth:
        return Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'})
    if should_respond_ok_to_auth_request:
        return 'ok'
    if not any_route_with_all_checks_passed:
        logger.warning(
            '[%s] No matching route; host: %s client ip: %s',
            request_id, parsed_url.hostname, client_ip)
        return render_access_denied(client_ip, forwarded_url, request_id)
    logger.info('[%s] Making request to origin', request_id)
    # Stream the client body upstream in 64 KiB chunks without buffering it.
    def downstream_data():
        while True:
            contents = request.stream.read(65536)
            if not contents:
                break
            yield contents
    origin_response = http.request(
        request.method,
        forwarded_url,
        headers={
            k: v for k, v in request.headers
            if k.lower() not in headers_to_remove
        },
        preload_content=False,
        redirect=False,
        assert_same_host=False,
        body=downstream_data(),
    )
    logger.info('[%s] Origin response status: %s', request_id, origin_response.status)
    # Return the pooled connection only once the client response is fully
    # sent (wired up via call_on_close below).
    def release_conn():
        origin_response.release_conn()
        logger.info('[%s] End', request_id)
    downstream_response = Response(
        origin_response.stream(65536, decode_content=False),
        status=origin_response.status,
        headers=[
            (k, v) for k, v in origin_response.headers.items()
            if k.lower() != 'connection'
        ],
    )
    # Pass the origin's Location header through untouched.
    downstream_response.autocorrect_location_header = False
    downstream_response.call_on_close(release_conn)
    logger.info('[%s] Starting response to client', request_id)
    return downstream_response
|
def category_robotics(ctx):
    """True when the invoking channel sits in the "robotics facility" category."""
    category_name = ctx.channel.category.name
    return category_name.lower() == "robotics facility"
def chan(ctx, name):
    """True when the command was issued in the channel called *name*."""
    return name == ctx.channel.name
def chan_staff(ctx):
    """True when invoked from the #staff channel."""
    return ctx.channel.name == "staff"
def chan_commands(ctx):
    """True when invoked from the #bot-commands channel."""
    return ctx.channel.name == "bot-commands"
def chan_assignment(ctx):
    """True when invoked from the #role-assignment channel."""
    return ctx.channel.name == "role-assignment"
def chan_log(ctx):
    """True when invoked from the #redbot_log channel."""
    return ctx.channel.name == "redbot_log"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ProdCharacteristic) on 2019-01-22.
# 2019, SMART Health IT.
from . import backboneelement
class ProdCharacteristic(backboneelement.BackboneElement):
    """ The marketing status describes the date when a medicinal product is
    actually put on the market or the date as of which it is no longer
    available.
    """
    resource_type = "ProdCharacteristic"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.color = None
        """ Where applicable, the color can be specified An appropriate
        controlled vocabulary shall be used The term and the term identifier
        shall be used.
        List of `str` items. """
        self.depth = None
        """ Where applicable, the depth can be specified using a numerical
        value and its unit of measurement The unit of measurement shall be
        specified in accordance with ISO 11240 and the resulting terminology
        The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        self.externalDiameter = None
        """ Where applicable, the external diameter can be specified using a
        numerical value and its unit of measurement The unit of measurement
        shall be specified in accordance with ISO 11240 and the resulting
        terminology The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        self.height = None
        """ Where applicable, the height can be specified using a numerical
        value and its unit of measurement The unit of measurement shall be
        specified in accordance with ISO 11240 and the resulting terminology
        The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        self.image = None
        """ Where applicable, the image can be provided The format of the
        image attachment shall be specified by regional implementations.
        List of `Attachment` items (represented as `dict` in JSON). """
        self.imprint = None
        """ Where applicable, the imprint can be specified as text.
        List of `str` items. """
        self.nominalVolume = None
        """ Where applicable, the nominal volume can be specified using a
        numerical value and its unit of measurement The unit of measurement
        shall be specified in accordance with ISO 11240 and the resulting
        terminology The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        self.scoring = None
        """ Where applicable, the scoring can be specified An appropriate
        controlled vocabulary shall be used The term and the term identifier
        shall be used.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.shape = None
        """ Where applicable, the shape can be specified An appropriate
        controlled vocabulary shall be used The term and the term identifier
        shall be used.
        Type `str`. """
        self.weight = None
        """ Where applicable, the weight can be specified using a numerical
        value and its unit of measurement The unit of measurement shall be
        specified in accordance with ISO 11240 and the resulting terminology
        The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        self.width = None
        """ Where applicable, the width can be specified using a numerical
        value and its unit of measurement The unit of measurement shall be
        specified in accordance with ISO 11240 and the resulting terminology
        The symbol and the symbol identifier shall be used.
        Type `Quantity` (represented as `dict` in JSON). """
        super(ProdCharacteristic, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Each tuple: (attribute, JSON key, type, is_list, "of_many" group, required)
        js = super(ProdCharacteristic, self).elementProperties()
        js.extend([
            ("color", "color", str, True, None, False),
            ("depth", "depth", quantity.Quantity, False, None, False),
            ("externalDiameter", "externalDiameter", quantity.Quantity, False, None, False),
            ("height", "height", quantity.Quantity, False, None, False),
            ("image", "image", attachment.Attachment, True, None, False),
            ("imprint", "imprint", str, True, None, False),
            ("nominalVolume", "nominalVolume", quantity.Quantity, False, None, False),
            ("scoring", "scoring", codeableconcept.CodeableConcept, False, None, False),
            ("shape", "shape", str, False, None, False),
            ("weight", "weight", quantity.Quantity, False, None, False),
            ("width", "width", quantity.Quantity, False, None, False),
        ])
        return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from scipy import interpolate
from scipy.signal import savgol_filter
from scipy.optimize import minimize
from numpy.linalg import inv
import find_nearest as fn
import Liquid_Phase_O2_Analysis as lp
from matplotlib import rcParams
# Use Arial (sans-serif) for all matplotlib figure text in this module.
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial']
def expo(a, x):
    """Exponential a[0] * exp(a[1] * x)."""
    amplitude, rate = a[0], a[1]
    return amplitude * np.exp(rate * x)
def poly_expo(a, x):
    """Exponential (first two coefficients) plus polynomial (remaining coefficients)."""
    exp_part = expo(a[:2], x)
    poly_part = lp.poly(a[2:], x)
    return exp_part + poly_part
def gaussian(a, x):
    """Gaussian peak: amplitude a[0], center a[1], width a[2] (no factor-2 convention)."""
    amplitude, center, width = a[0], a[1], a[2]
    return amplitude * np.exp(-((x - center) ** 2) / (width ** 2))
def voigt(a, x):
    # Pseudo-Voigt profile: a linear mix of a Lorentzian and a Gaussian with
    # mixing parameter a[0] in [0, 1] and shared peak parameters a[1:]
    # (amplitude, center, width -- same convention as gaussian()).
    # NOTE(review): `lorentzian` is not defined anywhere in this module and is
    # not referenced through the `lp` helper module -- presumably it should be
    # lp.lorentzian or an import is missing; confirm before calling this.
    return a[0] * lorentzian(a[1:], x) + (1 - a[0]) * gaussian(a[1:], x)
def voigt_mult(a, x):
    """Sum of multiple Voigt peaks.

    The flat parameter vector `a` is reshaped into rows of 4 (one row per
    peak), so len(a) must be a multiple of 4.
    """
    params = np.reshape(a, (-1, 4))
    total = voigt(params[0], x)
    for row in params[1:]:
        total = total + voigt(row, x)
    return total
def spline_fitting(x, y, x_full, y_full):
    """Fit an interpolating spline through (x, y) and subtract it from y_full.

    Returns (y_fit, y_corrected) where y_fit is the spline evaluated on
    x_full and y_corrected = y_full - y_fit.
    """
    spline_rep = interpolate.splrep(x, y, s=0)
    y_fit = interpolate.splev(x_full, spline_rep, der=0)
    return y_fit, y_full - y_fit
def polynomial_regression(x, y, x_full, y_full, order):
    '''Polynomial least-squares regression, where x and y are used for fitting
    and x_full and y_full represent the full data range.

    Returns (y_fit, y_corrected) with y_fit evaluated on x_full and
    y_corrected = y_full - y_fit.'''
    # Design matrix: column j holds x**j for j = 0..order.
    X_raw = np.tile(x, (order+1, 1))
    powers = np.arange(0, order+1)
    X = np.power(X_raw.T, powers)
    # Solve the normal equations directly; forming the explicit inverse with
    # inv() is numerically less stable and more expensive than solve().
    coef = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
    y_fit = lp.poly(coef, x_full)
    y_corrected = y_full - y_fit
    return y_fit, y_corrected
def baseline_fitting_generic(data, feature_start, feature_end, function, p_guess = None, order_poly = 1):
    '''Baseline Correction using either spline_fitting, polynomial_regression (using order_poly)
    or any other defined function via least_squares (using p_guess).
    se_array provides information on pre-edge peak region beginning, end and signal beginning and end (details see prep_data_fitting function)
    function determines the function used for baseline correction.
    p_guess is used for any functions fitted using least_squares. length of p_guess determines polynomial order or number of voigt peaks, if either of those two functions are used.
    Order_poly is used for polynomial regression.

    Returns (data_corr, y_baseline): the corrected (x, y) array and the
    fitted baseline evaluated over the full x range.'''
    # Indices bounding the feature; the baseline is fitted only on the data
    # outside [feature_start, feature_end].
    idx = fn.find_nearest(data, (feature_start, feature_end))
    pre_feature = data[:idx[0]]
    post_feature = data[idx[1]:]
    baseline = np.r_[pre_feature, post_feature]
    if function == spline_fitting:
        y_baseline, y_corrected = spline_fitting(baseline[:,0], baseline[:,1], data[:,0], data[:,1])
    elif function == polynomial_regression:
        y_baseline, y_corrected = polynomial_regression(baseline[:,0], baseline[:,1], data[:,0], data[:,1], order_poly)
    else:
        # Generic least-squares fit of `function` to the off-feature data.
        bounds = np.zeros((2, len(p_guess)))
        bounds[0] = -np.inf
        bounds[1] = np.inf
        if function == voigt:
            # Voigt mixing parameter (first element) must stay within [0, 1];
            # all parameters are kept non-negative.
            bounds[0] = 0.0
            bounds[1][0] = 1.0
        p = least_squares(fun=lp.residual_generic, x0=p_guess, args=(baseline[:,0], baseline[:,1], function), bounds = bounds)
        p_solved = p.x
        y_baseline = function(p_solved, data[:,0])
        y_corrected = data[:,1] - y_baseline
    data_corr = np.c_[data[:,0], y_corrected]
    return data_corr, y_baseline
def integrated_rate_law(k, t):
    '''Integrated rate law for the concentration of the intermediate B in the
    consecutive reaction scheme A -> B -> C.

    k[0] and k[1] are the two rate constants; k[2] is an overall scale factor.
    '''
    k1, k2, scale = k[0], k[1], k[2]
    return scale * (k1 / (k2 - k1)) * (np.exp(-k1 * t) - np.exp(-k2 * t))
def residual_rate_law(k, t, y):
    """Sum of squared residuals between data y and the integrated rate law at t."""
    residuals = y - integrated_rate_law(k, t)
    return np.sum(residuals ** 2)
def fit_follow_up(data_raw, start, end, y_multiplier):
    """Fit the A->B->C rate law to data between `start` and `end` and plot it.

    Time is rebased to hours from the window start; the signal is rebased to
    its first value and scaled by y_multiplier. Prints the optimizer status
    and solution, then plots fit and data.
    """
    lo, hi = fn.find_nearest(data_raw, (start, end))
    segment = data_raw[lo:hi]
    t_hours = (segment[:, 0] - segment[0][0]) / 3600
    signal = (segment[:, 1] - segment[0][1]) * y_multiplier
    initial_guess = np.array([1., 800., 0.1])
    result = minimize(fun=residual_rate_law, x0=initial_guess, args=(t_hours, signal), method='Nelder-Mead')
    print(result.success)
    print(result.x)
    plt.plot(t_hours, integrated_rate_law(result.x, t_hours))
    plt.plot(t_hours, signal)
class Experiment:
    """A gas-phase O2 measurement: loads raw data and provides baseline
    correction, smoothing, follow-up kinetics fitting and plotting helpers."""
    def __init__(self, name, feature_start, feature_end, name_b = None, offset = None):
        """Load `name` from the gas-phase data directory.

        feature_start/feature_end delimit the feature region (seconds). If
        name_b is given, that file is appended as a continuation measurement,
        time-shifted to follow the first file after `offset` seconds.
        """
        self.data = lp.import_txt('../Experimental_Data/Gas_Phase_O2_Data/%s' % name, channel = 2, sensors = 1)
        self.feature_start = feature_start
        self.feature_end = feature_end
        if name_b is not None:
            data_b = lp.import_txt('../Experimental_Data/Gas_Phase_O2_Data/%s' % name_b, channel = 2, sensors = 1)
            final_x = self.data[:,0][-1]
            data_b[:,0] = data_b[:,0] + final_x + offset
            self.data = np.r_[self.data, data_b]
            # The feature position moves along with the shifted time axis.
            self.feature_start = self.feature_start + final_x + offset
    def fit_baseline(self, function, order_poly = 3, p_guess = None):
        """Fit and subtract a baseline outside the feature region.

        Bug fix: `p_guess` is now forwarded to baseline_fitting_generic --
        previously a hard-coded None silently discarded the caller's guess,
        breaking any least_squares-based baseline function.
        """
        self.data_corr, self.baseline = baseline_fitting_generic(self.data, self.feature_start, self.feature_end, function, order_poly = order_poly, p_guess = p_guess)
    def smooth_baseline_corrected_data(self, window_length, poly_order):
        """Savitzky-Golay smooth the baseline-corrected signal into self.y_smooth."""
        self.y_smooth = savgol_filter(self.data_corr[:,1], window_length, poly_order)
    def fit_follow_up_kinetics(self, y_multiplier = 10000):
        """Fit the A->B->C rate law to the baseline-corrected feature region."""
        fit_follow_up(self.data_corr, self.feature_start, self.feature_end, y_multiplier)
    def plot_raw_data(self):
        """Plot the raw, uncorrected trace."""
        plt.plot(self.data[:,0], self.data[:,1], linewidth = 0.7, color = 'black')
    def plot_baseline_corrected_data(self, only_corrected = True, smoothed = False, offset_correction = False, ax = plt, width = 0.0003):
        """Plot corrected (optionally smoothed) data plus an irradiation-start marker.

        offset_correction shifts the time axis so irradiation starts at 0
        (note: mutates self.data_corr in place).
        """
        if smoothed == True:
            ax.plot(self.data_corr[:,0], self.y_smooth, color = 'green', linewidth = 1.5, label = 'Data')
        else:
            ax.plot(self.data_corr[:,0], self.data_corr[:,1], color = 'black', linewidth = 0.7, label = 'Data')
        if offset_correction == True:
            self.data_corr[:,0] = self.data_corr[:,0] - self.feature_start
            ax.plot((0., 0.), (0. - width, 0. + width), color = 'red', label = 'Irradiation Start', linewidth = 2)
        else:
            ax.plot((self.feature_start, self.feature_start), (0. - width, 0. + width), color = 'red', label = 'Irradiation Start', linewidth = 2)
        if only_corrected is not True:
            ax.plot(self.data[:,0], self.data[:,1])
            ax.plot(self.data[:,0], self.baseline)
        if ax != plt:
            ax.grid(color = 'grey', linestyle = '--', linewidth = 0.2)
            ax.set_xlabel('Time / s')
            ax.set_ylabel(r'$O_{2}$ / Vol%')
            ax.legend()
def main():
    """Load the two gas-phase O2 experiments, baseline-correct, smooth and plot."""
    js_552 = Experiment('190619_JS_552_2.txt', 16350., 46300.)
    js_555 = Experiment('190723_JS_555_2.txt', 5000., 47000., name_b = '190723_JS_555_3.txt', offset = 60)
    for experiment in (js_552, js_555):
        experiment.fit_baseline(polynomial_regression, order_poly = 6)
    js_555.smooth_baseline_corrected_data(101, 3)
    fig, ax = plt.subplots()
    js_552.plot_baseline_corrected_data(only_corrected = False, ax = ax, offset_correction = False)
    return js_552
if __name__ == '__main__':
main()
plt.show() |
try:
    # cPickle exists only on Python 2; on Python 3 the C implementation is
    # used automatically by the plain pickle module.
    import cPickle as pickle
except ImportError:
    import pickle
import numpy as np

# Read vertex positions: one "x y z" float triple per line. Context managers
# guarantee the files are closed even if parsing fails.
with open('/tmp/data_pos.out', 'r') as pos_file:
    coordinates = [np.array([float(v) for v in line.split()])
                   for line in pos_file]

# Read quad faces: one "i j k l" integer 4-tuple per line.
# NOTE(review): dtype uint8 silently wraps vertex indices above 255 --
# confirm meshes stay below 256 vertices or widen the dtype.
with open('/tmp/data_face.out', 'r') as face_file:
    face_data = [np.array([int(v) for v in line.split()], dtype=np.uint8)
                 for line in face_file]

data = {'faces': np.array(face_data),
        'world_pos': np.array(coordinates)}

# Pickle streams are binary: the output file must be opened in "wb" mode
# (text mode fails outright on Python 3 and corrupts data on Windows).
with open("/home/swapnil/Project/Test Data/cube.mat", "wb") as myFile:
    pickle.dump(data, myFile)
|
import pdb
import re
"""
availableWorkers = 2
minimumStepLength = 1
inputFileName = "7test.txt"
"""
availableWorkers = 5
minimumStepLength = 61
inputFileName = "7.txt"
def stepSeconds(step):
    """Seconds a step takes: the base step length plus the letter's alphabet offset (A=0)."""
    letter_offset = ord(step) - ord('A')
    return letter_offset + minimumStepLength
def tick(workers):
    """
    Subtract one second from each worker task.
    """
    for task in workers:
        workers[task] = workers[task] - 1
# Parse dependency lines like "Step C must be finished before step A can begin."
lineMatch = re.compile("Step (.) must be finished before step (.) can begin.")
order = []
ready = set()
# steps maps each step letter to the set of its unfinished prerequisites.
steps = {}
# workers maps an in-progress step letter to its remaining seconds.
workers = {}
totalSeconds = 0
with open(inputFileName, "r") as infile:
    for line in infile:
        match = lineMatch.match(line)
        pre = match[1]
        post = match[2]
        if not pre in steps:
            steps[pre] = set()
        if not post in steps:
            # set(pre) works here because pre is a single character.
            steps[post] = set(pre)
        else:
            steps[post].add(pre)
# Live view: mutations via difference_update below are visible through it.
stepvalues = steps.values()
while len(steps) or len(workers):
    #Add any ready steps to the set of steps that are ready
    for step in steps:
        if len(steps[step]) == 0:
            ready.add(step)
    #Give the ready steps to available workers
    while len(ready) and len(workers) < availableWorkers:
        #Give the next available step to a worker
        nextstep = min(ready)
        workers[nextstep] = stepSeconds(nextstep)
        #Remove the step from consideration
        ready.remove(nextstep)
        del steps[nextstep]
    #Elapse one second
    tick(workers)
    totalSeconds += 1
    #Build a list of finished steps
    newlyFinished = []
    for worker in workers.items():
        if worker[1] == 0:
            #This step is finished. Add it to the list of newly-finished steps.
            newlyFinished.append(worker[0])
    #Process any finished steps
    while len(newlyFinished):
        nextstep = min(newlyFinished)
        newlyFinished.remove(nextstep)
        del workers[nextstep]
        order.append(nextstep)
        #Remove this step from all the prerequisites
        for value in stepvalues:
            value.difference_update(nextstep)
print(''.join(order))
print(totalSeconds)
|
#-*- coding:utf8 -*-
# =============================================================================
# FileName: fonts.py
# Desc: 加入自定义字体
# Author: ysw(zjuysw)
# Email: yinshaowen241@gmail.com
# HomePage: http://my.oschina.net/zjuysw
# Version: 0.0.1
# LastChange: 2014-12-28 19:23:48
# History:
# =============================================================================
from PyQt4.QtGui import QFont, QFontDatabase
from logger import log
class Font():
    '''
    Helper that registers a custom font file with the Qt font database and
    exposes a QFont configured with the loaded family.
    '''
    def __init__(self, font_path):
        # QFont that will carry the loaded family after addFont() succeeds.
        self.__font = QFont()
        self.font_path = font_path
    def addFont(self):
        '''
        Register the font file with QFontDatabase.

        Returns True on success, False on failure.
        '''
        font_path = self.font_path
        fontId = QFontDatabase.addApplicationFont(font_path)
        if(fontId != -1):
            # Use the first family reported for the newly added font.
            fontInfoList = QFontDatabase.applicationFontFamilies(fontId)
            fontFamily = fontInfoList[0]
            self.__font.setFamily(fontFamily)
            log.info("添加字体成功")
            return True
        else:
            log.warning("添加字体失败")
            return False
    def getFont(self):
        # Ensure the font is registered before handing it out; registration
        # failure still returns the (default-family) QFont.
        self.addFont()
        return self.__font
|
import time
import timeit
def main():
    """Demonstrate wall-clock timing of a 2-second sleep using timeit.default_timer."""
    started = timeit.default_timer()
    print("TimeIt Module Demo - Start")
    time.sleep(2)
    print("TimeIt Module Demo - End")
    elapsed = timeit.default_timer() - started
    print("[Finished in %.1fs]" % elapsed)
if __name__ == "__main__":
main()
|
import matplotlib.pyplot as plt
"""
Loss=0,Corruption=0,t=0.1,m=10000
X-axis: Window size;
Y-axis: Throughput (ABT, GBN and SR) in one graph/plot.
"""
def graph_one():
    """Plot throughput vs. window size for the ABT, GBN and SR protocols."""
    wsize = [10, 50, 100, 200, 500]
    # (throughput series, plot color, legend label) per protocol.
    series = [
        ([0.0696589, 0.0696589, 0.0696589, 0.0696589, 0.0696589], 'r', 'ABT'),
        ([0.1412745, 0.0967342, 0.1264804, 0.1380641, 0.1381571], 'g', 'GBN'),
        ([0.0110078, 0.0510701, 0.1015294, 0.1580224, 0.1579091], 'b', 'SR'),
    ]
    for throughput, line_color, label in series:
        plt.plot(wsize, throughput, marker='D', color=line_color, label=label)
    plt.xlabel('Window Size (packets)')
    plt.ylabel('Throughput (packets/time unit)')
    plt.ylim([0,.2])
    plt.legend()
    plt.title('Throughput vs. Window Size\n(with 0.1 time units between each packet sent, \ncorruption probability 0.0, loss probability 0.0, and 10,000 total messages sent)')
    plt.show()
graph_one()
# distutils is deprecated (removed in Python 3.12); setuptools provides a
# drop-in setup() and was already required here for find_packages(), so use
# it for both instead of mixing the two packaging libraries.
from setuptools import setup, find_packages

setup(
    name='mean_average_precision',
    version='0.1',
    packages=find_packages(),
    url='',
    license='MIT',
    author='Mathieu Garon',
    author_email='mathieugaron91@gmail.com',
    description=''
)
|
#!/usr/bin/env python
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import sys
from fastestimator.cli.history import configure_history_parser
from fastestimator.cli.logs import configure_log_parser
from fastestimator.cli.plot import configure_plot_parser
from fastestimator.cli.run import configure_run_parser
from fastestimator.cli.train import configure_test_parser, configure_train_parser
def run_main(argv) -> None:
    """Invoke the argument parsers and dispatch to the requested subcommand.

    Args:
        argv: Command line arguments, excluding the program name.
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    subparsers = parser.add_subparsers()
    # In python 3.7 the following 2 lines could be put into the .add_subparsers() call
    subparsers.required = True
    subparsers.dest = 'mode'
    for configure in (configure_train_parser,
                      configure_test_parser,
                      configure_run_parser,
                      configure_log_parser,
                      configure_plot_parser,
                      configure_history_parser):
        configure(subparsers)
    args, unknown = parser.parse_known_args(argv)
    args.func(vars(args), unknown)
def main() -> None:
    """CLI entry point: forward the process arguments (minus the program name) to run_main."""
    run_main(sys.argv[1:])
if __name__ == '__main__':
main()
|
from lxml import etree
from common import NSM, setx, mmf, NSW, getap, ntf, setap, Body
from copy import deepcopy
import ct, txt
def xhspace(nd):
    # Horizontal-space node handler: appends a run of SIX-PER-EM SPACE
    # (U+2006) characters to the node's active paragraph.
    s = nd.attributes['len']
    # NOTE(review): assumes the 'len' attribute value exposes an `.em` number
    # (width in em); six U+2006 glyphs approximate one em -- confirm against
    # the attribute class in `common`.
    n = int(s.em*6)
    t = txt.tr(u"\u2006"*n)
    getap(nd).append(t)
|
from typing import Dict, Any # noqa: F401
from vint.linting.config.config_source import ConfigSource
class ConfigAbstractDynamicSource(ConfigSource):
    """ An abstract class for ConfigSource that is dynamically changed while linting. """
    def __init__(self):
        pass
    def get_config_dict(self):
        # type: () -> Dict[str, Any]
        """Return the current config dict; subclasses must override."""
        raise NotImplementedError()
    def update_by_node(self, node):
        # type: (Dict[str, Any]) -> None
        """Refresh the dynamic config from the AST node being linted; subclasses must override."""
        raise NotImplementedError()
|
from django.db.models.fields.related import ManyToManyField
from django.forms.models import ModelMultipleChoiceField
from _helpers.models import areas_ar_en, areas_en
from typing import Any, List, Optional, Sequence, Tuple
from django import forms
from django.contrib import admin
from django.contrib.admin.options import InlineModelAdmin, ModelAdmin, TabularInline
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.messages.constants import ERROR
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse
from django.urls.resolvers import URLPattern
from django.utils.translation import gettext, gettext_lazy, ugettext as _, ugettext_lazy
from rest_framework.authtoken import admin as rest_admin
from django.contrib.auth.models import Group, Permission
from django.utils.html import format_html
from app_user.models import Department, Job, User
from area.models import Area
from django.contrib import messages
from django.urls import path
class NoDeletion(admin.ModelAdmin):
    """ModelAdmin base that disables object deletion in the admin site."""
    def has_delete_permission(self, request: HttpRequest, obj: Optional["Model"]=None) -> bool:
        """Deletion is never permitted, regardless of user or object."""
        return False
class UserCreationForm(forms.ModelForm):
    """A form for creating new users. Includes all the required
    fields, plus a repeated password."""
    # Optional area selection; the remaining fields come from the User model.
    area = forms.ModelChoiceField(queryset=Area.objects.all(), required=False, label=ugettext_lazy("Area"))
    password1 = forms.CharField(label=ugettext_lazy("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(
        label=ugettext_lazy("Password confirmation"),
        widget=forms.PasswordInput
    )
    class Meta:
        model = User
        fields = ("email", "last", "first", 'area')
    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError("Passwords don't match")
        return password2
    def save(self, commit=True):
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """
    A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    # password = ReadOnlyPasswordHashField(label=_('Password'))
    new_password = forms.CharField(strip=False, widget=forms.PasswordInput(),label=_("New password"), error_messages={'confirm_password': _('passwords don\'t match')}, required=False)
    confirm_password = forms.CharField(strip=False, widget=forms.PasswordInput(),label=_("Confirm new password"), error_messages={'confirm_password': _('passwords don\'t match')}, required=False)
    class Meta:
        model = User
        fields = (
            _('new_password'),
            _('confirm_password'),
        )
    def clean_password(self):
        password1 = self.cleaned_data.get("new_password")
        password2 = self.cleaned_data.get("confirm_password")
        # NOTE(review): a mismatch is silently ignored (returns None, so no
        # password change happens) rather than raising a validation error --
        # the add_error call below was deliberately commented out; confirm
        # this is the intended UX.
        if password1 and password2 and password1 != password2:
            # self.add_error('new_password', ValidationError("Passwords don't match"))
            return
        return password2
    def save(self, commit=True) -> None:
        # Only re-hash and set the password when a matching new pair was given.
        password = self.clean_password()
        if password:
            self.instance.set_password(password)
        return super().save(commit)
class CustomUserAdmin(UserAdmin):
    """Admin for the custom User model: bulk (de)activation actions,
    area-name search translation, and an AJAX endpoint that resolves a
    job's department for the change form."""
    class Media:
        # Updates the department display when the selected job changes.
        js = ('dynamic_department.js',)
    form = UserChangeForm
    add_form = UserCreationForm
    list_display = ("id", "email", "phone", "gender", "is_active", "last_login", 'area')
    list_display_links = ("email",)
    list_filter = ("last_login", "is_active", "gender")
    ordering = ("-id",)
    list_per_page = 15
    readonly_fields = ('id', "last_login", 'img_html', 'is_staff', 'get_department')
    search_fields = [
        'country',
        'email',
        'phone',
        'area__name'
    ]
    actions = [_('activate'), _('deactivate'), ]
    def get_search_results(self, request: HttpRequest, queryset: QuerySet, search_term: str) -> Tuple[QuerySet, bool]:
        # Map an Arabic area-name search term to its English equivalent (area
        # names are stored in English) before delegating to the default search.
        areas_ar_en_data = areas_ar_en()
        for value in areas_ar_en_data :
            value_en = value.get(search_term)
            if value_en:
                search_term = value_en
        return super().get_search_results(request, queryset, search_term)
    def activate(self, request, queryset):
        # Bulk-activate the selected users.
        count= queryset.update(is_active=True)
        # NOTE(review): pluralization looks inverted ('users' when count < 1);
        # presumably it should be 'user' if count == 1 else 'users'. Confirm.
        users_string = 'users' if count < 1 else 'user'
        self.message_user(request, f'{count} {users_string} activated successfully')
    activate.short_description = ugettext_lazy('Activate users')
    def deactivate(self, request, queryset):
        # Bulk-deactivate the selected users.
        count= queryset.update(is_active=False)
        # NOTE(review): same inverted pluralization as in activate().
        users_string = 'users' if count < 1 else 'user'
        self.message_user(request, f'{count} {users_string} deactivated successfully')
    deactivate.short_description = ugettext_lazy('Deactivate users')
    def has_delete_permission(self, request, obj=None):
        # Users can never be deleted through the admin.
        return False
    def get_urls(self) -> List[URLPattern]:
        # Extra admin route backing the dynamic_department.js AJAX lookup.
        urls = super().get_urls()
        return urls + [
            path('<int:user_id>/job/<int:job_id>', self.get_department_ajax, name='job-department')
        ]
    def get_department_ajax(self, request, user_id,job_id):
        # Default to the user's current department; override with the
        # department owning job_id when one exists.
        response = str(self.get_object(request, user_id).job.department)
        department = Department.objects.filter(jobs=job_id)
        if department.exists():
            department = department.get()
            response = str(department)
        return JsonResponse({'department': response} , status=200)
    def response_add(self, request, obj, post_url_continue=None):
        from django.urls.base import reverse
        """
        This makes the response after adding go to another
        app's changelist for some model
        """
        return HttpResponseRedirect(
            reverse("admin:app_user_user_changelist")
        )
    # NOTE(review): wrapping field names in _(...) inside fieldsets/Meta is
    # unusual -- gettext on a field name returns the name unchanged unless a
    # translation exists, but a stray translation would break the form. Confirm.
    fieldsets = (
        (ugettext_lazy("Login"), {
            'classes': ('collapse', "wide"),
            "fields": (
                _('id'),
                "email",
                "last_login"
            )
        }),
        (_('Change Password'), {
            'classes': ('collapse', 'wide'),
            'fields':
            (
                _('new_password'),
                _('confirm_password')
            ),
        }),
        (ugettext_lazy("Personal_info"),{
            'classes': ("wide",),
            "fields": (
                ("img", "img_html"),
                ("first", "last", 'phone', 'addtional_phone'),
                ('job', 'get_department'),
                ("gender",'age'),
                ("country", 'area', 'city', 'address')
            )
        },
        ),
        (
            ugettext_lazy("Important Dates"),
            {
                'classes': ('collapse', "wide"),
                "fields": (_("is_active"), _("is_staff"), _("is_superuser"))},
        ),
        (ugettext_lazy("Permissions"), {
            'classes': ('collapse', "wide"),
            "fields": ("groups",)}),
        # "fields": ("user_permissions", "groups")}),
    )
    add_fieldsets = (
        (gettext_lazy('login'),{"classes": ('wide',) , 'fields': ('email', 'password1', 'password2')},),
        (gettext_lazy('user name'),{"classes": ('wide',) , 'fields': (('first', 'last',), )},),
        (gettext_lazy('user location'),{"classes": ('wide',) , 'fields': (('area', 'country'),),}),
        (gettext_lazy('general'),{"classes": ('wide',) , 'fields': ('job','img', ('gender', 'age'), ('phone', 'addtional_phone'))}),
        (gettext_lazy('Permissinos'),{"classes": ('wide',) , 'fields': ('groups',)}),
    )
class JobAdmin(NoDeletion):
    """Admin for Job; behavior comes entirely from NoDeletion (presumably it
    blocks deletion — confirm against the NoDeletion base class)."""
    pass
class DepartmanAdmin(NoDeletion):
    # NOTE(review): class name looks like a typo for "DepartmentAdmin"; kept
    # as-is because the register() call below refers to this exact name.
    pass
# Wire the custom admin classes into the default admin site.
admin.site.register(User, CustomUserAdmin)
admin.site.register(Job, JobAdmin)
admin.site.register(Department, DepartmanAdmin)
# Hide the DRF token-proxy model from the admin index.
admin.site.unregister(rest_admin.TokenProxy)
# Site-wide branding (translatable).
admin.site.index_title = _('ERP APP')
admin.site.site_header = _('ERP APP Administration')
admin.site.site_title = _('ERP APP Management')
from django.contrib.auth.models import Group, Permission
class GroupAdmin(ModelAdmin):
    """Group admin that hides permissions of internal/bookkeeping models."""

    filter_vertical = ('permissions', )

    def formfield_for_manytomany(self, db_field: ManyToManyField, request: Optional[HttpRequest], **kwargs: Any) -> ModelMultipleChoiceField:
        """Restrict the selectable permissions to business-relevant models."""
        # Models whose permissions should never be assigned via the UI.
        hidden_models = [
            'contenttype',
            'session',
            'logentry',
            'token',
            'tokenproxy',
            'ctx',
            'vat'
        ]
        kwargs['queryset'] = Permission.objects.exclude(content_type__model__in=hidden_models)
        return super().formfield_for_manytomany(db_field, request, **kwargs)
# Replace Django's stock Group admin with the permission-filtered GroupAdmin.
admin.site.unregister(Group)
admin.site.register(Group, GroupAdmin)
from fileinput import filename
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
tweets = []  # NOTE(review): appears unused in the visible script — confirm before removing
# Variables that contain the user credentials to access the Twitter API.
# These are placeholders; substitute real values before running.
access_token = "access_token"
access_token_secret = "access_token_secret"
consumer_key = "consumer_key"
consumer_secret = "consumer_secret"
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Stream listener that appends selected fields of each tweet to tweets.csv."""

    def on_data(self, data):
        """Parse one raw stream message and append a CSV row.

        Returns True so tweepy keeps the stream connected.
        """
        tweet = json.loads(data)
        if 'text' in tweet:
            created_at = tweet['created_at']
            tweet_id = tweet['id']
            user_id = tweet['user']['id']
            user_name = tweet['user']['name']
            tweet_text = tweet['text']
        else:
            # Non-tweet stream messages (deletes, limit notices, ...) get
            # blank placeholders so the CSV keeps a fixed column count.
            created_at = tweet_id = user_id = user_name = tweet_text = " "
        row = [created_at, tweet_id, user_id, user_name, tweet_text]
        with open('tweets.csv', 'a') as f:
            from csv import writer  # local import kept; module imports untouched
            # NOTE(review): .encode('utf8') yields bytes; on Python 3 the csv
            # module will write their repr (b'...') — consider writing str directly.
            values = [(value.encode('utf8') if hasattr(value, 'encode') else value) for value in row]
            writer(f).writerow(values)
        return True

    def on_error(self, status):
        """Print the stream error status; returning None keeps the stream alive."""
        # Fixed: 'print status' was Python-2-only syntax and a SyntaxError on 3.
        print(status)
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the Streaming API.
    listener = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, listener)
    # Fixed: the track argument was a single comma-joined string, so the API
    # would match that whole literal instead of the individual handles.
    stream.userstream(track=[
        '@barackobama',
        '@HillaryClinton',
        '@jimwebbUSA',
        '@realDonalTrump',  # NOTE(review): likely a typo for @realDonaldTrump — confirm
        '@Berniesanders',
        '@jebBush',
    ])
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""create_imagenet2012_label"""
import os
import json
import argparse
# CLI: --img_path points at the ImageNet2012 directory (one subdirectory per class).
parser = argparse.ArgumentParser(description="resnet imagenet2012 label")
parser.add_argument("--img_path", type=str, required=True, help="imagenet2012 file path.")
# NOTE(review): parse_args() runs at import time, so importing this module
# requires --img_path on the command line — confirm this is intended.
args = parser.parse_args()
def create_label(file_path):
    """Create the ImageNet2012 label mapping and write it to disk.

    Each subdirectory of *file_path* is one class; the class index is the
    subdirectory's position in sorted order. The resulting mapping
    {image_filename: class_index} is dumped to ``imagenet_label.json`` in
    the current working directory.
    """
    print("[WARNING] Create imagenet label. Currently only use for Imagenet2012!")
    # sorted() replaces the manual copy loop; sorted order defines the index.
    file_list = sorted(os.listdir(file_path))
    total = 0
    img_label = {}
    for i, file_dir in enumerate(file_list):
        files = os.listdir(os.path.join(file_path, file_dir))
        for f in files:
            img_label[f] = i
        total += len(files)
    # "w" instead of "w+": the file is only written, never read back.
    with open("imagenet_label.json", "w") as label:
        json.dump(img_label, label)
    print("[INFO] Completed! Total {} data.".format(total))
if __name__ == '__main__':
    # Build the label mapping from the directory given on the command line.
    create_label(args.img_path)
|
#!/usr/bin/env python3
# encoding: utf-8
#end_pymotw_header
import sys

# Report the native byte order of this machine: 'little' or 'big'.
native_order = sys.byteorder
print(native_order)
|
from antarest.study.storage.rawstudy.model.filesystem.config.model import (
FileStudyTreeConfig,
)
from antarest.study.storage.rawstudy.model.filesystem.context import (
ContextServer,
)
from antarest.study.storage.rawstudy.model.filesystem.folder_node import (
FolderNode,
)
from antarest.study.storage.rawstudy.model.filesystem.inode import TREE
from antarest.study.storage.rawstudy.model.filesystem.root.input.thermal.series.area.thermal.thermal import (
InputThermalSeriesAreaThermal,
)
class InputThermalSeriesArea(FolderNode):
    """Folder node exposing one thermal-series child per thermal cluster of an area."""

    def __init__(
        self, context: ContextServer, config: FileStudyTreeConfig, area: str
    ):
        super().__init__(context, config)
        self.area = area

    def build(self) -> TREE:
        """Return a child InputThermalSeriesAreaThermal per thermal name of this area."""
        children: TREE = {}
        for thermal_name in self.config.get_thermal_names(self.area):
            children[thermal_name] = InputThermalSeriesAreaThermal(
                self.context, self.config.next_file(thermal_name)
            )
        return children
|
# -*- coding: utf-8 -*-
"""
This module allows for plotting of annotated graphs using matplotlib
"""
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import numpy as np
Path=mpath.Path
def _draw_beta(ax, start, stop, label="", linewidth=1.2):
    '''
    Private helper: draw a labelled, filled horizontal arrow (beta strand)
    on a matplotlib axis from start to stop at 0.5 height.
    '''
    arrow = mpatches.FancyArrow(
        start - 0.5, 0.5, stop - start, 0,
        width=0.5, head_width=1, head_length=1,
        edgecolor='k', facecolor='k',
    )
    ax.add_patch(arrow)
    # Italic label centred underneath the arrow.
    midpoint = start + (stop - start) / 2
    ax.text(midpoint, -1, label, style='italic',
            horizontalalignment='center', verticalalignment='baseline')
def _draw_helix(ax, start, stop, label="", linewidth=1.2):
    '''
    Private helper: draw a named helix (a run of tilted ellipse "coils"
    joined by connector curves) on a matplotlib axis from start to stop
    at 0.5 height.
    '''
    # Leading half-curve into the first coil.
    ax.add_patch(mpatches.PathPatch(Path([(start-0.5,0),(start,-0.15),(start+0.5,0)],
                                         [Path.MOVETO,Path.CURVE3,Path.CURVE3]),
                                    fc="none", edgecolor='k',linewidth=linewidth))
    # One tilted ellipse per residue, each followed by a connector curve.
    for pos in np.arange(start,stop):
        ax.add_patch(mpatches.Ellipse((pos+0.5, 0.35), 0.4, 0.7, angle=-20, linewidth=linewidth,edgecolor='k', fill=False, zorder=2))
        ax.add_patch(mpatches.PathPatch(Path([(pos+0.5,0),(pos+1,-0.15),(pos+1.5,0)],
                                             [Path.MOVETO,Path.CURVE3,Path.CURVE3]),
                                        fc="none", edgecolor='k',linewidth=linewidth))
    # Italic label centred underneath the helix.
    ax.text(start+(stop-start)/2,-1,label,style='italic',horizontalalignment= 'center', verticalalignment= 'baseline')
def _annotate_sequence(dataax, annot_ax, resids, features):
    '''
    Private helper: render secondary-structure features ('helix', 'beta',
    'block') onto the annotation axis / data axis tick labels.
    '''
    for feature in features:
        begin = feature['sel'][0] + resids[0]
        end = feature['sel'][1] + resids[0]
        kind = feature['style']
        if kind == 'helix':
            _draw_helix(annot_ax, begin, end, feature['text'])
        elif kind == 'beta':
            _draw_beta(annot_ax, begin, end, feature['text'])
        elif kind == 'block':
            # Highlight a single residue's tick label with a red box.
            tick = dataax.xaxis.get_ticklabels(minor=True)[feature['sel'][0]]
            tick.set_bbox(dict(facecolor='none', edgecolor='red', boxstyle='square,pad=0.1'))
def plot_on_seq(data, seq, **kwargs):
    '''
    Create a new figure with a sequence-annotated bar chart.
    data - 1d numpy array with per residue values
    seq - string with sequence
    available keyword arguments:
    filename - name to save file (png, svg, pdf and other formats available)
    features - dictionary with sequence features from
               seq_tools.hist_ss.get_hist_ss_in_aln_for_shade
    resids - np array with residue numbers of same length as data and seq
    y_axis_label - string label of Y axis
    y_axis_limits - tuple with (min, max) axis limits
    figsize - tuple with figure size in inches (width, height)
    dpi - int with DPI value
    '''
    # Guess a width proportional to the sequence length unless the caller fixed it.
    figsize = kwargs.get('figsize', (len(seq) / 5, len(seq) / 50))
    dpi = kwargs.get('dpi', 300)
    # Two stacked axes sharing x: upper for the data, lower for annotation.
    gs = gridspec.GridSpec(2, 1, height_ratios=[1, 0.5])
    fig = plt.figure(figsize=figsize, dpi=dpi)
    ax1 = fig.add_subplot(gs[0])
    ax2 = fig.add_subplot(gs[1], sharex=ax1)
    # Default residue numbering is 0..N-1.
    resids = kwargs.get('resids', np.arange(data.size))
    # Plot the per-residue bars.
    ax1.bar(resids, data)
    if 'y_axis_limits' in kwargs:
        ax1.set_ylim(kwargs['y_axis_limits'])
    # Minor ticks carry the one-letter sequence as labels.
    ax1.xaxis.set_ticks(resids, minor=True)
    ax1.xaxis.set_ticklabels(seq, minor=True)
    if 'y_axis_label' in kwargs:
        ax1.set_ylabel(kwargs['y_axis_label'])
    ax1.grid(True, 'major')
    ax1.tick_params(axis=u'both', which=u'major', length=0, labeltop=True)
    ax1.tick_params(axis=u'both', which=u'major', length=0, labelbottom=False)
    ax1.tick_params(axis=u'both', which=u'minor', length=0)
    # Prepare the annotation axis and hide everything but the drawings.
    ax2.set_aspect('equal')
    ax2.set_xlim(ax1.get_xlim())
    ax2.set_ylim(-0.5, 1.1)
    ax2.axis('off')
    if 'features' in kwargs:
        _annotate_sequence(ax1, ax2, resids, kwargs['features'])
    if 'filename' in kwargs:
        fig.savefig(kwargs['filename'])
    plt.show()
    return fig
def heatplot_on_seq(data,seq,**kwargs):
    '''
    Create a new figure with a sequence-annotated heatmap.
    data - 2d numpy array with per residue values
        cols - resids, rows - values
    seq - string with sequence
    available keyword arguments:
    filename - name to save file (png, svg, pdf and other formats available)
    features - dictionary with sequence features from
        seq_tools.hist_ss.get_hist_ss_in_aln_for_shade
    resids - np array with residue numbers of same length as data and seq
    y_axis_label - string label of Y axis
    y_axis_values - 1d numpy array with row names
    colorbar_limits - tuple with limits for coloring
    colorbar_label - string label of colorbar axis
    cmap - matplotlib colormap
    figsize - tuple with figure size in inches (width, height)
    dpi - int with DPI value
    '''
    # Try to predict 'optimal' width for the plot
    if not 'figsize' in kwargs:
        width=len(seq)/5
        heigth=width/5
        figsize=(width,heigth)
    else:
        figsize=kwargs['figsize']
    if not 'dpi' in kwargs:
        dpi=300
    else:
        dpi=kwargs['dpi']
    # NOTE(review): figsize[0]/figsize[0] is always 1, so factor is a constant
    # 1/15 — this looks like a typo for a width/height ratio; confirm intent.
    factor=(figsize[0]/figsize[0])/15
    # Creating subplot grid (upper for data, lower for annotation)
    gs = gridspec.GridSpec(2, 1, height_ratios=[1, factor])
    fig = plt.figure(figsize=figsize,dpi=dpi)
    ax1 = fig.add_subplot(gs[0,0])
    ax2 = fig.add_subplot(gs[1,0],sharex=ax1)
    # Populating values if absent
    if not 'resids' in kwargs:
        resids=np.arange(data.size)
    else:
        resids=kwargs['resids']
    if not 'y_axis_values' in kwargs:
        y_names=np.arange(data.shape[0])
    else:
        y_names=kwargs['y_axis_values']
    if not 'cmap' in kwargs:
        cmap='viridis'
    else:
        cmap=kwargs['cmap']
    # Plotting and adjusting the heatmap
    im=ax1.imshow(data,extent=(resids[0]-0.5,resids[-1]+0.5,y_names[0],y_names[-1]),origin = 'lower',aspect = 'auto',cmap=cmap)
    # Minor x ticks carry the one-letter sequence as labels.
    ax1.xaxis.set_ticks(resids,minor=True)
    ax1.xaxis.set_ticklabels(seq,minor=True)
    if 'y_axis_label' in kwargs:
        ax1.set_ylabel(kwargs['y_axis_label'])
    # Tinkering with the grid and ticks
    ax1.grid(True,'major')
    ax1.tick_params(axis=u'both', which=u'major',length=0,labeltop=True)
    ax1.tick_params(axis=u'both', which=u'major',length=0,labelbottom=False)
    ax1.tick_params(axis=u'both', which=u'minor',length=0)
    # Preparing 2nd axis for annotating
    ax2.set_aspect('equal')
    ax2.set_xlim(ax1.get_xlim())
    ax2.set_ylim(-0.5,1.1)
    # Hiding all elements in the axes, except plot itself
    ax2.axis('off')
    # Trying to tighten the graph, although I do not think it will work with gridspec subplots
    fig.tight_layout()
    # 'Uninvasive' addition of the colorbar: remember the axis position so it
    # can be restored after colorbar() shrinks it.
    box = ax1.get_position()
    if 'colorbar_limits' in kwargs:
        cb1=fig.colorbar(im,ax=ax1,boundaries=np.arange(*kwargs['colorbar_limits'],0.01))
        # NOTE(review): Colorbar.set_clim was removed in modern matplotlib
        # (use im.set_clim instead) — confirm against the pinned version.
        cb1.set_clim(kwargs['colorbar_limits'])
    else:
        cb1=plt.colorbar(im,ax=ax1,orientation="vertical")
    ax1.set_position(box)
    cb1.ax.set_position([box.x0*1.02 + box.width * 1.02, box.y0, 0.02, box.height])
    if 'colorbar_label' in kwargs:
        cb1.ax.set_ylabel(kwargs['colorbar_label'])
    # Annotating the sequence at the 2nd axes
    if 'features' in kwargs:
        _annotate_sequence(ax1,ax2,resids,kwargs['features'])
    # Optional saving
    if 'filename' in kwargs:
        fig.savefig(kwargs['filename'])
    plt.show()
    return(fig)
|
import logging
import os
from flask import Flask, make_response, redirect, render_template, request
from btfs import sessions
from btfs.auth import AuthorizedUser, find_auth_user
app = Flask(__name__)
# NOTE(review): debug mode is force-enabled here; confirm this never ships to production.
app.debug = True
@app.route("/auth/users")
@sessions.flask_authorize("admin")
def auth_users():
    """Render the admin page listing all authorized users.

    The page allows deleting a user or adding a new one by email address
    (handled by the useradd/userdelete endpoints).
    """
    context = {"authorized_users": AuthorizedUser.list_all()}
    return render_template("auth_users.html", **context)
@app.route("/auth/useradd", methods=["POST"])
@sessions.flask_authorize("admin")
def user_add():
    """Handle the POST that adds a new authorized user by email address."""
    email = request.form["email"]
    if not email:
        return redirect("/auth/users?invalid")
    auth_user = AuthorizedUser()
    auth_user.email = email.lower()
    auth_user.nickname = email.split("@")[0]
    # auth_user.user = user.to_dict()
    roles = request.form.get("roles")
    if roles:
        auth_user.roles = roles
        # Write access is implied by the admin or editor role.
        role_list = auth_user.roles.split(",")
        auth_user.canWrite = "admin" in role_list or "editor" in role_list
    elif request.form.get("write"):
        auth_user.roles = "editor"
        auth_user.canWrite = True
    else:
        auth_user.roles = "reader"
        auth_user.canWrite = False
    auth_user.claims = None
    auth_user.put()
    return redirect("/auth/users?updated")
@app.route("/auth/userdelete", methods=["GET"])
@sessions.flask_authorize("admin")
def user_delete():
    """Delete an authorized user (looked up by email) from the datastore."""
    email = request.args["email"]
    if not email:
        return redirect("/auth/users?invalid")
    auth_user = find_auth_user(email)
    # Only delete when the stored record really matches the requested address.
    if auth_user and auth_user.email == email.lower():
        auth_user.delete()
    else:
        logging.error("Invalid user to delete: %s" % auth_user)
    return redirect("/auth/users?deleted")
# See also session cookies at https://firebase.google.com/docs/auth/admin/manage-cookies
@app.route("/auth/")
def user_home():
    """Render the auth home page and refresh the persistent session cookie."""
    # if we already retrieved this in users or a decorator
    session = sessions.get_current_session(request.environ)
    access = "read"
    if session.has_access("write"):
        access = "write"
    resp = make_response(
        render_template(
            "auth_home.html",
            auth_user=session,
            user_claims=session.claims,
            login_url=sessions.LOGIN_URL,
            logout_url=sessions.LOGOUT_URL,
            access=access,
        )
    )
    # set persistent session cookie corresponding to the id_token
    key = sessions.get_cookie_name("session_id")
    value = session.session_id
    max_age = (
        sessions.EXPIRE_DAYS * 24 * 60 * 60
    )  # set to EXPIRE_DAYS days here (id_token expires in 1 hour)
    PROXY_PREFIX = os.environ.get("PROXY_PREFIX", "")
    path = "%s/" % PROXY_PREFIX
    resp.set_cookie(key, value, max_age=max_age, path=path)
    # Clear the short-lived id_token cookie now that the session cookie is set.
    key = sessions.get_cookie_name("id_token")
    resp.set_cookie(key, "", max_age=None, path=path)
    return resp
@app.route("/auth/nologin", methods=["GET", "POST"])
def user_login():
    # Login itself is handled externally; just bounce back to the auth home page.
    return redirect("/auth/?hello")
@app.route("/auth/logout", methods=["GET", "POST"])
def user_logout():
    """Terminate the current session, if any, and return to the auth home page."""
    current = sessions.get_current_session(request.environ)
    if current.is_user():
        current.delete()
    return redirect("/auth/?goodbye")
@app.route("/auth/login", methods=["GET", "POST"])
@app.route("/auth/token", methods=["GET", "POST"])
def user_token():
    """Render the Firebase token/login page with the current session's claims."""
    # if we already retrieved this in users or a decorator
    session = sessions.get_current_session(request.environ)
    context = {
        "user_claims": session.claims,
        "error_message": request.environ.get("ID_TOKEN_ERROR"),
        "auth_url": sessions.AUTH_URL,
        "logout_url": sessions.LOGOUT_URL,
        "FIREBASE_PROJECT_ID": os.environ.get("FIREBASE_PROJECT_ID", "MY_PROJECT_ID"),
        "FIREBASE_API_KEY": os.environ.get("FIREBASE_API_KEY", "MY_API_KEY"),
        "FIREBASE_ID_TOKEN": sessions.get_cookie_name("id_token"),
        "FIREBASEJS_SDK_VERSION": os.environ.get("FIREBASEJS_SDK_VERSION", "9.1.0"),
        "FIREBASEJS_UI_VERSION": os.environ.get("FIREBASEJS_UI_VERSION", "5.0.0"),
        "PROXY_PREFIX": os.environ.get("PROXY_PREFIX", ""),
    }
    return render_template("auth_token.html", **context)
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from datetime import datetime, timedelta
from sleekxmpp.stanza import Presence
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins.xep_0319 import stanza
class XEP_0319(BasePlugin):
    """XEP-0319: advertise a user's last interaction time via presence stanzas."""

    name = 'xep_0319'
    description = 'XEP-0319: Last User Interaction in Presence'
    dependencies = set(['xep_0012'])
    stanza = stanza

    def plugin_init(self):
        # Per-JID store of the last idle timestamp (absent/None = active).
        self._idle_stamps = {}
        register_stanza_plugin(Presence, stanza.Idle)
        self.api.register(self._set_idle,
                          'set_idle',
                          default=True)
        self.api.register(self._get_idle,
                          'get_idle',
                          default=True)
        # Dispatch incoming presence carrying an <idle/> payload.
        self.xmpp.register_handler(
                Callback('Idle Presence',
                         StanzaPath('presence/idle'),
                         self._idle_presence))
        # Stamp our own outgoing presence with the stored idle time.
        self.xmpp.add_filter('out', self._stamp_idle_presence)

    def session_bind(self, jid):
        self.xmpp['xep_0030'].add_feature('urn:xmpp:idle:0')

    def plugin_end(self):
        # Undo everything plugin_init registered.
        self.xmpp['xep_0030'].del_feature(feature='urn:xmpp:idle:0')
        self.xmpp.del_filter('out', self._stamp_idle_presence)
        self.xmpp.remove_handler('Idle Presence')

    def idle(self, jid=None, since=None):
        """Mark *jid* as idle since *since* (defaults to now) and publish last activity."""
        seconds = None
        if since is None:
            since = datetime.now()
        else:
            # NOTE(review): this assigns a timedelta, yet the parameter below is
            # named 'seconds' — confirm set_last_activity accepts a timedelta.
            seconds = datetime.now() - since
        self.api['set_idle'](jid, None, None, since)
        self.xmpp['xep_0012'].set_last_activity(jid=jid, seconds=seconds)

    def active(self, jid=None):
        """Clear the idle state for *jid* and remove its last-activity record."""
        self.api['set_idle'](jid, None, None, None)
        self.xmpp['xep_0012'].del_last_activity(jid)

    def _set_idle(self, jid, node, ifrom, data):
        # Default 'set_idle' API handler: remember the timestamp for this JID.
        self._idle_stamps[jid] = data

    def _get_idle(self, jid, node, ifrom, data):
        # Default 'get_idle' API handler: fetch the remembered timestamp.
        return self._idle_stamps.get(jid, None)

    def _idle_presence(self, pres):
        # Surface incoming idle presence to application code as an event.
        self.xmpp.event('presence_idle', pres)

    def _stamp_idle_presence(self, stanza):
        # Outgoing filter: attach the stored idle timestamp to our presence.
        if isinstance(stanza, Presence):
            since = self.api['get_idle'](stanza['from'] or self.xmpp.boundjid)
            if since:
                stanza['idle']['since'] = since
        return stanza
|
import traceback
import time
from kernel import OpenStratKernel
from util import DataHandler
class OpenStrat:
    """Scrape calendar-call option data per symbol via OpenStratKernel
    and accumulate the rows in a DataHandler."""

    def __init__(self):
        # Column order of the collected dataset.
        self.info_keys = ['Symbol',
                          'FrontMonth',
                          'BackMonth',
                          'NetDebit',
                          'MaxLoss',
                          'MaxProfit',
                          'BreakEvens',
                          'ChanceOfProfit']
        self.current_symbol = ""
        self.handler = DataHandler(self.info_keys)
        self.kernel = OpenStratKernel()
        self.kernel.HEADLESS_MODE = True
        self.kernel.start_browser_engine()

    def __fetch_and_put_info(self):
        """Walk every front/back month combination for the current symbol
        and store each fetched info row; a -1 status ends a month loop."""
        # Removed unused counters (i, y) from the original implementation.
        fstatus = 0
        self.kernel.reset_fmonth_indx()
        while fstatus != -1:
            fstatus = self.kernel.goto_fmonth(self.kernel.fmonth_indx)
            self.kernel.fmonth_indx += 1
            self.kernel.reset_bmonth_indx()
            bstatus = 0
            while bstatus != -1:
                bstatus = self.kernel.goto_bmonth(self.kernel.bmonth_indx)
                self.kernel.bmonth_indx += 1
                try:
                    info = self.kernel.fetch_calendar_call_info()
                except Exception:
                    # Combination without data; best-effort skip (as before).
                    continue
                info['Symbol'] = self.current_symbol
                self.handler.put(info)
                print(".", end="", flush=True)

    def get_data_by_symbol_iteration(self):
        """Iterate all symbols, fetching info per symbol; sleep every
        INTERVAL symbols to avoid overloading the server."""
        i = 1
        INTERVAL = 10
        self.kernel.load_symbols()
        for symbol in self.kernel.read_symbols():
            self.current_symbol = symbol
            if (i % INTERVAL == 0):
                print("[*] Sleeping for 5 mins to avoid loading server")
                time.sleep(60 * 5)
            self.kernel.load_calendar_call(symbol)
            print("[+] Fetching info from page")
            try:
                self.__fetch_and_put_info()
            except Exception:
                # Fixed: a bare 'except:' also swallowed KeyboardInterrupt and
                # SystemExit, making the scraper impossible to interrupt.
                continue
            print("\n")
            print("=====" * 10)
            i += 1

    def save_data(self, fname="data.csv"):
        """Write everything collected so far to *fname* as CSV."""
        data = self.handler.as_dataframe()
        data.to_csv(fname, index=False)
        print(f"[+] Saved data to {fname}")

    def close(self):
        """Shut down the kernel's browser engine."""
        self.kernel.shutdown_browser_engine()
if __name__ == "__main__":
    # Full run: scrape every symbol, dump to data.csv, then release the browser.
    app = OpenStrat()
    app.get_data_by_symbol_iteration()
    app.save_data()
    app.close()
|
import typing
from std_number_validation import typings
class BaseValidator:
    """Validate a number with a pluggable algorithm class.

    The algorithm class is instantiated on construction; an optional
    exception type is stored for subclasses that raise on failure.
    """

    def __init__(self, number: int,
                 algorithm: typing.Type[typings.BaseAlgorithmType],
                 exc_to_raise: typing.Type[typings.BaseExceptionType] = None):
        self.param = number
        self.algorithm = algorithm()
        # Kept for subclasses that override determine_output to raise.
        self.exception = exc_to_raise

    def parameter_validation(self, param) -> bool:
        """
        Since we are mostly validating numbers we expect this method to be
        common across all validators.
        :param param:
        :return: True when *param* is an int
        """
        return isinstance(param, int)

    def is_valid(self) -> bool:
        """
        Main public interface: run the parameter check, then the algorithm,
        and route failures through determine_output (return bool or raise an
        exception according to configuration).
        :return:
        """
        if self.parameter_validation(self.param) and self.algorithm.is_valid(self.param):
            return True
        # Fixed: both failure branches were duplicated; a bad parameter and a
        # failed algorithm check funnel into the same output policy.
        return self.determine_output(False)

    def determine_output(self, validation_result) -> bool:
        """
        Default behaviour to process output: return the result unchanged
        (subclasses may raise self.exception instead).
        :param validation_result:
        :return:
        """
        return validation_result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.