content stringlengths 5 1.05M |
|---|
from _context import kgmodels
from kgmodels import util
from util import d
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import rdflib as rdf
import pandas as pd
import numpy as np
import random, sys, tqdm
from tqdm import trange
import rgat
from argparse import ArgumentParser
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import kgbench as kg
"""
Full batch training GAT and RGCN
"""
# Small constant to guard against division by zero / log(0).
EPSILON = 0.000000001
# NOTE(review): a `global` statement at module level is a no-op; `repeats` is
# actually bound inside go() below. Kept for byte-compatibility.
global repeats
def prt(str, end='\n'):
    """Print *str* only when running a single repeat (quiet during multi-repeat runs).

    NOTE(review): the parameter name shadows the builtin ``str``; renaming it
    would be cleaner but could break keyword callers, so it is left as-is.
    """
    if repeats == 1:
        print(str, end=end)
def go(arg):
    """Run the full-batch node-classification experiment described by *arg*.

    Loads or generates a graph dataset (random/fan/kgbench/legacy loader),
    builds the model selected by ``arg.mixer``, trains it full-batch for
    ``arg.epochs`` epochs, and reports train/test accuracy averaged over
    ``arg.repeats`` runs.
    """
    global repeats
    repeats = arg.repeats
    dev = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_accs = []
    test_accs = []
    edges = triples = None
    train = test = None
    # NOTE(review): the 'random' branch is a plain `if`, and the 'fan' branch
    # starts a *separate* if/elif/else chain below — so for name 'random' the
    # `else` (legacy loader) branch also runs. Presumably this should be one
    # elif chain; confirm against the intended datasets.
    if arg.name == 'random':
        edges, N, (train_idx, train_lbl), (test_idx, test_lbl) = \
            kgmodels.random_graph(base=arg.base, bgp0=arg.bgp0, bgp1=arg.bgp1)
        num_cls = 2
        print(f'Generated random graph with {N} nodes.')
    if arg.name == 'fan':
        edges, N, (train_idx, train_lbl), (test_idx, test_lbl) = \
            kgmodels.fan(depth=arg.rdepth, diffusion=arg.fdiff, others=1000)
        num_cls = 2
        # print(f'Generated random graph with {N} nodes.')
        # print(list(zip(* (edges[0]))) )
        # sys.exit()
    elif arg.name in kg.names:
        # kgbench dataset: entities/relations come pre-indexed.
        data = kg.load(arg.name, torch=True, prune_dist=arg.depth)
        n2i,i2n = data.e2i, data.i2e
        r2i, i2r = data.r2i, data.i2r
        train_idx, train_lbl = data.training[:, 0], data.training[:, 1]
        test_idx, test_lbl = data.withheld[:, 0], data.withheld[:, 1]
        triples = util.enrich(data.triples, n=len(i2n), r=len(i2r))
        # -- enrich: add inverse links and self loops.
        N = len(i2n)
        num_cls = data.num_classes
    else:
        # Legacy loader path: build index mappings and label tensors by hand.
        edges, (n2i, i2n), (r2i, i2r), train, test = \
            kgmodels.load(arg.name, final=arg.final, limit=arg.limit, bidir=True, prune=arg.prune)
        # Convert test and train to tensors
        train_idx = [n2i[name] for name, _ in train.items()]
        train_lbl = [cls for _, cls in train.items()]
        train_idx = torch.tensor(train_idx, dtype=torch.long, device=dev)
        train_lbl = torch.tensor(train_lbl, dtype=torch.long, device=dev)
        test_idx = [n2i[name] for name, _ in test.items()]
        test_lbl = [cls for _, cls in test.items()]
        test_idx = torch.tensor(test_idx, dtype=torch.long, device=dev)
        test_lbl = torch.tensor(test_lbl, dtype=torch.long, device=dev)
        # count nr of classes
        cls = set([int(l) for l in test_lbl] + [int(l) for l in train_lbl])
        N = len(i2n)
        num_cls = len(cls)
        print('some test set ids and labels', test_idx[:10], test_lbl[:10])
    # Dataset statistics (assumes i2n/i2r were set by one of the branches above).
    tnodes = len(i2n)
    totaledges = sum([len(x[0]) for _, x in edges.items()]) if triples is None else triples.size(0)
    print(f'{tnodes} nodes')
    print(f'{len(i2r)} relations')
    print(f'{totaledges} edges (including self loops and inverse)')
    print(f'{(totaledges-tnodes)//2} edges (originally)')
    if train:
        print(f'{len(train.keys())} training labels')
        print(f'{len(test.keys())} test labels')
    for r in tqdm.trange(repeats) if repeats > 1 else range(repeats):
        """
        Define model
        """
        if arg.mixer == 'classic':
            model = kgmodels.RGCNClassic(edges=edges, n=N, numcls=num_cls, num_rels=len(i2r)*2+1, emb=arg.emb, bases=arg.bases, softmax=arg.softmax, triples=triples)
        elif arg.mixer == 'emb':
            model = kgmodels.RGCNEmb(edges=edges, n=N, numcls=num_cls, emb=arg.emb1, h=arg.emb, bases=arg.bases, separate_emb=arg.sep_emb)
        elif arg.mixer == 'lgcn':
            model = kgmodels.LGCN(
                triples=triples if triples is not None else util.triples(edges),
                n=N, rp=arg.latents, numcls=num_cls, emb=arg.emb1,
                ldepth=arg.latent_depth, lwidth=arg.latent_width, bases=arg.bases)
        elif arg.mixer == 'weighted':
            model = kgmodels.RGCNWeighted(edges=edges, n=N, numcls=num_cls, emb=arg.emb1, h=arg.emb, bases=arg.bases,
                separate_emb=arg.sep_emb, indep=arg.indep, sample=arg.sample)
        else:
            # Fallback: generic GAT-style node classifier.
            model = kgmodels.NodeClassifier(edges=edges, n=N, depth=arg.depth, emb=arg.emb, mixer=arg.mixer, numcls=num_cls,
                dropout=arg.do, bases=arg.bases, norm_method=arg.norm_method, heads=arg.heads,
                unify=arg.unify, dropin=arg.dropin, sep_emb=arg.sep_emb, res=not arg.nores,
                norm=not arg.nonorm, ff=not arg.noff)
        if torch.cuda.is_available():
            prt('Using CUDA.')
            model.cuda()
            train_lbl = train_lbl.cuda()
            test_lbl = test_lbl.cuda()
        if arg.opt == 'adam':
            opt = torch.optim.Adam(model.parameters(), lr=arg.lr, weight_decay=arg.wd)
        elif arg.opt == 'adamw':
            opt = torch.optim.AdamW(model.parameters(), lr=arg.lr, weight_decay=arg.wd)
        else:
            raise Exception
        plt.figure()
        for e in range(arg.epochs):
            # Full-batch training step: forward the whole graph, select the
            # training rows, backprop the cross-entropy loss.
            model.train(True)
            opt.zero_grad()
            cls = model()[train_idx, :]
            loss = F.cross_entropy(cls, train_lbl)
            if arg.l2weight is not None:
                loss = loss + arg.l2weight * model.penalty(p=2)
            if arg.l1 > 0.0:
                # L1 penalty on edge weights (weighted mixer only uses this meaningfully).
                loss = loss + arg.l1 * model.edgeweights().norm(p=1)
            loss.backward()
            opt.step()
            prt(f'epoch {e}, loss {loss.item():.2}', end='')
            # Evaluate
            with torch.no_grad():
                model.train(False)
                cls = model()[train_idx, :]
                agreement = cls.argmax(dim=1) == train_lbl
                accuracy = float(agreement.sum()) / agreement.size(0)
                prt(f', train accuracy {float(accuracy):.2}', end='')
                if e == arg.epochs - 1:
                    # Record final-epoch accuracy for the cross-repeat averages.
                    train_accs.append(float(accuracy))
                cls = model()[test_idx, :]
                agreement = cls.argmax(dim=1) == test_lbl
                accuracy = float(agreement.sum()) / agreement.size(0)
                prt(f', test accuracy {float(accuracy):.2}')
                if e == arg.epochs - 1:
                    test_accs.append(float(accuracy))
                if arg.mixer == 'weighted':
                    # plot edgeweights
                    weights = model.edgeweights().cpu().numpy()
                    plt.hist(weights, bins=100)
                    plt.yscale('log')
                    plt.savefig(f'edgeweights.{e:03}.png')
                    plt.clf()
            if torch.cuda.is_available():
                del loss # clear memory
                torch.cuda.empty_cache()
        # print(model.gblocks[0].mixer.weights.mean(-1).mean(-1))
        print('training finished.')
    # Aggregate final-epoch accuracies over all repeats.
    tracc, teacc = torch.tensor(train_accs), torch.tensor(test_accs)
    print(f'mean training accuracy {tracc.mean():.3} ({tracc.std():.3}) \t{train_accs}')
    print(f'mean test accuracy {teacc.mean():.3} ({teacc.std():.3}) \t{test_accs}')
if __name__ == "__main__":
    ## Parse the command line options
    parser = ArgumentParser()
    # Fixed: the --epochs help text was a copy-paste of the embedding-size help.
    parser.add_argument("-e", "--epochs",
                        dest="epochs",
                        help="Number of training epochs.",
                        default=150, type=int)
    parser.add_argument("-d", "--depth",
                        dest="depth",
                        help="Nr of layers.",
                        default=2, type=int)
    parser.add_argument("-E", "--embedding-size",
                        dest="emb",
                        help="Size (nr of dimensions) of the hidden layer.",
                        default=16, type=int)
    parser.add_argument("--embedding-init",
                        dest="emb1",
                        help="Size (nr of dimensions) of the _initial_ node embeddings.",
                        default=128, type=int)
    parser.add_argument("-l", "--learn-rate",
                        dest="lr",
                        help="Learning rate",
                        default=0.001, type=float)
    parser.add_argument("--weight-decay",
                        dest="wd",
                        help="Weight decay (using AdamW implementation).",
                        default=0.0, type=float)
    parser.add_argument("--l2-weight", dest="l2weight",
                        help="Weight for explicit L2 loss term.",
                        default=None, type=float)
    parser.add_argument("--do",
                        dest="do",
                        help="Dropout",
                        default=None, type=float)
    parser.add_argument("-D", "--dataset-name",
                        dest="name",
                        help="Name of dataset to use [aifb, am]",
                        default='aifb', type=str)
    parser.add_argument("-m", "--mixer",
                        dest="mixer",
                        help="Which mixing layer to use [gcn, gat]",
                        default='gcn', type=str)
    parser.add_argument("--indep",
                        dest="indep",
                        help="Learn independent attention weights for each edge instead of ones derived from node embeddings).",
                        action="store_true")
    parser.add_argument("--normalize",
                        dest="normalize",
                        help="Normalize the weighted adjacency matrix.",
                        action="store_true")
    parser.add_argument("-F", "--final", dest="final",
                        help="Use the canonical test set instead of a validation split.",
                        action="store_true")
    parser.add_argument("--limit",
                        dest="limit",
                        help="Limit the number of relations.",
                        default=None, type=int)
    parser.add_argument("--bgp0",
                        dest="bgp0",
                        help="BGP for class 0.",
                        default='[(0, 1, 1)]', type=str)
    parser.add_argument("--bgp1",
                        dest="bgp1",
                        help="BGP for class 1.",
                        default='[(0, 2, 2)]', type=str)
    parser.add_argument("--heads",
                        dest="heads",
                        help="Number of attention heads per relation.",
                        default=4, type=int)
    parser.add_argument("--bases",
                        dest="bases",
                        help="Number of bases.",
                        default=None, type=int)
    parser.add_argument("--repeats",
                        dest="repeats",
                        help="Number of times to repeat the experiment.",
                        default=1, type=int)
    parser.add_argument("--random-base",
                        dest="base",
                        help="Base network for the random graph experiment.",
                        default='aifb', type=str)
    parser.add_argument("--random-depth",
                        dest="rdepth",
                        help="Depth of random graph (if applicable).",
                        default=2, type=int)
    parser.add_argument("--fan-diffusion",
                        dest="fdiff",
                        help="Amount of diffusion in the fan graph.",
                        default=5, type=int)
    parser.add_argument("--latents",
                        dest="latents",
                        help="Number of latent relations in the LGCN.",
                        default=None, type=int)
    parser.add_argument("--latent-depth",
                        dest="latent_depth",
                        help="Number of hidden layers in the latent MLP.",
                        default=0, type=int)
    parser.add_argument("--latent-width",
                        dest="latent_width",
                        help="Number of hidden units in the hidden layers of the MLP.",
                        default=128, type=int)
    parser.add_argument("--l1",
                        dest="l1",
                        help="L1 loss term for the weights.",
                        default=0.0, type=float)
    parser.add_argument("--nm",
                        dest="norm_method",
                        help="Method for row-normalizing the GAT attention weights.",
                        default='abs', type=str)
    parser.add_argument("--unify",
                        dest="unify",
                        help="Method for unifying the relations.",
                        default='sum', type=str)
    parser.add_argument("--opt",
                        dest="opt",
                        help="Optimizer.",
                        default='adamw', type=str)
    parser.add_argument("--conditional", dest="cond",
                        help="Condition on the target node.",
                        action="store_true")
    # Fixed: typo "atte tion" -> "attention" in the help text.
    parser.add_argument("--dropin", dest="dropin",
                        help="Randomly mask out connections by attention weight.",
                        action="store_true")
    parser.add_argument("--separate-embeddings", dest="sep_emb",
                        help="Separate embeddings per relation (expensive, but closer to original RGCN).",
                        action="store_true")
    parser.add_argument("--no-res", dest="nores",
                        help="Disable residual connections.",
                        action="store_true")
    parser.add_argument("--no-norm", dest="nonorm",
                        help="Disable batch norm.",
                        action="store_true")
    parser.add_argument("--no-ff", dest="noff",
                        help="Disable local feed-forward (activation only).",
                        action="store_true")
    parser.add_argument("--prune", dest="prune",
                        help="Prune the graph to remove irrelevant links.",
                        action="store_true")
    parser.add_argument("--apply-softmax", dest="softmax",
                        help="Apply the softmax (apparently twice).",
                        action="store_true")
    parser.add_argument("--sample", dest="sample",
                        help="Subsample the graph according to the weights.",
                        action="store_true")
    options = parser.parse_args()
    print('OPTIONS ', options)
    go(options)
|
import sys
import logging
from config_parser import cfg
from logging.handlers import TimedRotatingFileHandler
# Silence the chatty "qrcode_machinecontrol" logger; only errors pass through.
logging.getLogger("qrcode_machinecontrol").setLevel(logging.ERROR)
# Shared format: time with milliseconds, level, process name, message.
log_formatter = logging.Formatter("%(asctime)s.%(msecs)03d %(levelname)7s %(processName)10s - %(message)s", "%H:%M:%S")
# Configure the root logger so every module-level logger inherits these handlers.
mylogger = logging.getLogger()
mylogger.setLevel(logging.DEBUG)
# Mirror all records to stdout...
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
# ...and to a log file that rotates at midnight, keeping 30 days of history.
rotate_logs_handler = TimedRotatingFileHandler(cfg.file.log_path, when="midnight", backupCount=30)
rotate_logs_handler.setFormatter(log_formatter)
mylogger.addHandler(console_handler)
mylogger.addHandler(rotate_logs_handler)
|
#!/usr/bin/env python
"""Create lxc-conf.yml"""
# Copyright 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import sys
import os
import platform
import pwd
import re
import os.path
from jinja2 import Environment, FileSystemLoader, TemplateNotFound, \
TemplateSyntaxError, TemplateAssertionError
import lib.logger as logger
from lib.config import Config
from lib.genesis import GEN_PLAY_PATH, HOME, OPSYS
USERNAME = pwd.getpwuid(os.getuid())[0]
class LxcConf(object):
    """Create lxc-conf.yml from the jinja2 template and the deployer config."""
    TEMPLATE_DIR = GEN_PLAY_PATH + 'templates/localhost'
    TEMPLATE_FILE = 'lxc-conf.j2'
    LXC_CONF = GEN_PLAY_PATH + 'lxc-conf.yml'
    # Keys of the per-network dicts handed to the template.
    TYPE = 'type'
    VLAN = 'vlan'
    IPADDR = 'ipaddr'
    PREFIX = 'prefix'

    def __init__(self, config_path=None):
        """Load the cluster config; only Ubuntu and redhat hosts are supported."""
        self.log = logger.getlogger()
        self.cfg = Config(config_path)
        if OPSYS not in ('Ubuntu', 'redhat'):
            raise Exception('Unsupported Operating System')

    def create(self):
        """Render lxc-conf.yml and install it as the user's LXC default.conf."""
        env = Environment(loader=FileSystemLoader(self.TEMPLATE_DIR))
        try:
            template = env.get_template(self.TEMPLATE_FILE)
        except TemplateNotFound as exc:
            self.log.error('Template not found: %s' % exc.name)
            print('Template not found: %s' % exc.name)
            sys.exit(1)
        except TemplateAssertionError as exc:
            self.log.error('Template assertion error: %s in %s, line %d' % (
                exc.message, exc.filename, exc.lineno))
            print('Template assertion error: %s in %s, line %d' % (
                exc.message, exc.filename, exc.lineno))
            sys.exit(1)
        except TemplateSyntaxError as exc:
            self.log.error('Template syntax error: %s in %s, line %d' % (
                exc.message, exc.filename, exc.lineno))
            print('Template syntax error: %s in %s, line %d' % (
                exc.message, exc.filename, exc.lineno))
            sys.exit(1)
        # Collect management networks (vlan/ip/prefix) ...
        nets = []
        for index, vlan in enumerate(self.cfg.yield_depl_netw_mgmt_vlan()):
            if vlan is not None:
                net = {}
                net[self.VLAN] = vlan
                net[self.IPADDR] = self.cfg.get_depl_netw_mgmt_cont_ip(index)
                net[self.PREFIX] = self.cfg.get_depl_netw_mgmt_prefix(index)
                nets.append(net)
        # ...and client networks (which additionally carry a type).
        for index, type_ in enumerate(self.cfg.yield_depl_netw_client_type()):
            if type_ is not None:
                net = {}
                net[self.TYPE] = type_
                net[self.VLAN] = self.cfg.get_depl_netw_client_vlan(index)
                net[self.IPADDR] = self.cfg.get_depl_netw_client_cont_ip(index)
                net[self.PREFIX] = self.cfg.get_depl_netw_client_prefix(index)
                nets.append(net)
        # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
        # this code assumes an older interpreter -- confirm before upgrading.
        distname, _, _ = platform.linux_distribution()
        uid_range, gid_range = self.get_lxc_uid_gid_range()
        # Sanity check: the subordinate id ranges must extend beyond 101000.
        # (asserts are stripped under -O; kept for behavioral compatibility.)
        assert(int(uid_range.split()[0]) + int(uid_range.split()[1]) > 101000)
        assert(int(gid_range.split()[0]) + int(gid_range.split()[1]) > 101000)
        try:
            with open(self.LXC_CONF, 'w') as lxc_conf:
                lxc_conf.write(template.render(
                    distribution=distname, networks=nets,
                    uidrange=uid_range, gidrange=gid_range))
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate.
            self.log.error('Failed to create: %s' % self.LXC_CONF)
            sys.exit(1)
        self.log.debug('Successfully created: %s' % self.LXC_CONF)
        if not os.path.exists(os.path.join(HOME, '.config', 'lxc')):
            self.log.debug('Creating path(s) {}'.format('.config/lxc'))
            os.makedirs(os.path.join(HOME, '.config', 'lxc'))
        os.system('cp ' + os.path.join(GEN_PLAY_PATH, 'lxc-conf.yml') + ' ' +
                  os.path.join(HOME, '.config', 'lxc', 'default.conf'))
        if not os.path.exists(os.path.join(HOME, '.local', 'share', 'lxc')):
            self.log.debug('Creating path(s) {}'.format('.local/share/lxc'))
            os.makedirs(os.path.join(HOME, '.local', 'share', 'lxc'))

    def get_lxc_uid_gid_range(self):
        """Return the user's subordinate uid and gid ranges as 'start count' strings.

        Reads /etc/subuid and /etc/subgid on Ubuntu.
        NOTE(review): on a non-Ubuntu host the ranges are never assigned and the
        final return raises NameError -- confirm the intended redhat behavior.
        """
        username = pwd.getpwuid(os.getuid())[0]
        if OPSYS == 'Ubuntu':
            try:
                # `with` guarantees the handle is closed (was a leaked open()).
                with open('/etc/subuid', 'r') as f:
                    data = f.read()
                uid_range = re.search(username + r':(\d+):(\d+)',
                                      data, re.MULTILINE)
                uid_range = uid_range.group(1) + ' ' + uid_range.group(2)
            except IOError as e:
                self.log.error(e)
                raise Exception(e)
            except AttributeError as e:
                # re.search returned None: the user has no subuid entry.
                self.log.error('Error getting uid for user: {}'.format(username))
                raise Exception(e)
            try:
                with open('/etc/subgid', 'r') as f:
                    data = f.read()
                gid_range = re.search(username + r':(\d+):(\d+)',
                                      data, re.MULTILINE)
                gid_range = gid_range.group(1) + ' ' + gid_range.group(2)
            except IOError as e:
                self.log.error(e)
                raise
            except AttributeError as e:
                # Fixed: this message previously said "uid" in the gid branch.
                self.log.error('Error getting gid for user: {}'.format(username))
                raise
        return uid_range, gid_range
if __name__ == '__main__':
    # Stand-alone invocation: initialize project logging, then generate the file.
    logger.create()
    LXC_CONF = LxcConf()
    LXC_CONF.create()
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # South data migration (Python 2): convert legacy 'invisible' Sources ('i')
    # to private visibility ('v').

    def forwards(self, orm):
        # Rewrite every Source still carrying the retired 'i' visibility flag.
        for source in orm.Source.objects.all():
            if source.visibility == 'i':
                source.visibility = 'v'
                source.save()

    def backwards(self, orm):
        # Irreversible by design: once 'i' has been folded into 'v' there is no
        # record of which Sources were originally invisible.
        print """NOTE: We're rolling back a migration that turned invisible
        Sources into private Sources, but now, there's no way to know which
        private Sources were previously invisible. So this migration
        rollback does nothing."""

    # Frozen ORM snapshot used by South to build the `orm` accessor above.
    models = {
        'images.camerainfo': {
            'Meta': {'object_name': 'CameraInfo'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
            'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
            'pixel_cm_ratio': ('django.db.models.fields.IntegerField', [], {}),
            'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {})
        },
        'images.image': {
            'Meta': {'object_name': 'Image'},
            'camera': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.CameraInfo']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'total_points': ('django.db.models.fields.IntegerField', [], {})
        },
        'images.point': {
            'Meta': {'object_name': 'Point'},
            'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'column': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
            'point_number': ('django.db.models.fields.IntegerField', [], {}),
            'row': ('django.db.models.fields.IntegerField', [], {})
        },
        'images.source': {
            'Meta': {'object_name': 'Source'},
            'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
        }
    }
    complete_apps = ['images']
|
from flask import current_app as app
from flask import Blueprint, render_template
from flask import session, request
from flask import redirect, url_for, abort
from werkzeug.routing import RequestRedirect
from .discord import make_session, get_twitch_name, add_role, DISCORD_AUTH_BASE_URL, DISCORD_TOKEN_URL
from .twitch import is_following
main = Blueprint('main', __name__)
@main.route('/')
def index():
    """Landing page: streamer/invite info plus whether this session is already linked."""
    return render_template('index.html', title='home', streamer=app.config['TWITCH_NAME'], discord=app.config['INVITE'], linked=session.get('linked'))
@main.route('/error/twitch_unlinked')
def twitch_unlinked():
    """Error page shown when the Discord account has no linked Twitch account."""
    return render_template('error.html', title='error', desc='your discord account must be linked to twitch to continue')
@main.route('/error/twitch')
def twitch_error():
    """Error page for failures talking to the Twitch API."""
    return render_template('error.html', title='error', desc='oops, we had some kind of error communicating with the Twitch API')
@main.route('/error/follow')
def follow():
    """Page shown when the user does not follow the streamer on Twitch."""
    return render_template('follow.html', title='not following', streamer=app.config['TWITCH_NAME'])
@main.route('/error/discord')
def discord_error():
    """Error page for failures while adding the Discord role."""
    return render_template('error.html', title='error', desc='an unhandled error happened while adding roles')
@main.errorhandler(404)
@main.errorhandler(401)
@main.errorhandler(400)
def generic_error(desc):
    """Render the generic error page for 400/401/404 responses.

    Flask passes the raised exception as *desc*; it is rendered directly.
    """
    return render_template('error.html', title='error', desc=desc)
def get_auth_url():
    """Build the Discord OAuth2 authorization URL and stash the CSRF state in the session."""
    with make_session() as discord:
        url, state = discord.authorization_url(DISCORD_AUTH_BASE_URL)
        session['oauth2_state'] = state
        return url
@main.route('/login')
def login():
    """Send the user to Discord's OAuth2 consent page."""
    return redirect(get_auth_url())
@main.route('/discord')
def callback():
    """OAuth2 redirect target: exchange the code for a token, then verify Twitch.

    verify_twitch() ends the request by raising a RequestRedirect, so this view
    never falls through normally after a successful token exchange.
    """
    state = session.get('oauth2_state')
    if not state and request.values.get('error'):
        # User denied authorization (or state was lost) -- back to the index page.
        return redirect(url_for('.index'))
    with make_session(state=state) as discord:
        token = discord.fetch_token(
            DISCORD_TOKEN_URL,
            client_secret=app.config['DISCORD_SECRET_KEY'],
            authorization_response=request.url)
        session['auth_token'] = token
        session.permanent = True
    verify_twitch()
def verify_twitch():
    """Check the caller's linked Twitch account, require a follow, then grant the role.

    Control flow is exception-based: every outcome raises a werkzeug
    RequestRedirect to the appropriate page (or aborts with 400).
    """
    username = get_twitch_name()
    if username is None:
        abort(400, 'couldn\'t find your twitch username')
    elif username == '__not_linked!':
        # Sentinel value from get_twitch_name(): Discord account has no Twitch link.
        raise RequestRedirect(url_for('.twitch_unlinked'))
    following = is_following(username)
    if following is None:
        # None signals a Twitch API failure, distinct from "not following" (False).
        raise RequestRedirect(url_for('.twitch_error'))
    elif following is False:
        raise RequestRedirect(url_for('.follow'))
    role = add_role()
    if role is None:
        raise RequestRedirect(url_for('.discord_error'))
    session['linked'] = True
    raise RequestRedirect(url_for('.index'))
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from a10.rules import baserule
import a10.utils.constants
class TestQuote(baserule.BaseRule):
    """Demonstration attestation rule that always verifies successfully."""

    def __init__(self, cid, ps):
        super().__init__(cid, ps)
        self.description = "TestQuote Test Rule Rulez OK"

    def apply(self):
        # Applies the rule
        print("Checking Magic Number")
        # Magic number reported inside the "quote" section of the claim.
        mn = self.claim["quote"]["magic"]
        print("Magic Number is ", mn)
        print("Additional parameters are ", self.parameters)
        # Returns a 2-tuple
        # 0 - The result
        # 1 - A description structure as a python dict
        return self.returnMessage(
            a10.utils.constants.VERIFYSUCCEED,
            self.description + " Always returns true",
            [{"claimID": self.claimID}],
        )
|
import numpy as np
import openmc
from collections.abc import Iterable
def legendre_from_expcoef(coef, domain=(-1, 1)):
    """Return a Legendre series object based on expansion coefficients.

    Given a list of coefficients from an FET tally and a domain, build the
    corresponding numpy Legendre series.

    Parameters
    ----------
    coef : Iterable of float
        A list of coefficients of each term in Legendre polynomials
    domain : (2,) List of float
        Domain of the Legendre polynomial

    Returns
    -------
    numpy.polynomial.Legendre
        A numpy Legendre series class
    """
    orders = np.arange(len(coef))
    width = domain[1] - domain[0]
    # Scale each expansion coefficient by (2n + 1) / (b - a).
    scaled = np.asarray(coef) * (2 * orders + 1) / width
    return np.polynomial.Legendre(scaled, domain)
class Polynomial(object):
    """Abstract base for polynomial series defined by a coefficient vector.

    Parameters
    ----------
    coef : Iterable of float
        Coefficients of each term in the polynomial.
    """

    def __init__(self, coef):
        # Store coefficients as a numpy array for vectorized evaluation.
        self.coef = np.asarray(coef)
class ZernikeRadial(Polynomial):
    """Radial-only Zernike polynomial series over a disk.

    The radial only Zernike polynomials are defined as in
    :class:`ZernikeRadialFilter`.

    Parameters
    ----------
    coef : Iterable of float
        A list of coefficients of each term in radial only Zernike polynomials
    radius : float
        Domain of Zernike polynomials to be applied on. Default is 1.

    Attributes
    ----------
    order : int
        The maximum (even) order of Zernike polynomials.
    radius : float
        Domain of Zernike polynomials to be applied on. Default is 1.
    norm_coef : iterable of float
        Coefficients of each term after normalization.
    """

    def __init__(self, coef, radius=1):
        super().__init__(coef)
        # Only even orders appear, so k coefficients span orders 0..2(k-1).
        self._order = 2 * (len(self.coef) - 1)
        self.radius = radius
        # Normalization factor (2n + 1) / (pi * R^2) for each term.
        scale = (2 * np.arange(len(self.coef)) + 1) / (np.pi * radius**2)
        self._norm_coef = scale * self.coef

    @property
    def order(self):
        """Maximum (even) polynomial order of the series."""
        return self._order

    def __call__(self, r):
        import openmc.capi as capi
        if isinstance(r, Iterable):
            # Evaluate the series independently at every requested position.
            return [np.sum(self._norm_coef * capi.calc_zn_rad(self.order, pos))
                    for pos in r]
        return np.sum(self._norm_coef * capi.calc_zn_rad(self.order, r))
|
# Configuration for quiz maker
quiz_configurations = {
    "amount_songs":50,    # number of songs per generated quiz
    "guessing_time":15,   # seconds the player gets to guess each song
    "result_time":5,      # seconds the answer is displayed
}
# File related configurations
file_configurations = {
    "output_folder":"Output",
    "output_file_name":"Automated guess album",
    "output_file_extension":"mp4",
    "input_folder":"songs",
    "default_image":"black_image.jpg",  # fallback image when a song has none
    "image_size":(382*2,595*2),         # (width, height) in pixels
}
# It's easier to combine video and audio files together; that's why temporary
# files are necessary -- they are automatically deleted after combining.
temp_files = {
    "audio":"audio.mp3",
    "video":"video.avi"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 10:42:30 2019
@author: ghosh128
"""
import sys
sys.path.append("../../")
import os
import config
import numpy as np
import tensorflow as tf
from collections import defaultdict
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
# Fix the TF1 graph-level seed for reproducibility.
tf.set_random_seed(1)
# %%
print("LOAD DATA")
# Sentence-level (fine) and review-level (coarse) test sets; last column is the label.
test_data_fine = np.load(os.path.join(config.NUMPY_DIR, "sentence_test_data.npy"))
test_data_coarse = np.load(os.path.join(config.NUMPY_DIR, "review_test_data.npy"))
num_features_fine = config.max_sentence_length
num_features_coarse = config.max_review_length
print("LOAD EMBEDDING")
# Build word->index and index->vector tables from GloVe 100d.
word_to_index = dict()
index_to_embedding = []
with open(os.path.join(config.EMBEDDING_DIR, "glove.6B.100d.txt"), "r", encoding="utf-8") as f:
    for (i, line) in enumerate(f):
        split = line.split(' ')
        word = split[0]
        representation = split[1:]
        representation = np.array([float(val) for val in representation])
        word_to_index[word] = i
        index_to_embedding.append(representation)
# Out-of-vocabulary words map to a trailing all-zero vector.
_WORD_NOT_FOUND = [0.0]* len(representation)
_LAST_INDEX = i + 1
word_to_index = defaultdict(lambda: _LAST_INDEX, word_to_index)
index_to_embedding = np.array(index_to_embedding + [_WORD_NOT_FOUND])
# %%
print("BUILD MODEL")
tf.reset_default_graph()
with tf.name_scope('data'):
    # Token-index inputs and binary labels for both resolutions.
    X_fine = tf.placeholder(tf.int32, [None, num_features_fine], name="fine_res_inputs")
    Y_fine = tf.placeholder(tf.float32, [None, 1], name="fine_res_labels")
    X_coarse = tf.placeholder(tf.int32, [None, num_features_coarse], name="corase_res_inputs")
    Y_coarse = tf.placeholder(tf.float32, [None, 1], name="coarse_res_labels")
    # Placeholder used once to copy the GloVe matrix into the graph variable.
    tf_embedding_placeholder = tf.placeholder(tf.float32, shape=[400001, 100])
with tf.variable_scope("Variables", reuse=tf.AUTO_REUSE):
    # Output projections (64-dim LSTM state -> 1 logit) per resolution.
    W_fine = tf.get_variable("Weights_layer_1_fine", [64, 1], initializer=tf.contrib.layers.xavier_initializer())
    b_fine = tf.get_variable("Biases_layer_1_fine", [1], initializer=tf.zeros_initializer())
    W_coarse = tf.get_variable("Weights_layer_1_coarse", [64, 1], initializer=tf.contrib.layers.xavier_initializer())
    b_coarse = tf.get_variable("Biases_layer_1_coarse", [1], initializer=tf.zeros_initializer())
    # Frozen embedding table, filled from the placeholder at session start.
    tf_embedding = tf.Variable(tf.constant(0.0, shape=[400001, 100]), trainable=False, name="Embedding")
    tf_embedding_init = tf_embedding.assign(tf_embedding_placeholder)
with tf.variable_scope("coarse", reuse=tf.AUTO_REUSE):
    lstm_cell_coarse = tf.nn.rnn_cell.LSTMCell(64, forget_bias=1.0)
    state_series_coarse, current_state_coarse = tf.nn.dynamic_rnn(lstm_cell_coarse, tf.nn.embedding_lookup(params=tf_embedding, ids=X_coarse), dtype=tf.float32)
    # Sigmoid over the last timestep's output gives the coarse prediction.
    Z_coarse = tf.nn.sigmoid(tf.add(tf.matmul(state_series_coarse[:,-1,:], W_coarse, name="multiply_weights"), b_coarse, name="add_bias"))
with tf.variable_scope("fine", reuse=tf.AUTO_REUSE):
    lstm_cell_fine = tf.nn.rnn_cell.LSTMCell(64, forget_bias=1.0)
    state_series_fine, current_state_fine = tf.nn.dynamic_rnn(lstm_cell_fine, tf.nn.embedding_lookup(params=tf_embedding, ids=X_fine), dtype=tf.float32)
    Z_fine = tf.nn.sigmoid(tf.add(tf.matmul(state_series_fine[:,-1,:], W_fine, name="multiply_weights"), b_fine, name="add_bias"))
#%%
print("TEST MODEL")
saver = tf.train.Saver()
with tf.Session() as sess:
    # Restore trained weights, then load the embedding matrix.
    saver.restore(sess, os.path.join(config.MODEL_DIR, "MULTI_RES", "Attention", "model.ckpt"))
    sess.run(tf_embedding_init, feed_dict={tf_embedding_placeholder: index_to_embedding})
    data_fine = test_data_fine[:,:num_features_fine]
    data_coarse = test_data_coarse[:,:num_features_coarse]
    feed_dict = {X_fine: data_fine, X_coarse: data_coarse}
    preds_fine, preds_coarse = sess.run([Z_fine, Z_coarse], feed_dict=feed_dict)
    # Threshold the sigmoid outputs at 0.5 to obtain hard labels.
    pred_labels_fine = np.zeros(preds_fine.shape)
    pred_labels_fine[preds_fine > 0.5] = 1
    pred_labels_fine[preds_fine < 0.5] = 0
    labels_fine = np.reshape(test_data_fine[:, -1], [-1, 1])
    pred_labels_coarse = np.zeros(preds_coarse.shape)
    pred_labels_coarse[preds_coarse > 0.5] = 1
    pred_labels_coarse[preds_coarse < 0.5] = 0
    labels_coarse = np.reshape(test_data_coarse[:, -1], [-1, 1])
    print("Accuracy_fine:", len(np.where([pred_labels_fine == labels_fine])[1])/int(len(labels_fine)), "Accuracy_coarse:", len(np.where([pred_labels_coarse == labels_coarse])[1])/int(len(labels_coarse)))
    print("f1_score_fine:", f1_score(labels_fine, pred_labels_fine), "f1_score_coarse:", f1_score(labels_coarse, pred_labels_coarse))
    # Score distributions split by true class (red = negative, green = positive).
    plt.hist(preds_fine[labels_fine==0], color="red")
    plt.hist(preds_fine[labels_fine==1], color="green")
    plt.show()
    plt.hist(preds_coarse[labels_coarse==0], color="red")
    plt.hist(preds_coarse[labels_coarse==1], color="green")
    plt.show()
|
import click
import random
import time
from gameoflife.Game import Game
def random_pop(width, height, initialpop):
    """Populate the board randomly, with specified population percentage.

    Each cell is alive with probability ``initialpop / 100``; the board is a
    list of *width* columns, each holding *height* booleans.
    """
    threshold = 1 - initialpop / 100
    # One random draw per cell, column-major, matching board[x][y] indexing.
    return [[random.random() > threshold for _y in range(height)]
            for _x in range(width)]
def glider(width, height):
    """Populate the board with a glider at upper left.

    Returns a width x height boolean board (board[x][y]) where only the five
    glider cells are alive.
    """
    live_cells = {(1, 0), (2, 1), (0, 2), (1, 2), (2, 2)}
    return [[(x, y) in live_cells for y in range(height)]
            for x in range(width)]
def beacon(width, height):
    """Populate the board with a beacon."""
    # Start from an all-dead width x height board (board[x][y]).
    initial_state = [
        [ False
        for y in range(0, height) ]
        for x in range (0, width)
    ]
    # Anchor the pattern near the board centre.
    x = width//2 - 6
    y = height//2 - 6
    def row_one(x, y):
        # Row variant A: two groups of three adjacent live cells with a gap.
        # Note: x/y here shadow the outer values; mutation stays local.
        x = x + 2
        initial_state[x][y] = True
        x += 1
        initial_state[x][y] = True
        x += 1
        initial_state[x][y] = True
        x += 4
        initial_state[x][y] = True
        x += 1
        initial_state[x][y] = True
        x += 1
        initial_state[x][y] = True
    def row_two(x, y):
        # Row variant B: four isolated live cells spaced 5/2/5 apart.
        x = x
        initial_state[x][y] = True
        x += 5
        initial_state[x][y] = True
        x += 2
        initial_state[x][y] = True
        x += 5
        initial_state[x][y] = True
    # Stamp the rows top-to-bottom; y advances between rows, x stays anchored.
    row_one(x, y)
    y += 2
    row_two(x, y)
    y += 1
    row_two(x, y)
    y += 1
    row_two(x, y)
    y += 1
    row_one(x, y)
    y += 2
    row_one(x, y)
    y += 1
    row_two(x, y)
    y += 1
    row_two(x, y)
    y += 1
    row_two(x, y)
    y += 2
    row_one(x, y)
    return initial_state
@click.command()
@click.option('--width', type=click.IntRange(20,100), default=25, help='width of game board')
@click.option('--height', type=click.IntRange(20,50), default=25, help='height of game board')
@click.option('--startpattern', type=click.Choice(['random', 'glider', 'beacon'], case_sensitive=False), default='random')
@click.option('--initialpop', type=click.IntRange(5,50), default=20, help='percentage of the board which should be alive initially')
@click.option('--generations', type=click.IntRange(10,2000, clamp=True), default=100, help='number of generations to simulate')
@click.option('--framerate', type=click.IntRange(1,100, clamp=True), default=5, help='number of frames per second')
def cli(width, height, startpattern, initialpop, generations, framerate):
    """Entry point: build the initial board from the chosen pattern and run the game."""
    initial_state = None
    if startpattern == 'random':
        initial_state = random_pop(width, height, initialpop)
    elif startpattern == 'glider':
        initial_state = glider(width, height)
    else:
        # Remaining choice is 'beacon'.
        initial_state = beacon(width, height)
    game = Game(width, height, initial_state)
    game.run(generations, framerate)
    print("complete")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Pattern.search(string[, pos[, endpos]])
"""
import re
def test1():
pattern = re.compile("d")
match = pattern.search("dog") # Match at index 0
print(match) # <_sre.SRE_Match object; span=(0, 1), match='d'>
# No match; search doesn't include the "d"
match = pattern.search("dog", 1)
print(match) # None
def test2():
"""
search 只匹配到了第一个符合的就结束了,没有继续匹配第二个 dog
"""
print('\ntest2')
pattern = re.compile("dog")
match = pattern.search("dog, I have a dog")
print(match) # <_sre.SRE_Match object; span=(0, 3), match='dog'>
print(match.group()) # dog
def main():
test1()
test2()
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewrite - rewrite tensorflow subgraph to onnx random_uniform op
"""
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx import utils
# pylint: disable=missing-docstring
def rewrite_random_uniform(g, ops):
pattern = \
OpTypePattern('Add', name='output', inputs=[
OpTypePattern('Mul', inputs=[
OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
OpTypePattern('Sub', name='input2', inputs=["*", "*"]),
]), None
])
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
input2 = match.get_op('input2')
output = match.get_op('output')
ru_op = match.get_op('input1')
# max is on input 0
tmax = input2.inputs[0].get_tensor_value()
tmin = input2.inputs[1].get_tensor_value()
new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output)
ops = g.replace_subgraph(ops, match, [], [output], [], [new_node])
return ops
# rewriter function when fold_const is enabled
def rewrite_random_uniform_fold_const(g, ops):
pattern = \
OpTypePattern('Add', name='output', inputs=[
OpTypePattern('Mul', name='mul', inputs=[
OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
None,
]),
None,
])
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
output = match.get_op('output')
mul = match.get_op('mul')
ru_op = match.get_op('input1')
tmax_minus_tmin = mul.inputs[1].get_tensor_value()
tmin = output.inputs[1].get_tensor_value()
tmax = tmin + tmax_minus_tmin
new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output)
ops = g.replace_subgraph(ops, match, [], [output], [], [new_node])
return ops
def create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output):
dtype = g.get_dtype(output.output[0])
op_name = utils.make_name("RandomUniform")
if ru_op.inputs[0].type == "Shape":
shape_node = ru_op.inputs[0]
new_node = g.make_node("RandomUniformLike", inputs=[shape_node.input[0]], name=op_name,
attr={"low": tmin, "high": tmax, "dtype": dtype})
else:
shape = g.get_shape(output.output[0])
new_node = g.make_node("RandomUniform", [], name=op_name,
attr={"low": tmin, "high": tmax, "dtype": dtype, "shape": shape})
return new_node
|
# Lemoneval Project
# Author: Abhabongse Janthong <6845502+abhabongse@users.noreply.github.com>
"""Parameter classes for each attribute in exercise frameworks."""
from typing import Sequence
from ..utils.argdefault import EMPTY_DEFAULT
class BaseParameter(object):
"""Parameter descriptor for framework classes.
This descriptor does not allow reassignment of values once it is assigned
for the first time. It also validates the data before assignment with the
method parameter_validate.
It uses __dict__ of the host instance to store the actual data.
"""
__slots__ = ("name",)
def __init__(self, *, name=EMPTY_DEFAULT):
self.name = name
def __set_name__(self, owner, name):
self.name = self.name or name
def __get__(self, instance, owner):
if instance is None:
return self
return instance.__dict__[self.name]
def __set__(self, instance, value):
if self.name in instance.__dict__:
raise AttributeError("reassigning parameter is not allowed")
if value is EMPTY_DEFAULT:
raise ValueError(f"missing parameter '{self.name}'")
instance.__dict__[self.name] = self.parameter_validate(value)
def __delete__(self, instance):
raise AttributeError("deleting parameter is not allowed")
def parameter_validate(self, value):
"""Parameter assignment validation: to be overriden by subclasses.
This method is called when there is an assignment of value to the
parameter of host instance to ensure the integrity of assigned value.
This method returns the sanitized value when the validation is
successful; otherwise, it raises an exception describing what went
wrong.
"""
return value
class DataTypeMixin(BaseParameter):
"""Parameter mixin class for parameter data type support.
Attributes:
dtype (type): Type or class for parameter values.
annotation: Annotation for parameter values to be used in signatures.
"""
def __init__(self, *, dtype=EMPTY_DEFAULT, annotation=EMPTY_DEFAULT,
**kwargs):
super().__init__(**kwargs)
if annotation is EMPTY_DEFAULT:
annotation = dtype
if dtype is not EMPTY_DEFAULT and not isinstance(dtype, type):
raise TypeError("invalid dtype specified")
self.dtype = dtype
self.annotation = annotation
def check_dtype(self, value, raise_error=True):
"""Check the data type of the given value."""
if hasattr(self, "dtype") and not isinstance(value, self.dtype):
dtype = getattr(self.dtype, "__qualname__", self.dtype)
if raise_error:
raise TypeError(
f"expecting value type '{dtype}' for '{self.name}' but "
f"{value!r} is given"
)
return False
return True
class DefaultValueMixin(BaseParameter):
"""Parameter mixin class for parameter default values."""
def __init__(self, *, default=EMPTY_DEFAULT, **kwargs):
super().__init__(**kwargs)
self.default = default
def __set__(self, instance, value):
if value is EMPTY_DEFAULT:
value = self.default
super().__set__(instance, value)
class ValidatorMixin(BaseParameter):
"""Parameter mixin class to add parameter validation support."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.validators = []
def add_validators(self, *validators):
"""Attach a sequence of validators to the parameter. Each validator
will be run against a single value argument when it is assigned to the
parameter.
A validator could be an object of BaseValidator type or any callable
which expects one value argument. If the value is considered valid by
the validator, then it should return True. Otherwise, it should either
return False or raise an exception describing what went wrong.
"""
from .validators import BaseValidator, PredicateValidator
for validator in validators:
if isinstance(validator, BaseValidator):
self.validators.append(validator)
elif callable(validator):
self.validators.append(PredicateValidator(validator))
else:
alien = getattr(validator, "__qualname__", validator)
raise TypeError(
f"expected a validator but {alien} was given"
)
def add_validator(self, validator):
"""This method is the same as add_validators except that only one
validator may be given at a time.
This method will return the validator itself so it could be used as a
decorator for a staticmethod validator in framework classes. It will
automatically be wrapped by @staticmethod upon return.
"""
self.add_validators(validator)
return staticmethod(validator)
def check_with_validators(self, value, raise_error=True):
"""Run validators against the given value."""
for validator in self.validators:
if not validator(value, self.name):
if raise_error:
raise ValueError(
f"the given value {value!r} failed the validation "
f"for '{self.name}'"
)
return False
return True
class Parameter(ValidatorMixin, DefaultValueMixin,
DataTypeMixin, BaseParameter):
"""Single-value parameter descriptor for exercise framework."""
def __init__(self, *, name=EMPTY_DEFAULT, dtype=EMPTY_DEFAULT,
default=EMPTY_DEFAULT):
super().__init__(name=name, dtype=dtype, default=default)
def parameter_validate(self, value):
self.check_dtype(value)
self.check_with_validators(value)
return value
class SequenceParameter(ValidatorMixin, DefaultValueMixin,
DataTypeMixin, BaseParameter):
"""Sequence of parameters descriptor for exercise framework.
This is similar to Parameter, except that it is a sequence of values
rather than a single value.
Lower bound and upper bound lengths (denoted as 'lb_length' and
'ub_length' respectively) can be provided through the keyword 'length'
to limit the length of sequence in this parameter. The 'length' could
be specified as a single integer (meaning the sequence has such fixed
length), or an iterable of size 1 or 2. In this latter case, the first
and the second integer of the iterable provides the lower bound and the
upper bound limit on the sequence length, respectively. If the second
integer is missing, then there is no upper bound limit.
"""
__slots__ = ("lb_length", "ub_length")
def __init__(self, *, name=EMPTY_DEFAULT, dtype=EMPTY_DEFAULT,
default=EMPTY_DEFAULT, length=(0,)):
annotation = dtype and Sequence[dtype]
super().__init__(
name=name, dtype=dtype, annotation=annotation, default=default
)
self.lb_length, self.ub_length = self._resolve_lengths(length)
def parameter_validate(self, values):
length, values = self.sanitize_values(values)
self.check_sequence_length(length)
for index, value in enumerate(values):
try:
self.check_dtype(value)
self.check_with_validators(value)
except Exception as e:
raise ValueError(
f"error for value at index {index} of parameter "
f"sequence '{self.name}'"
) from e
return values
@staticmethod
def _resolve_lengths(length):
"""Process the length input.
The 'length' could be specified as a single integer (meaning the
sequence has such fixed length), or an iterable of size 1 or 2. In this
latter case, the first and the second integer of the iterable provides
the lower bound and the upper bound limit on the sequence length,
respectively. If the second integer is missing, then there is no upper
bound limit."""
from math import inf as INF
if isinstance(length, int):
if length < 0:
raise ValueError(
f"'length' should be a non-negative interger but "
f"{length} is given"
)
return length, length
try:
size = len(length); length = tuple(length)
except TypeError as e:
raise TypeError(
f"'length' must be an integer or an iterable of 1 or 2 "
f"integers",
) from e
if not 1 <= len(length) <= 2:
raise ValueError(
f"expected an iterable 'length' of size 1 or 2, but one of "
f"size {size} was given"
)
if not all(isinstance(l, int) for l in length):
raise TypeError(
f"lower & upper bound limit of 'length' must be "
f"integers but {length} was given"
)
if length[0] < 0:
raise ValueError(
f"lower bound of 'length' should be a non-negative "
f"interger but {length[0]} is given"
)
if len(length) == 1:
return length[0], INF
if length[0] > length[1]:
raise ValueError(
f"lower bound of 'length' cannot be greater than upper bound "
f"but {length} were given"
)
return length
def sanitize_values(self, values):
"""Sanitize the values container by obtaining the finite length of
the container and convert it to a tuple sequence."""
try:
length = len(values) # make sure it is finitely countable
values = tuple(values) # works if iterable
except TypeError as e:
raise TypeError(
f"the values for '{self.name}' must be an iterable of finite "
f"length"
) from e
return length, values
def check_sequence_length(self, length):
"""Check if the length of the tuples is within the bounds."""
if not self.lb_length <= length <= self.ub_length:
if self.lb_length == self.ub_length:
raise ValueError(
f"expecting '{self.name}' of length {self.lb_length} but "
f"one of length {length} was given"
)
import math
if self.ub_length == math.inf:
raise ValueError(
f"expecting '{self.name}' of length at least "
f"{self.lb_length} but one of length {length} was given"
)
raise ValueError(
f"expecting '{self.name}' of length between {self.lb_length} "
f"and {self.ub_length} but one of length {length} was given"
)
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from corsheaders.defaults import default_headers
from polyaxon.services.headers import PolyaxonServiceHeaders
from polycommon.config_manager import ConfigManager
def set_cors(context, config: ConfigManager):
# session settings
context["CORS_ALLOW_CREDENTIALS"] = True
allowed_list = config.get_list(
"POLYAXON_CORS_ALLOWED_ORIGINS", is_optional=True, default=[]
)
context["CORS_ALLOWED_ORIGINS"] = allowed_list
context["CORS_ALLOW_ALL_ORIGINS"] = False if allowed_list else True
context["CORS_ALLOW_HEADERS"] = (
default_headers + PolyaxonServiceHeaders.get_headers()
)
ssl_enabled = config.get_boolean(
"POLYAXON_SSL_ENABLED", is_optional=True, default=False
)
ssl_redirect_enabled = config.get_boolean(
"POLYAXON_SSL_REDIRECT_ENABLED", is_optional=True, default=False
)
context["SSL_ENABLED"] = ssl_enabled
context["PROTOCOL"] = "http"
context["WS_PROTOCOL"] = "ws"
if ssl_enabled:
context["SESSION_COOKIE_SECURE"] = True
context["CSRF_COOKIE_SECURE"] = True
context["SECURE_PROXY_SSL_HEADER"] = ("HTTP_X_FORWARDED_PROTO", "https")
context["PROTOCOL"] = "https"
context["WS_PROTOCOL"] = "wss"
if ssl_redirect_enabled:
context["SECURE_SSL_REDIRECT"] = True
|
#!/usr/bin/env python3
import glob, os, shutil, time
print('File watcher started')
while True:
i = 0
while True:
i = i + 1
shareEnable = os.getenv('PROXY{}_ENABLE'.format(i))
if shareEnable == None:
break
elif not shareEnable == "1":
continue
shareDirectory = '/share{}'.format(i)
remoteMount = '/remote{}'.format(i)
files = glob.glob(shareDirectory + '/*.pdf')
for file in files:
currentTime = time.time()
modifiedTime = os.path.getmtime(file)
fileAge = currentTime - modifiedTime
if fileAge < 15:
continue
_, filename = os.path.split(file)
name, ext = os.path.splitext(filename)
i = 0
while True:
i = i + 1
remotePath = remoteMount + "/" + name + '{:04d}'.format(i) + ext
if not os.path.exists(remotePath):
break
try:
print("Move Scan: '" + file + "' -> '" + remotePath + "'")
shutil.copyfile(file, remotePath)
os.remove(file)
except (FileNotFoundError, OSError) as err:
print("↳ " + str(err))
time.sleep(5) |
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_almost_equal
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.metrics import (peak_signal_noise_ratio, normalized_root_mse,
mean_squared_error, normalized_mutual_information)
np.random.seed(5)
cam = data.camera()
sigma = 20.0
cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255)
cam_noisy = cam_noisy.astype(cam.dtype)
def test_PSNR_vs_IPOL():
""" Tests vs. imdiff result from the following IPOL article and code:
https://www.ipol.im/pub/art/2011/g_lmii/.
Notes
-----
To generate p_IPOL, we need a local copy of cam_noisy:
>>> from skimage import io
>>> io.imsave('/tmp/cam_noisy.png', cam_noisy)
Then, we use the following command:
$ ./imdiff -m psnr <path to camera.png>/camera.png /tmp/cam_noisy.png
Values for current data.camera() calculated by Gregory Lee on Sep, 2020.
Available at:
https://github.com/scikit-image/scikit-image/pull/4913#issuecomment-700653165
"""
p_IPOL = 22.409353363576034
p = peak_signal_noise_ratio(cam, cam_noisy)
assert_almost_equal(p, p_IPOL, decimal=4)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_PSNR_float(dtype):
    """PSNR on float images matches the uint8 result, including for
    mixed-precision inputs and when data_range must be inferred."""
    p_uint8 = peak_signal_noise_ratio(cam, cam_noisy)
    camf = (cam / 255.).astype(dtype, copy=False)
    camf_noisy = (cam_noisy / 255.).astype(dtype, copy=False)
    p_float64 = peak_signal_noise_ratio(camf, camf_noisy, data_range=1)
    assert p_float64.dtype == np.float64
    decimal = 3 if dtype == np.float16 else 5
    assert_almost_equal(p_uint8, p_float64, decimal=decimal)
    # mixed precision inputs
    p_mixed = peak_signal_noise_ratio(cam / 255., np.float32(cam_noisy / 255.),
                                      data_range=1)
    assert_almost_equal(p_mixed, p_float64, decimal=decimal)
    # mismatched dtype results in a warning if data_range is unspecified
    # (this check appeared twice verbatim in the original; the duplicate
    # block was removed)
    with expected_warnings(['Inputs have mismatched dtype']):
        p_mixed = peak_signal_noise_ratio(cam / 255.,
                                          np.float32(cam_noisy / 255.))
    assert_almost_equal(p_mixed, p_float64, decimal=decimal)
def test_PSNR_errors():
# shape mismatch
with pytest.raises(ValueError):
peak_signal_noise_ratio(cam, cam[:-1, :])
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_NRMSE(dtype):
    """NRMSE under each normalization mode, plus mixed-precision inputs."""
    est = np.ones(4, dtype=dtype)
    ref = np.asarray([0., 2., 2., 2.], dtype=dtype)
    nrmse = normalized_root_mse(ref, est, normalization='mean')
    assert nrmse.dtype == np.float64
    assert_equal(nrmse, 1 / np.mean(ref))
    assert_equal(normalized_root_mse(ref, est, normalization='euclidean'),
                 1 / np.sqrt(3))
    assert_equal(normalized_root_mse(ref, est, normalization='min-max'),
                 1 / (ref.max() - ref.min()))
    # mixed precision inputs are allowed
    assert_almost_equal(normalized_root_mse(ref, np.float32(est),
                                            normalization='min-max'),
                        1 / (ref.max() - ref.min()))
def test_NRMSE_no_int_overflow():
camf = cam.astype(np.float32)
cam_noisyf = cam_noisy.astype(np.float32)
assert_almost_equal(mean_squared_error(cam, cam_noisy),
mean_squared_error(camf, cam_noisyf))
assert_almost_equal(normalized_root_mse(cam, cam_noisy),
normalized_root_mse(camf, cam_noisyf))
def test_NRMSE_errors():
x = np.ones(4)
# shape mismatch
with pytest.raises(ValueError):
normalized_root_mse(x[:-1], x)
# invalid normalization name
with pytest.raises(ValueError):
normalized_root_mse(x, x, normalization='foo')
def test_nmi():
assert_almost_equal(normalized_mutual_information(cam, cam), 2)
assert (normalized_mutual_information(cam, cam_noisy)
< normalized_mutual_information(cam, cam))
def test_nmi_different_sizes():
assert normalized_mutual_information(cam[:, :400], cam[:400, :]) > 1
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_nmi_random(dtype):
    """Independent random images have NMI close to 1, returned as float64."""
    rng = np.random.default_rng()
    img_a = rng.random((100, 100)).astype(dtype)
    img_b = rng.random((100, 100)).astype(dtype)
    nmi = normalized_mutual_information(img_a, img_b, bins=10)
    assert nmi.dtype == np.float64
    assert_almost_equal(nmi, 1, decimal=2)
def test_nmi_random_3d():
random1, random2 = np.random.random((2, 10, 100, 100))
assert_almost_equal(
normalized_mutual_information(random1, random2, bins=10),
1,
decimal=2,
)
|
"""
# Details:
# Migration Tool
# Usage:
# python databaseMigrate.py db init
# Authors:
# Andrej Berg
# Michael King
# History:
# -
# Last modified: 12.07.2018
# ToDo:
# -
# Bugs:
# -
"""
__author__ = ["Andrej Berg", "Michael King"]
__date__ = "17.04.2018"
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from simdb.app.config import Config
# init Flask object and load configuration
app = Flask(__name__)
app.config.from_object(Config)
# init database and migration tool
db = SQLAlchemy(app)
# Import database models with app context
with app.app_context():
from simdb.databaseModel import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run() |
#! /usr/bin/env python3
#encoding: UTF-8
from os import *
import traceback
import sys, random
try:
(rd, wd) = pipe()
if fork():
# père
N = random.randint(0,0xFFFFFFFF)
print ("envoi de : ", N)
close(rd)
write(wd,N.to_bytes(4, sys.byteorder))
close(wd)
exit(0)
else:
close(wd)
bN = read(rd,4)
N = int.from_bytes(bN, sys.byteorder)
print ("reçu : ", N)
close(rd)
exit(0)
except OSError as e:
traceback.print_exc()
print (e.strerror)
exit(1)
|
#
# PySNMP MIB module WL400-GLOBAL-REG (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WL400-GLOBAL-REG
# Produced by pysmi-0.3.4 at Wed May 1 15:36:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Bits, Counter64, ObjectIdentity, iso, Integer32, Gauge32, Counter32, TimeTicks, ModuleIdentity, enterprises, MibIdentifier, Unsigned32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Bits", "Counter64", "ObjectIdentity", "iso", "Integer32", "Gauge32", "Counter32", "TimeTicks", "ModuleIdentity", "enterprises", "MibIdentifier", "Unsigned32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
wl400GlobalRegModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 232, 143, 1, 1))
if mibBuilder.loadTexts: wl400GlobalRegModule.setLastUpdated('9905260000Z')
if mibBuilder.loadTexts: wl400GlobalRegModule.setOrganization('Compaq Computer Corporation')
if mibBuilder.loadTexts: wl400GlobalRegModule.setContactInfo(' Name: Compaq Computer Corporation Address: 20555 SH 249 Zip: 77070 City: Houston Country: USA Phone: Fax: e-mail: ')
if mibBuilder.loadTexts: wl400GlobalRegModule.setDescription('The Compaq WL400 central registration module.')
compaq = MibIdentifier((1, 3, 6, 1, 4, 1, 232))
wl400Reg = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 143))
wl400Modules = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 143, 1))
wl400Generic = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144))
wl400Products = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 145))
wl400Experimental = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 146))
wl400HAP = ObjectIdentity((1, 3, 6, 1, 4, 1, 232, 143, 7))
if mibBuilder.loadTexts: wl400HAP.setStatus('current')
if mibBuilder.loadTexts: wl400HAP.setDescription('WL400 Wireless HAP')
mibBuilder.exportSymbols("WL400-GLOBAL-REG", wl400GlobalRegModule=wl400GlobalRegModule, wl400Modules=wl400Modules, PYSNMP_MODULE_ID=wl400GlobalRegModule, wl400Generic=wl400Generic, compaq=compaq, wl400Products=wl400Products, wl400Experimental=wl400Experimental, wl400HAP=wl400HAP, wl400Reg=wl400Reg)
|
import argparse
import torch
from torchvision import datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
class Generator(nn.Module):
''' Definition of the generator class
'''
def __init__(self, latent_dim, shape):
super(Generator, self).__init__()
self._shape = shape
self.model = nn.Sequential(
*self._block(latent_dim, 128),
*self._block(128, 256),
*self._block(256, 512),
*self._block(512, 1024),
nn.Linear(1024, int(np.prod(self._shape))),
nn.Tanh()
)
def _block(self, in_features, out_features, batch_norm=0.8):
layers = [nn.Linear(in_features, out_features)]
layers.append(nn.BatchNorm1d(out_features, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
def forward(self, x):
img = self.model(x)
img = img.view(img.size(0), *self._shape)
return img
class Discriminator(nn.Module):
def __init__(self, dims, shape):
super(Discriminator, self).__init__()
self._shape = shape
self.model = nn.Sequential(
nn.Linear(int(np.prod(shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 512),
nn.Dropout(0.4),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 512),
nn.Sigmoid(),
nn.Linear(512, 1),
)
def forward(self, img):
data = img.view(img.shape[0], -1)
return self.model(data)
def boundary_loss(valid):
return 0.5 * torch.mean((torch.log(valid) - torch.log(1 - valid))**2)
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--d1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--d2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--cpus", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--channels", type=int, default=1, help="image channels")
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
opt = parser.parse_args()
adversarial_loss = torch.nn.MSELoss()
generator = Generator(opt.latent_dim, (opt.channels, opt.img_size, opt.img_size))
discriminator = Discriminator(opt.latent_dim, (opt.channels, opt.img_size, opt.img_size))
trainset = datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.d1, opt.d2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.d1, opt.d2))
for epoch in range(opt.epochs):
for i, (imgs, _) in enumerate(trainloader):
image = Variable(imgs.type(torch.FloatTensor))
valid = Variable(torch.FloatTensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(torch.FloatTensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)
img = Variable(imgs.type(torch.FloatTensor))
optimizer_G.zero_grad()
# Sample noice
z = Variable(torch.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
gen_imgs = generator(z)
bs_loss = boundary_loss(discriminator(gen_imgs))
bs_loss.backward()
optimizer_G.step()
optimizer_D.zero_grad()
validity_real = discriminator(image)
a_loss_real = adversarial_loss(validity_real, valid)
validity_fake = discriminator(gen_imgs.detach())
a_loss_fake = adversarial_loss(validity_fake, fake)
d_loss = 0.5 * (a_loss_real + a_loss_fake)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.epochs, i, len(trainloader), d_loss.item(), bs_loss.item())
)
batches_done = epoch * len(trainloader) + i
#if batches_done % opt.sample_interval == 0:
# sample_image(n_row=10, batches_done=batches_done)
|
from pydantic import AnyUrl, ValidationError, BaseModel
from typing import List, Dict, Optional, Any
from enum import Enum
class DataciteIdentifier(BaseModel):
class DataciteIdentifierType(Enum):
DOI="DOI"
identifier: str = ""
identifierType: DataciteIdentifierType
class DataciteCreator(BaseModel):
creatorName: str = ""
nameIdentifiers: Optional[str] = ""
affiliations : Optional[List[str]]= []
familyName: Optional[str] = ""
givenName: Optional[str] = ""
class DataciteTitle(BaseModel):
class DataciteTitleType(Enum):
AlternativeTitle = "AlternativeTitle"
Subtitle = "Subtitle"
TranslatedTitle = "TranslatedTitle"
Other = "Other"
title: str = ""
type: Optional[DataciteTitleType]
lang: Optional[str]
class DataciteSubject(BaseModel):
subject : str = ""
subjectScheme : Optional[str] = ""
schemeURI: Optional[AnyUrl] = ""
valueURI: Optional[AnyUrl] = ""
lang : str = ""
class DataciteResourceTypeGeneral(Enum):
Audiovisual = "Audiovisual"
Collection = "Collection"
Dataset = "Dataset"
Event = "Event"
Image = "Image"
InteractiveResource = "InteractiveResource"
Model = "Model"
PhysicalObject = "PhysicalObject"
Service = "Service"
Software = "Software"
Sound = "Sound"
Text = "Text"
Workflow = "Workflow"
Other = "Other"
class DataciteResourceType(BaseModel):
resourceType : Optional[str] = ""
resourceTypeGeneral: DataciteResourceTypeGeneral = None
class DataciteContributor(BaseModel):
class DataciteContributorType(Enum):
ContactPerson = "ContactPerson"
DataCollector = "DataCollector"
DataCurator = "DataCurator"
DataManager = "DataManager"
Editor = "Editor"
HostingInstitution = "HostingInstitution"
Other = "Other"
Producer = "Producer"
ProjectLeader = "ProjectLeader"
ProjectManager = "ProjectManager"
ProjectMember = "ProjectMember"
RegistrationAgency = "RegistrationAgency"
RegistrationAuthority = "RegistrationAuthority"
RelatedPerson = "RelatedPerson"
ResearchGroup = "ResearchGroup"
RightsHolder = "RightsHolder"
Researcher = "Researcher"
Sponsor = "Sponsor"
Supervisor = "Supervisor"
WorkPackageLeader = "WorkPackageLeader"
contributorType: DataciteContributorType = None
contributorName: str = ""
affiliations: Optional[List[str]] = []
familyName: Optional[str] = ""
givenName: Optional[str] = ""
class DataciteAlternateIdentifier(BaseModel):
alternateIdentifier: str = ""
alternateIdentifierType : str = ""
class DataciteRelatedIdentifier(BaseModel):
class DataciteRelatedIdentifierType(Enum):
ARK = "ARK"
arXiv = "arXiv"
bibcode = "bibcode"
DOI = "DOI"
EAN13 = "EAN13"
EISSN = "EISSN"
Handle = "Handle"
IGSN = "IGSN"
ISBN = "ISBN"
ISSN = "ISSN"
ISTC = "ISTC"
LISSN = "LISSN"
LSID = "LSID"
PMID = "PMID"
PURL = "PURL"
UPC = "UPC"
URL = "URL"
URN = "URN"
class DataciteRelationType(Enum):
IsCitedBy = "IsCitedBy"
Cites = "Cites"
IsSupplementTo = "IsSupplementTo"
IsSupplementedBy = "IsSupplementedBy"
IsContinuedBy = "IsContinuedBy"
Continues = "Continues"
IsNewVersionOf = "IsNewVersionOf"
IsPreviousVersionOf = "IsPreviousVersionOf"
IsPartOf = "IsPartOf"
HasPart = "HasPart"
IsReferencedBy = "IsReferencedBy"
References = "References"
IsDocumentedBy = "IsDocumentedBy"
Documents = "Documents"
IsCompiledBy = "IsCompiledBy"
Compiles = "Compiles"
IsVariantFormOf = "IsVariantFormOf"
IsOriginalFormOf = "IsOriginalFormOf"
IsIdenticalTo = "IsIdenticalTo"
HasMetadata = "HasMetadata"
IsMetadataFor = "IsMetadataFor"
Reviews = "Reviews"
IsReviewedBy = "IsReviewedBy"
IsDerivedFrom = "IsDerivedFrom"
IsSourceOf = "IsSourceOf"
relatedIdentifer : str = ""
relatedIdentiferType : DataciteRelatedIdentifierType = None
relatedMetadataScheme: str = ""
schemeURI : AnyUrl = ""
class Datacite(BaseModel):
identifier: DataciteIdentifier
creators: List[DataciteCreator] = []
titles: List[DataciteTitle] = []
publisher : str = ""
publicationYear : str = ""
subjects : List[DataciteSubject]
resourceType : Optional[DataciteResourceType] = None
contributors : List[DataciteContributor] = []
language: Optional[str] = None
resourceType: DataciteResourceType = None
alternateIdentifiers : List[DataciteAlternateIdentifier] = []
relatedIdentifiers: List[DataciteRelatedIdentifier] = []
__datacite_version : Optional[str] = "4.3"
def to_json(self, exclude_unset=True):
return self.json(exclude_unset=exclude_unset)
def to_xml(self):
pass
def describe(self):
print("Datacite version: {}".format(self.__datacite_version)) |
### only inferencing
### with 100 images of COCO test2017 dataset
# reference : https://tutorials.pytorch.kr/beginner/transfer_learning_tutorial.html
# ResNet, GoogLeNet
import torch
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
from torch.optim import lr_scheduler
import torchvision.transforms as transforms # Transformations we can perform on our dataset
import torchvision
import os
import pandas as pd
from skimage import io
from PIL import Image
from torch.utils.data import Dataset, Subset, DataLoader # Gives easier dataset managment and creates mini batches
import time
from sklearn.model_selection import train_test_split
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
class PlaceDataset(Dataset):
    """Image-classification dataset driven by a CSV annotation file.

    Column 0 of the CSV is the image id (file stem, loaded from
    ``img_dir`` as ``<id>.jpg``); column 1 is the integer class label.
    """

    def __init__(self, csv_file, img_dir, transform=None):
        self.annotations = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.img_dir, str(self.annotations.iloc[index, 0]) + '.jpg')
        # Read with skimage, then normalise to a 3-channel PIL image so the
        # torchvision transforms can operate on it.
        pil_image = Image.fromarray(io.imread(img_path)).convert('RGB')
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
        if self.transform:
            pil_image = self.transform(pil_image)
        return (pil_image, y_label)
# Check accuracy on a data loader to see how good our model is
def check_accuracy(loader, model, mode, epoch):
    """Run `model` over `loader` and print correct/total and the accuracy.

    `mode` is only a banner label; `epoch` is currently unused.  Relies on
    the module-level `device`.  Leaves the model in eval mode.
    """
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device=device), y.to(device=device)
            # argmax over the class dimension == the index part of .max(1)
            predictions = model(x).argmax(dim=1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
    print("-------" + mode + "-------")
    print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct.item() / num_samples) * 100:.2f}')
    print(num_correct.item())
# Set device: prefer the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters
in_channel = 3        # RGB input channels
num_classes = 3       # number of target classes
learning_rate = 1e-3  # unused below (inference only) -- kept from the training script
batch_size = 128
num_epochs = 200      # unused below (inference only)
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of `model` when `feature_extracting` is truthy.

    No-op otherwise; used to turn a pretrained network into a fixed
    feature extractor.
    """
    if not feature_extracting:
        return
    for p in model.parameters():
        p.requires_grad = False
# Load Data and Augment
# CIFAR-style channel means; not referenced below -- TODO confirm whether
# normalisation was intended.
rgb_mean = (0.4914, 0.4822, 0.4465)
# NOTE(review): RandomHorizontalFlip on a *test* set is unusual for pure
# inference -- confirm this is intentional.
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
test_set = PlaceDataset(csv_file='data/csv/movie_gt3.csv', img_dir='data/test3', transform=transform)
print(len(test_set))
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, num_workers=16, shuffle=False, pin_memory=True)
# Model: MNasNet0_5 used as a frozen feature extractor with a fresh
# classifier head.  (Earlier experiments -- ResNeXt / WideResNet, GoogLeNet,
# VGG19, DenseNet, Inception-v3 -- all followed the same pattern: freeze the
# backbone parameters, then replace the final fc / classifier layer.)
model_conv = torchvision.models.mnasnet0_5(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False  # freeze backbone weights
num_ftrs = model_conv.classifier[1].in_features
model_conv.classifier[1] = nn.Linear(num_ftrs, num_classes)
model_conv = model_conv.to(device)

# BUG FIX: `PATH` was used below without ever being defined, so the script
# crashed with a NameError before inference started.  Define the checkpoint
# path explicitly.
PATH = 'model.pth'  # TODO: point at the actual saved-checkpoint file
# map_location lets a GPU-trained checkpoint load on a CPU-only machine.
model = torch.load(PATH, map_location=device)
print("Checking accuracy on Test Set")
check_accuracy(test_loader, model, "Test", 200)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import io
import time
import random
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import winsound
CONFIG_FILE = './config_toefl.json'
BROWSER_WAIT_TIME = 5
HOST = 'toefl.neea.cn'
FRONT_PAGE_URL = 'https://' + HOST
BASE_LOGIN_URL = 'https://' + HOST + '/login'
# BASE_QUERY_URL = 'https://' + HOST + '/testSites'
class UserInfo(object):
    """Account credentials and polling settings loaded from a JSON file."""

    def __init__(self, config_file):
        with open(config_file, 'r') as fp:
            cfg = json.load(fp)
        # Required keys; a malformed config raises KeyError immediately.
        self.neea_id = cfg['neea_id']
        self.password = cfg['password']
        self.ymd = cfg['ymd']
        self.query_interval = cfg['query_interval']
        self.query_interval_error = cfg['query_interval_error']
def login(browser, user_info):
    """Open the login page and fill in the NEEA credentials.

    The user then completes the login manually (captcha etc.), navigates to
    the order page, and presses <ENTER> in the console to continue.
    """
    browser.get(BASE_LOGIN_URL)
    browser.implicitly_wait(BROWSER_WAIT_TIME)
    for field_id, value in (('userName', user_info.neea_id),
                            ('textPassword', user_info.password)):
        box = browser.find_element_by_id(field_id)
        box.clear()
        box.send_keys(value)
    input('Go to order page, select city and then press the <ENTER> key to continue...')
def order(browser, ymd):
    """Query seat availability for test date `ymd` and try to grab a seat.

    Selects the date in the drop-down, clicks the query button, then scans
    the result table.  When a seat is clicked, plays an alert sound and
    pauses so the order can be completed manually.
    """
    ym_combo_box = Select(browser.find_element_by_id('testDays'))
    ym_combo_box.select_by_value(ymd)
    query_button = browser.find_element_by_id('btnQuerySeat')
    query_button.click()
    browser.implicitly_wait(BROWSER_WAIT_TIME)
    table = browser.find_element_by_xpath('//*[@id="qrySeatResult"]/div/table/tbody')
    tr_list = table.find_elements_by_css_selector("tr")
    for row in tr_list:
        td_list = row.find_elements_by_tag_name('td')
        # td_list[1]: site name (first 10 characters skipped);
        # td_list[3]: availability cell -- '名额暂满' means "currently full".
        if td_list[1].text[10::] in ['上海交通大学', '上海海事大学']:
            if td_list[3].text != '名额暂满':
                print(ymd)
            continue
        # NOTE(review): because of the `continue` above, the click below is
        # only reachable for sites *not* in the preferred list -- confirm
        # whether that is the intended behaviour.
        if td_list[3].text != '名额暂满':
            td_list[3].click()
            winsound.PlaySound('D:\\huahai.wav', flags=1)
            input()  # pause so the order can be completed by hand
def main():
    """Drive the whole flow: log in once, then poll every configured date forever."""
    browser = webdriver.Firefox()
    user_info = UserInfo(CONFIG_FILE)
    login(browser, user_info)
    while True:
        try:
            for eachdate in user_info.ymd:
                order(browser, eachdate)
        except Exception as e:
            # Any scraping error (stale page, timeout, forced logout) is
            # treated as recoverable: report it, log in again, keep polling.
            print(e)
            login(browser, user_info)
            continue
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
API operations on remote files.
"""
import os
import time
import hashlib
from galaxy import exceptions
from galaxy.web import _future_expose_api as expose_api
from galaxy.util import jstree
from galaxy.web.base.controller import BaseAPIController
from operator import itemgetter
import logging
log = logging.getLogger( __name__ )
class RemoteFilesAPIController( BaseAPIController ):
    """API controller listing files available from remote sources
    (user FTP directory, per-user import directory, or the global import
    directory), either as a flat list or as a jstree structure.

    BUG FIX: the Python-2-only ``except Exception, exception`` syntax has
    been replaced by ``except Exception as exception`` throughout -- the
    ``as`` form is valid on Python 2.6+ and required on Python 3.
    """

    @expose_api
    def index( self, trans, **kwd ):
        """
        GET /api/remote_files/

        Displays remote files.

        :param  target:      target to load available datasets from, defaults to ftp
            possible values: ftp, userdir, importdir
        :type   target:      str

        :param  format:      requested format of data, defaults to flat
            possible values: flat, jstree, ajax

        :returns:   list of available files
        :rtype:     list
        """
        target = kwd.get( 'target', None )
        format = kwd.get( 'format', None )
        if target == 'userdir':
            # Per-user import directory: <user_library_import_dir>/<email>.
            user_login = trans.user.email
            user_base_dir = trans.app.config.user_library_import_dir
            if user_base_dir is None:
                raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow upload from user directories.' )
            full_import_dir = os.path.join( user_base_dir, user_login )
            if full_import_dir is not None:
                if format == 'jstree':
                    disable = kwd.get( 'disable', 'folders')
                    try:
                        userdir_jstree = self.__create_jstree( full_import_dir, disable )
                        response = userdir_jstree.jsonData()
                    except Exception as exception:
                        log.debug( str( exception ) )
                        raise exceptions.InternalServerError( 'Could not create tree representation of the given folder: ' + str( full_import_dir ) )
                elif format == 'ajax':
                    raise exceptions.NotImplemented( 'Not implemented yet. Sorry.' )
                else:
                    try:
                        response = self.__load_all_filenames( full_import_dir )
                    except Exception as exception:
                        log.error( 'Could not get user import files: %s', str( exception ), exc_info=True )
                        raise exceptions.InternalServerError( 'Could not get the files from your user directory folder.' )
            else:
                raise exceptions.InternalServerError( 'Could not get the files from your user directory folder.' )
        elif target == 'importdir':
            # Instance-wide import directory shared by all users.
            base_dir = trans.app.config.library_import_dir
            if base_dir is None:
                raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow usage of import directory.' )
            if format == 'jstree':
                disable = kwd.get( 'disable', 'folders')
                try:
                    importdir_jstree = self.__create_jstree( base_dir, disable )
                    response = importdir_jstree.jsonData()
                except Exception as exception:
                    log.debug( str( exception ) )
                    raise exceptions.InternalServerError( 'Could not create tree representation of the given folder: ' + str( base_dir ) )
            elif format == 'ajax':
                raise exceptions.NotImplemented( 'Not implemented yet. Sorry.' )
            else:
                try:
                    response = self.__load_all_filenames( base_dir )
                except Exception as exception:
                    log.error( 'Could not get user import files: %s', str( exception ), exc_info=True )
                    raise exceptions.InternalServerError( 'Could not get the files from your import directory folder.' )
        else:
            # Default target: the user's FTP upload directory.
            user_ftp_base_dir = trans.app.config.ftp_upload_dir
            if user_ftp_base_dir is None:
                raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow upload from FTP directories.' )
            try:
                user_ftp_dir = None
                identifier = trans.app.config.ftp_upload_dir_identifier
                user_ftp_dir = os.path.join( user_ftp_base_dir, getattr(trans.user, identifier) )
                if user_ftp_dir is not None:
                    response = self.__load_all_filenames( user_ftp_dir )
                else:
                    raise exceptions.ConfigDoesNotAllowException( 'You do not have an FTP directory named as your login at this Galaxy instance.' )
            except Exception as exception:
                log.error( 'Could not get ftp files: %s', str( exception ), exc_info=True )
                raise exceptions.InternalServerError( 'Could not get the files from your FTP folder.' )
        return response

    def __load_all_filenames( self, directory ):
        """
        Loads recursively all files within the given folder and its
        subfolders and returns a flat list of dicts
        (path relative to `directory`, size, ctime), sorted by path.
        """
        response = []
        if os.path.exists( directory ):
            for ( dirpath, dirnames, filenames ) in os.walk( directory ):
                for filename in filenames:
                    path = os.path.relpath( os.path.join( dirpath, filename ), directory )
                    statinfo = os.lstat( os.path.join( dirpath, filename ) )
                    response.append( dict( path=path,
                                           size=statinfo.st_size,
                                           ctime=time.strftime( "%m/%d/%Y %I:%M:%S %p", time.localtime( statinfo.st_ctime ) ) ) )
        else:
            raise exceptions.ConfigDoesNotAllowException( 'The given directory does not exist.' )
        # sort by path
        response = sorted(response, key=itemgetter("path"))
        return response

    def __create_jstree( self, directory, disable='folders' ):
        """
        Loads recursively all files and folders within the given folder
        and its subfolders and returns jstree representation
        of its structure.  `disable` selects which node type ('folders'
        or 'files') is rendered non-selectable.
        """
        userdir_jstree = None
        jstree_paths = []
        if os.path.exists( directory ):
            for ( dirpath, dirnames, filenames ) in os.walk( directory ):
                for dirname in dirnames:
                    dir_path = os.path.relpath( os.path.join( dirpath, dirname ), directory )
                    # sha1 of the relative path is used as a stable node id
                    dir_path_hash = hashlib.sha1( dir_path.encode('utf-8') ).hexdigest()
                    disabled = True if disable == 'folders' else False
                    jstree_paths.append( jstree.Path( dir_path, dir_path_hash, { 'type': 'folder', 'state': { 'disabled': disabled }, 'li_attr': { 'full_path': dir_path } } ) )
                for filename in filenames:
                    file_path = os.path.relpath( os.path.join( dirpath, filename ), directory )
                    file_path_hash = hashlib.sha1( file_path.encode('utf-8') ).hexdigest()
                    disabled = True if disable == 'files' else False
                    jstree_paths.append( jstree.Path( file_path, file_path_hash, { 'type': 'file', 'state': { 'disabled': disabled }, 'li_attr': { 'full_path': file_path } } ) )
        else:
            raise exceptions.ConfigDoesNotAllowException( 'The given directory does not exist.' )
        userdir_jstree = jstree.JSTree( jstree_paths )
        return userdir_jstree
|
# Standard library imports
from typing import TYPE_CHECKING
# Related third party imports
import pytest
# Local application/library specific imports
if TYPE_CHECKING:
from pyaww import AlwaysOnTask, User
@pytest.mark.asyncio
async def test_restart(always_on_task: "AlwaysOnTask") -> None:
    """Restarting an always-on task should succeed and return None."""
    assert await always_on_task.restart() is None
@pytest.mark.asyncio
async def test_get_always_on_task_by_id(
    client: "User", always_on_task: "AlwaysOnTask"
) -> None:
    """Fetching a task by its id should return an object equal to the fixture."""
    assert await client.get_always_on_task_by_id(always_on_task.id) == always_on_task
@pytest.mark.asyncio
async def test_update(always_on_task: "AlwaysOnTask") -> None:
    """update() should mutate the local object; the last write wins."""
    await always_on_task.update(description="A")
    await always_on_task.update(description="B")
    assert always_on_task.description == "B"
|
'''Creates latex table row to log experiment results'''
def create_latex_table_row(results_dict, model_name, train_name = "multinli"):
    """Write one LaTeX results-table row to
    ./results/<model_name>_<train_name>_results.txt.

    Args:
        results_dict: mapping from benchmark name to accuracy (a number).
        model_name:   model identifier; becomes the first table column.
        train_name:   training-set tag used only in the output file name.

    Missing benchmarks leave their column empty.  The ./results directory
    must already exist.

    Fixes over the previous version: the file handle is now closed via a
    ``with`` block even if the write fails, the invalid escape sequences
    (``"\\%"`` written as ``"\%"``, ``"\\hline"`` as ``"\hline"``) are
    spelled explicitly, and the four copy-pasted branches are collapsed
    into a data-driven loop.  The written bytes are unchanged.
    """
    # (benchmark key, suffix when present, filler when absent); the final
    # benchmark closes the row with the LaTeX row terminator.
    columns = [
        ("Numerical Stress Test", "\\% &", " & "),
        ("RTE", "\\% &", " & "),
        ("AWP", "\\% &", " & "),
        ("QuantNLI", "\\% \\\\ \\hline", " \\\\ \\hline "),
    ]
    latex_string = model_name + " & "
    for key, present_suffix, absent_filler in columns:
        if key in results_dict:
            latex_string += str(results_dict[key]) + present_suffix
        else:
            latex_string += absent_filler
    with open("./results/" + model_name + "_" + train_name + "_results.txt", "w") as f:
        f.write(latex_string)
|
"""Token bucket throttle backed by Redis."""
import time
import pkg_resources
KEY_FORMAT = 'throttle:{}'  # Redis key template; '{}' is the throttle name

# throttle knob defaults
THROTTLE_BURST_DEFAULT = 1
THROTTLE_WINDOW_DEFAULT = 5
THROTTLE_REQUESTED_TOKENS_DEFAULT = 1

# The default is to extend a throttle's knob settings TTL out
# 7 days each time the throttle is used.
DEFAULT_KNOBS_TTL = 60 * 60 * 24 * 7

# Module-level state, populated by throttle_configure().
throttle_script = None
redis = None
def _validate_throttle(key, params):
    """Validate candidate knob values for the throttle hash at `key`.

    `params` is a sequence of (value, field_name) pairs.  A non-None value
    must parse as a non-negative float (raises ValueError otherwise); for a
    None value the field must already exist in the Redis hash, or an
    IndexError is raised.

    BUG FIX: the previous version used ``assert float(param) >= 0`` for
    validation, which is silently stripped when Python runs with ``-O`` --
    the check is now an explicit comparison.
    """
    check_values_pipe = redis.pipeline()
    for param, param_name in params:
        if param is None:
            # No new value supplied: the existing field must be present.
            check_values_pipe.hexists(key, param_name)
            continue
        try:
            valid = float(param) >= 0
        except ValueError:
            valid = False
        if not valid:
            raise ValueError(
                '"{}" is not a valid throttle value. Throttle values must '
                'be positive floats.'.format(param)
            )
    if not all(check_values_pipe.execute()):
        raise IndexError(
            "Throttle knob {} doesn't exist or is invalid".format(key)
        )
def _verify_configured():
    """Raise RuntimeError unless throttle_configure() has set up module state."""
    if redis and throttle_script:
        return
    raise RuntimeError('Throttle is not configured')
def throttle(
    name,
    rps,
    burst=THROTTLE_BURST_DEFAULT,
    window=THROTTLE_WINDOW_DEFAULT,
    requested_tokens=THROTTLE_REQUESTED_TOKENS_DEFAULT,
    knobs_ttl=DEFAULT_KNOBS_TTL,
):
    """
    Throttle that allows orchestration of distributed workers.

    Args:
        name: Name of throttle. Used as part of the Redis key.
        rps: Default requests per second allowed by this throttle
        burst: Default burst multiplier
        window: Default limit window in seconds
        requested_tokens: Number of tokens required for this work request
        knobs_ttl: Throttle's knob TTL value (0 disables setting TTL)

    Returns:
        allowed: True if work is allowed
        tokens: Number of tokens left in the throttle bucket
        sleep: Seconds before the next limit window starts; when work is
            not allowed the caller should sleep this long. (float)

    The first use of a throttle stores the default rps/burst/window values
    in Redis; subsequent calls use whatever is stored there, so knobs can
    be retuned on the fly by changing the values in Redis (see
    throttle_set).  rps == 0 denies all work with a full sleep;
    rps == -1 allows all work.
    """
    _verify_configured()
    script_args = [
        KEY_FORMAT.format(name),
        rps,
        burst,
        window,
        requested_tokens,
        knobs_ttl,
    ]
    allowed, tokens, sleep = throttle_script(keys=[], args=script_args)
    # `sleep` stays a string until this final float() conversion: converting
    # earlier introduces floating point rounding that loses true
    # microsecond resolution for the sleep value.
    return allowed == 1, int(tokens), float(sleep)
def throttle_configure(redis_instance, testing=False):
    """Register Lua throttle script in Redis.

    Stores the client and the registered script in module globals so the
    other throttle_* functions can use them.
    """
    global redis, throttle_script
    redis = redis_instance
    lua_script = pkg_resources.resource_string(
        __name__, 'throttle.lua'
    ).decode()
    # Modify scripts when testing so time can be frozen: the patched script
    # prefers the "frozen_second"/"frozen_microsecond" keys over the Redis
    # server clock when they exist.
    if testing:
        lua_script = lua_script.replace(
            'local time = redis.call("time")',
            'local time\n'
            'if redis.call("exists", "frozen_second") == 1 then\n'
            '    time = redis.call("mget", "frozen_second", "frozen_microsecond")\n'  # noqa: E501
            'else\n'
            '    time = redis.call("time")\n'
            'end',
        )
    throttle_script = redis.register_script(lua_script)
def throttle_delete(name):
    """Delete Redis throttle data (bucket state and knob settings)."""
    _verify_configured()
    base_key = KEY_FORMAT.format(name)
    pipeline = redis.pipeline()
    for k in (base_key, base_key + ':knobs'):
        pipeline.delete(k)
    pipeline.execute()
def throttle_get(name):
    """
    Get throttle values from redis.

    Returns: (tokens, refreshed, rps, burst, window)

    Fixes over the previous version: a dead initial assignment of the
    ':knobs' key (immediately overwritten) has been removed, and the
    function now checks configuration first, consistent with the other
    throttle_* functions.
    """
    _verify_configured()
    # Get each value in hashes individually in case they don't exist.
    get_values_pipe = redis.pipeline()
    base_key = KEY_FORMAT.format(name)
    get_values_pipe.hget(base_key, 'tokens')
    get_values_pipe.hget(base_key, 'refreshed')
    knobs_key = base_key + ':knobs'
    get_values_pipe.hget(knobs_key, 'rps')
    get_values_pipe.hget(knobs_key, 'burst')
    get_values_pipe.hget(knobs_key, 'window')
    values = get_values_pipe.execute()
    return values
def throttle_reset(name):
    """Reset throttle settings by deleting the knob hash."""
    _verify_configured()
    redis.delete(KEY_FORMAT.format(name) + ':knobs')
def throttle_set(name, rps=None, burst=None, window=None, knobs_ttl=None):
    """
    Adjust throttle values in redis.

    If knobs_ttl is used here the throttle() call needs to be called
    with knobs_ttl=0 so the ttl isn't also set in the Lua script
    """
    _verify_configured()
    knobs_key = KEY_FORMAT.format(name) + ':knobs'
    params = [(rps, 'rps'), (burst, 'burst'), (window, 'window')]
    _validate_throttle(knobs_key, params)
    set_values_pipe = redis.pipeline()
    for value, field in params:
        if value is not None:
            set_values_pipe.hset(knobs_key, field, value)
    if knobs_ttl:
        set_values_pipe.expire(knobs_key, knobs_ttl)
    set_values_pipe.execute()
def throttle_wait(name, *args, **kwargs):
    """Return a callable that blocks until the named throttle allows work.

    Without `max_wait` this waits potentially forever for permission.

    Usage:
        throttle = throttle_wait('name', rps=123)
        for ...:
            throttle()
            do_work()
    """
    max_wait = kwargs.pop('max_wait', None)

    def throttle_func(requested_tokens=1):
        start_time = time.time()
        while True:
            allowed, tokens, sleep = throttle(
                name, *args, requested_tokens=requested_tokens, **kwargs
            )
            if allowed:
                break
            # Give up (returning the last denied result) once max_wait is
            # exceeded; otherwise sleep out the current window and retry.
            if max_wait is not None and time.time() - start_time > max_wait:
                break
            time.sleep(sleep)
        return allowed, tokens, sleep

    return throttle_func
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 14:13:05 2019
@author: matusmacbookpro
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision
#adapted from: https://github.com/NVlabs/Deep_Object_Pose/blob/master/scripts/train.py
def create_stage(in_channels, out_channels, first=False):
    '''Create the neural network layers for a single stage.

    The first stage uses 3x3 convolutions with a wide (512-channel)
    penultimate layer; later stages use 7x7 convolutions throughout.
    Module names are consecutive string indices, matching nn.Sequential
    defaults.
    '''
    if first:
        padding, kernel, count, final_channels = 1, 3, 6, 512
    else:
        padding, kernel, count, final_channels = 3, 7, 10, 128
    mid_channels = 128

    model = nn.Sequential()
    idx = 0

    def add(layer):
        # Append `layer` under the next numeric name.
        nonlocal idx
        model.add_module(str(idx), layer)
        idx += 1

    # First convolution
    add(nn.Conv2d(in_channels, mid_channels, kernel_size=kernel,
                  stride=1, padding=padding))
    # Middle ReLU/conv pairs
    while idx < count - 1:
        add(nn.ReLU(inplace=True))
        add(nn.Conv2d(mid_channels, mid_channels, kernel_size=kernel,
                      stride=1, padding=padding))
    # Penultimate 1x1 convolution
    add(nn.ReLU(inplace=True))
    add(nn.Conv2d(mid_channels, final_channels, kernel_size=1, stride=1))
    # Last 1x1 convolution producing the output maps
    add(nn.ReLU(inplace=True))
    add(nn.Conv2d(final_channels, out_channels, kernel_size=1, stride=1))
    return model
class Net(nn.Module):
    """DOPE network: a truncated VGG-19 feature extractor followed by up to
    six cascaded stages.  Each stage predicts belief maps (``*_2`` heads,
    keypoint heatmaps) and affinity maps (``*_1`` heads); later stages see
    the previous stage's outputs concatenated with the shared features."""

    def __init__(self,pretrained=True,numBeliefMap=9,numAffinity=16,stop_at_stage=6): # number of stages to process (if less than total number of stages):
        # Fixed seed so the newly added (non-pretrained) layers initialise
        # reproducibly.
        torch.manual_seed(123)
        super(Net, self).__init__()

        self.stop_at_stage = stop_at_stage

        # First 24 layers of VGG-19 as the shared feature extractor.
        vgg_full = torchvision.models.vgg19(pretrained=pretrained).features
        self.vgg = nn.Sequential()
        for i_layer in range(24):
            self.vgg.add_module(str(i_layer), vgg_full[i_layer])

        # Add some layers
        # NOTE(review): index 23 was already filled by the loop above, so
        # add_module(str(23), ...) *replaces* that VGG layer with the new
        # 512->256 conv -- presumably intentional (reduces features to
        # 128 channels for the stages); confirm.
        i_layer = 23
        self.vgg.add_module(str(i_layer), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1))
        self.vgg.add_module(str(i_layer+1), nn.ReLU(inplace=True))
        self.vgg.add_module(str(i_layer+2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1))
        self.vgg.add_module(str(i_layer+3), nn.ReLU(inplace=True))

        # _2 are the belief map stages
        self.m1_2 = create_stage(128, numBeliefMap, True)
        self.m2_2 = create_stage(128 + numBeliefMap + numAffinity,
                                 numBeliefMap, False)
        self.m3_2 = create_stage(128 + numBeliefMap + numAffinity,
                                 numBeliefMap, False)
        self.m4_2 = create_stage(128 + numBeliefMap + numAffinity,
                                 numBeliefMap, False)
        self.m5_2 = create_stage(128 + numBeliefMap + numAffinity,
                                 numBeliefMap, False)
        self.m6_2 = create_stage(128 + numBeliefMap + numAffinity,
                                 numBeliefMap, False)

        # _1 are the affinity map stages
        self.m1_1 = create_stage(128, numAffinity, True)
        self.m2_1 = create_stage(128 + numBeliefMap + numAffinity,
                                 numAffinity, False)
        self.m3_1 = create_stage(128 + numBeliefMap + numAffinity,
                                 numAffinity, False)
        self.m4_1 = create_stage(128 + numBeliefMap + numAffinity,
                                 numAffinity, False)
        self.m5_1 = create_stage(128 + numBeliefMap + numAffinity,
                                 numAffinity, False)
        self.m6_1 = create_stage(128 + numBeliefMap + numAffinity,
                                 numAffinity, False)

    def forward(self, x):
        '''Runs inference on the neural network.

        Returns two lists (belief maps, affinity maps) with one entry per
        executed stage; truncated after `self.stop_at_stage` stages.
        '''
        out1 = self.vgg(x)

        out1_2 = self.m1_2(out1)
        out1_1 = self.m1_1(out1)

        if self.stop_at_stage == 1:
            return [out1_2],\
                   [out1_1]

        # Each later stage consumes (previous belief, previous affinity,
        # shared VGG features) concatenated along the channel dimension.
        out2 = torch.cat([out1_2, out1_1, out1], 1)
        out2_2 = self.m2_2(out2)
        out2_1 = self.m2_1(out2)

        if self.stop_at_stage == 2:
            return [out1_2, out2_2],\
                   [out1_1, out2_1]

        out3 = torch.cat([out2_2, out2_1, out1], 1)
        out3_2 = self.m3_2(out3)
        out3_1 = self.m3_1(out3)

        if self.stop_at_stage == 3:
            return [out1_2, out2_2, out3_2],\
                   [out1_1, out2_1, out3_1]

        out4 = torch.cat([out3_2, out3_1, out1], 1)
        out4_2 = self.m4_2(out4)
        out4_1 = self.m4_1(out4)

        if self.stop_at_stage == 4:
            return [out1_2, out2_2, out3_2, out4_2],\
                   [out1_1, out2_1, out3_1, out4_1]

        out5 = torch.cat([out4_2, out4_1, out1], 1)
        out5_2 = self.m5_2(out5)
        out5_1 = self.m5_1(out5)

        if self.stop_at_stage == 5:
            return [out1_2, out2_2, out3_2, out4_2, out5_2],\
                   [out1_1, out2_1, out3_1, out4_1, out5_1]

        out6 = torch.cat([out5_2, out5_1, out1], 1)
        out6_2 = self.m6_2(out6)
        out6_1 = self.m6_1(out6)

        return [out1_2, out2_2, out3_2, out4_2, out5_2, out6_2],\
               [out1_1, out2_1, out3_1, out4_1, out5_1, out6_1]
class dope_net():
    """Thin training/evaluation wrapper around ``Net``: owns the model, the
    Adam optimizer and device placement, and exposes train/test/save/load
    helpers.  Uses the (legacy) ``Variable`` API."""

    def __init__(self,learning_rate,gpu_device):
        # cud: whether CUDA is available; the model and batches are moved
        # to `gpu_device` only in that case.
        self.cud = torch.cuda.is_available()
        self.gpu_device = gpu_device
        self.learning_rate = learning_rate

        self.net = Net()
        if self.cud:
            self.net.cuda(device=self.gpu_device)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.learning_rate)

    def compute_loss(self,output_belief,output_affinities,target_belief,target_affinity):
        """Sum of per-stage MSE losses over belief and affinity outputs."""
        loss = None

        for l in output_belief: #output, each belief map layers.
            if loss is None:
                loss = ((l - target_belief) * (l-target_belief)).mean()
            else:
                loss_tmp = ((l - target_belief) * (l-target_belief)).mean()
                loss += loss_tmp

        # Affinities loss
        for l in output_affinities: #output, each belief map layers.
            loss_tmp = ((l - target_affinity) * (l-target_affinity)).mean()
            loss += loss_tmp

        return loss

    def adjust_learning_rate(self,optimizer,lr):
        """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
        # NOTE(review): this simply sets `lr` on every param group; the
        # decay schedule described above must be computed by the caller.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    def train(self,train_images,train_affinities,train_beliefs):
        # INPUT:
        # train_images: batch of images (float32), size: (batch_size,3,x,y)
        # train_affinities: batch of affinity maps (float32), size: (batch_size,16,x/8,y/8)
        # train_beliefs: batch of belief maps (float32), size: (batch_size,9,x/8,y/8)
        # OUTPUTS:
        # loss: scalar
        if self.cud:
            train_images_v = Variable(train_images.cuda(device=self.gpu_device))
            train_affinities_v = Variable(train_affinities.cuda(device=self.gpu_device))
            train_beliefs_v = Variable(train_beliefs.cuda(device=self.gpu_device))
        else:
            train_images_v = Variable(train_images)
            train_affinities_v = Variable(train_affinities)
            train_beliefs_v = Variable(train_beliefs)

        # Standard step: zero grads, forward, loss, backward, update.
        self.optimizer.zero_grad()
        output_belief,output_affinity = self.net.forward(train_images_v)
        J = self.compute_loss(output_belief,output_affinity,train_beliefs_v,train_affinities_v)
        J.backward()
        self.optimizer.step()

        if self.cud:
            loss = J.data.cpu().numpy()
        else:
            loss = J.data.numpy()

        return loss

    def test(self,test_images,test_affinities,test_beliefs):
        # INPUT:
        # test_images: batch of images (float32), size: (test_batch_size,3,x,y)
        # test_affinities: batch of affinity maps (float32), size: (test_batch_size,16,x/8,y/8)
        # test_beliefs: batch of belief maps (float32), size: (test_batch_size,9,x/8,y/8)
        # OUTPUTS:
        # loss: scalar
        # belief: output belief maps, size: size: (test_batch_size,9,x/8,y/8)
        # affinity: output affinity maps, size: (test_batch_size,16,x/8,y/8)
        if self.cud:
            test_images_v = Variable(test_images.cuda(device=self.gpu_device))
            test_beliefs_v = Variable(test_beliefs.cuda(device=self.gpu_device))
            test_affinities_v = Variable(test_affinities.cuda(device=self.gpu_device))
        else:
            test_images_v = Variable(test_images)
            test_beliefs_v = Variable(test_beliefs)
            test_affinities_v = Variable(test_affinities)

        # no_grad: evaluation only, skip autograd bookkeeping.
        with torch.no_grad():
            output_belief,output_affinity = self.net.forward(test_images_v)
        J = self.compute_loss(output_belief,output_affinity,test_beliefs_v,test_affinities_v)

        # Index 5 = final (sixth) stage output.
        if self.cud:
            belief = output_belief[5].data.cpu().numpy()
            affinity = output_affinity[5].data.cpu().numpy()
            loss = J.data.cpu().numpy()
        else:
            belief = output_belief[5].data.numpy()
            affinity = output_affinity[5].data.numpy()
            loss = J.data.numpy()

        return belief,affinity,loss

    def save_model(self,filename):
        # Saves only the state dict (weights), not the full module.
        torch.save(self.net.state_dict(),filename)

    def empty_cuda_cache(self):
        torch.cuda.empty_cache()

    def load_model(self,filename):
        # map_location keeps GPU-saved checkpoints loadable on either device.
        if self.cud:
            self.net.load_state_dict(torch.load(filename,map_location = 'cuda:0'))
        else:
            self.net.load_state_dict(torch.load(filename,map_location='cpu'))
import pytest
from vlde import Validator, ValidateError, RulesError
def test_str():
    '''test str type'''
    v = Validator()
    # Values that satisfy the 'str|string' rule.
    for good in ('hello', '', str(123)):
        v.set_rules(good, 'str|string')
    # Every non-string value must be rejected with ValidateError.
    for bad in (123, [], {}, (), None, bool(), float(), bytes()):
        with pytest.raises(ValidateError):
            v.set_rules(bad, 'str')
|
# Dim the LED strip: lower the pixel brightness by 10 steps.
import pixels
pixels.Pixels().brightness_decrease(step = 10)
|
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
# Global gradient-evaluation counter -- not referenced in the code visible
# here; presumably incremented elsewhere.  TODO confirm.
gradcall=0
class EXXSolver:
    """EXXSolver(solver)

    Closed-shell OEP/EXX solver (Yang/Wu).  Wraps a *converged* HF or DFT
    calculation and optimises the expansion coefficients `b` of the local
    exchange potential (in the orbital basis) with BFGS.  Python 2 code
    (uses xrange).
    """
    def __init__(self,solver):
        # Solver is a pointer to a HF or a DFT calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S          # overlap matrix
        self.h = self.solver.h          # one-electron (core) Hamiltonian
        self.Ints = self.solver.Ints    # two-electron integrals
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nclosed, self.nopen = self.molecule.get_closedopen()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbs = self.solver.orbs
        self.orbe = self.solver.orbe
        # Gij[g][i,j] = three-center integral <i|g|j>; symmetric in (i,j).
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf,self.nbf),'d')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i+1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf,gbf,jbf)
                    gmat[i,j] = gij
                    gmat[j,i] = gij
        # H0 = core Hamiltonian + Fermi-Amaldi scaled Coulomb potential of
        # the reference density (the fixed part of the OEP Hamiltonian).
        D0 = mkdens(self.orbs,0,self.nclosed)
        J0 = getJ(self.Ints,D0)
        Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
        self.H0 = self.h + Vfa
        self.b = zeros(self.nbf,'d')    # OEP expansion coefficients (start at 0)
        return

    def iterate(self,**kwargs):
        """Optimise the `b` coefficients by BFGS on get_energy/get_gradient."""
        self.iter = 0
        self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
        logging.debug("iter    Energy")
        logging.debug("----    ------")
        self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
        return

    def get_energy(self,b):
        """Total energy for coefficient vector `b` (BFGS objective)."""
        self.iter += 1
        self.Hoep = get_Hoep(b,self.H0,self.Gij)
        self.orbe,self.orbs = geigh(self.Hoep,self.S)
        if self.etemp:
            # Finite electron temperature: fractional (Fermi) occupations.
            self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
                                               self.etemp)
        else:
            self.D = mkdens(self.orbs,0,self.nclosed)
            self.entropy=0
        self.F = get_fock(self.D,self.Ints,self.h)
        self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
        return self.energy

    def get_gradient(self,b):
        """Gradient of the energy w.r.t. `b` (occupied-virtual coupling terms)."""
        energy = self.get_energy(b)
        Fmo = simx(self.F,self.orbs)
        bp = zeros(self.nbf,'d')

        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g],self.orbs)

            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nclosed):
                for a in xrange(self.nclosed,self.norb):
                    bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])

        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp
class UEXXSolver:
    """UEXXSolver(solver) -- unrestricted (spin-polarized) EXX/OEP solver.

    Takes a converged UHF calculation and optimizes an effective local
    potential (Yang/Wu OEP expansion) whose orbitals reproduce the HF
    exchange energy.  Alpha and beta spins carry separate coefficient
    vectors, packed into one array ``b`` of length 2*nbf.
    """
    def __init__(self,solver):
        # Solver is a pointer to a UHF calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S            # overlap matrix
        self.h = self.solver.h            # one-electron (core) Hamiltonian
        self.Ints = self.solver.Ints      # two-electron integrals
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nalpha, self.nbeta = self.molecule.get_alphabeta()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbsa = self.solver.orbsa
        self.orbsb = self.solver.orbsb
        self.orbea = self.solver.orbea
        self.orbeb = self.solver.orbeb
        # Three-center integrals <ibf|gbf|jbf> for each potential basis
        # function g.  N^3 storage -- fine for small systems only.
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf,self.nbf),'d')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i+1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf,gbf,jbf)
                    gmat[i,j] = gij
                    gmat[j,i] = gij
        # Fermi-Amaldi reference potential built from the input orbitals'
        # total density (alpha + beta).
        D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
        J0 = getJ(self.Ints,D0)
        Vfa = ((self.nel-1.)/self.nel)*J0
        self.H0 = self.h + Vfa
        # Packed coefficient vector: first nbf entries alpha, last nbf beta.
        self.b = zeros(2*self.nbf,'d')
        return
    def iterate(self,**kwargs):
        # etemp: electron temperature for finite-temperature (Fermi) occupations.
        self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
        self.iter = 0
        logging.debug("iter Energy <b|b>")
        logging.debug("---- ------ -----")
        # BFGS-minimize the energy over the packed coefficient vector.
        self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
        return
    def get_energy(self,b):
        """Energy of the OEP determinant for packed coefficients b."""
        self.iter += 1
        # Unpack alpha/beta coefficient halves.
        ba = b[:self.nbf]
        bb = b[self.nbf:]
        self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
        self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
        self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
        self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
        if self.etemp:
            # NOTE(review): the 2* factor mirrors get_efermi usage elsewhere
            # in this module (electron count convention) -- confirm.
            self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
                                            self.etemp)
            self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
                                            self.etemp)
            self.entropy = 0.5*(entropya+entropyb)
        else:
            self.Da = mkdens(self.orbsa,0,self.nalpha)
            self.Db = mkdens(self.orbsb,0,self.nbeta)
            self.entropy=0
        # Coulomb from the total density; exchange per spin channel.
        J = getJ(self.Ints,self.Da+self.Db)
        Ka = getK(self.Ints,self.Da)
        Kb = getK(self.Ints,self.Db)
        self.Fa = self.h + J - Ka
        self.Fb = self.h + J - Kb
        self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
                           trace2(self.h+self.Fb,self.Db))\
                           + self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
        return self.energy
    def get_gradient(self,b):
        """Gradient dE/db; first nbf entries alpha, last nbf beta."""
        # Refreshes Fa/Fb, orbitals and eigenvalues for the current b.
        energy = self.get_energy(b)
        Fmoa = simx(self.Fa,self.orbsa)
        Fmob = simx(self.Fb,self.orbsb)
        bp = zeros(2*self.nbf,'d')
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g],self.orbsa)
            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nalpha):
                for a in xrange(self.nalpha,self.norb):
                    bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
        for g in xrange(self.nbf):
            # Same occupied/virtual sum for the beta channel, written into
            # the second half of the packed gradient vector.
            Gmo = simx(self.Gij[g],self.orbsb)
            for i in xrange(self.nbeta):
                for a in xrange(self.nbeta,self.norb):
                    bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp
def exx(atoms,orbs,**kwargs):
    """Convenience alias: EXX is OEP with Hartree-Fock exchange (see oep_hf)."""
    return oep_hf(atoms,orbs,**kwargs)
def oep_hf(atoms,orbs,**kwargs):
    """oep_hf - Form the optimized effective potential for HF exchange.

    Thin wrapper around oep() with the EXX energy/gradient callbacks.
    See notes on options and other args in the oep routine.
    """
    return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs)
def oep(atoms,orbs,energy_func,grad_func=None,**kwargs):
    """oep - Form the optimized effective potential for a given energy expression

    oep(atoms,orbs,energy_func,grad_func=None,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals
    energy_func The function that returns the energy for the given method
    grad_func   The function that returns the force for the given method

    Returns (energy, orbe, orbs) for the converged OEP.

    Options
    -------
    verbose       False   Output terse information to stdout (default)
                  True    Print out additional information
    ETemp         False   Use ETemp value for finite temperature DFT (default)
                  float   Use (float) for the electron temperature
    bfs           None    The basis functions to use. List of CGBF's
    pbfs          None    Basis functions for the potential (defaults to bfs)
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints
    bvec          None    Starting guess for the potential coefficients
    """
    # NOTE: currently unused, kept so the documented options stay visible.
    verbose = kwargs.get('verbose')
    opt_method = kwargs.get('opt_method',settings.OEPOptMethod)
    ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
    bfs = getbasis(atoms,**kwargs)
    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)
    S,h,Ints = getints(bfs,atoms,**kwargs)
    nel = atoms.get_nel()
    nocc,nopen = atoms.get_closedopen()
    Enuke = atoms.get_enuke()
    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    # Starting guess for the potential expansion coefficients.
    bvec = kwargs.get('bvec')
    if bvec:
        assert len(bvec) == npbf
        b = array(bvec)
    else:
        b = zeros(npbf,'d')
    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij
    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbs,0,nocc)
    J0 = getJ(Ints,D0)
    Vfa = (2*(nel-1.)/nel)*J0
    H0 = h + Vfa
    # Minimize the chosen energy functional over the coefficients b.
    b = fminBFGS(energy_func,b,grad_func,
                 (nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
                 logger=logging)
    energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
                                   S,h,Ints,H0,Gij,return_flag=1)
    return energy,orbe,orbs
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
    """Computes the energy for the OEP/HF functional

    Options:
    return_flag    0   Just return the energy
                   1   Return energy, orbe, orbs
                   2   Return energy, orbe, orbs, F
    """
    return_flag = kwargs.get('return_flag')
    Hoep = get_Hoep(b,H0,Gij)
    orbe,orbs = geigh(Hoep,S)
    if ETemp:
        # Finite-temperature (Fermi) occupations plus entropy term.
        efermi = get_efermi(nel,orbe,ETemp)
        occs = get_fermi_occs(efermi,orbe,ETemp)
        D = mkdens_occs(orbs,occs)
        entropy = get_entropy(occs,ETemp)
    else:
        D = mkdens(orbs,0,nocc)
    F = get_fock(D,Ints,h)
    energy = trace2(h+F,D)+Enuke
    if ETemp: energy += entropy
    # HOMO-LUMO gap in kcal/mol (627.51 converts from Hartree).
    # BUGFIX: use floor division -- nel/2 yields a float index under
    # true division (Python 3 / future imports); identical result in py2.
    iref = nel//2
    gap = 627.51*(orbe[iref]-orbe[iref-1])
    logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
                  % (energy,sqrt(dot(b,b)),gap))
    #logging.debug("%s" % orbe)
    if return_flag == 1:
        return energy,orbe,orbs
    elif return_flag == 2:
        return energy,orbe,orbs,F
    return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
    """Computes the gradient for the OEP/HF functional.

    return_flag    0   Just return gradient
                   1   Return energy,gradient
                   2   Return energy,gradient,orbe,orbs
    """
    # Dump the gradient every 10 steps so we can restart...
    # NOTE(review): assumes a module-level 'gradcall' counter is
    # initialized elsewhere in this file -- confirm.
    global gradcall
    gradcall += 1
    #if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
    # Form the new potential and the new orbitals
    energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
                                        S,h,Ints,H0,Gij,return_flag=2)
    # Fock matrix in the MO basis: C^T F C.
    Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
    norb = nbf
    bp = zeros(nbf,'d') # dE/db
    for g in xrange(nbf):
        # Transform Gij[g] to MOs. This is done over the whole
        # space rather than just the parts we need. I can speed
        # this up later by only forming the i,a elements required
        Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
        # Now sum the appropriate terms to get the b gradient
        # (first-order response over occupied/virtual pairs).
        for i in xrange(nocc):
            for a in xrange(nocc,norb):
                bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
    #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
    return_flag = kwargs.get('return_flag')
    if return_flag == 1:
        return energy,bp
    elif return_flag == 2:
        return energy,bp,orbe,orbs
    return bp
def get_Hoep(b,H0,Gij):
    """Assemble the OEP one-electron Hamiltonian.

    H = H0 + sum_g b[g] * Gij[g], i.e. the fixed reference Hamiltonian
    plus the expansion of the effective potential in the Gaussian
    potential basis.  Does not mutate H0 (builds new sums).
    """
    Hoep = H0
    # Add the contributions from the gaussian potential functions
    # H[ij] += b[g]*<ibf|g|jbf>
    # BUGFIX: range() instead of the Python-2-only xrange(); behavior is
    # identical and the function becomes usable under Python 3 as well.
    for g in range(len(b)):
        Hoep = Hoep + b[g]*Gij[g]
    return Hoep
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**kwargs):
    """oep_hf_an - Form the optimized effective potential for HF exchange.
    Implementation of Wu and Yang's Approximate Newton Scheme
    from J. Theor. Comp. Chem. 2, 627 (2003).

    oep_hf_an(atoms,orbs,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals

    Returns (energy, orbe, orbs) for the converged OEP.

    Options
    -------
    bfs           None    The basis functions to use for the wfn
    pbfs          None    The basis functions to use for the pot
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints
    bvec          None    Starting guess for the potential coefficients
    """
    maxiter = kwargs.get('maxiter',settings.OEPIters)
    tol = kwargs.get('tol',settings.OEPTolerance)
    bfs = getbasis(atoms,**kwargs)
    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)
    # NOTE(review): a potential basis different from the wfn basis is not
    # fully supported below (X and c are sized by nbf) -- confirm npbf == nbf.
    S,h,Ints = getints(bfs,atoms)
    nel = atoms.get_nel()
    nocc,nopen = atoms.get_closedopen()
    Enuke = atoms.get_enuke()
    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    norb = nbf
    # Starting guess for the potential expansion coefficients.
    # BUGFIX: this guess used to be unconditionally overwritten by a
    # later 'b = zeros(nbf,"d")', silently discarding a user 'bvec'.
    bvec = kwargs.get('bvec')
    if bvec:
        assert len(bvec) == npbf
        b = array(bvec)
    else:
        b = zeros(npbf,'d')
    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij
    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbs,0,nocc)
    J0 = getJ(Ints,D0)
    Vfa = (2*(nel-1.)/nel)*J0
    H0 = h + Vfa
    eold = 0
    for iter in xrange(maxiter):
        Hoep = get_Hoep(b,H0,Gij)
        orbe,orbs = geigh(Hoep,S)
        D = mkdens(orbs,0,nocc)
        Vhf = get2JmK(Ints,D)
        energy = trace2(2*h+Vhf,D)+Enuke
        if abs(energy-eold) < tol:
            break
        else:
            eold = energy
        logging.debug("OEP AN Opt: %d %f" % (iter,energy))
        # Approximate-Newton step: build the response matrix X and the
        # right-hand side c in the MO basis, then solve X b = c.
        dV_ao = Vhf-Vfa
        dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        for k in xrange(nbf):
            Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
            for i in xrange(nocc):
                for a in xrange(nocc,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
            for l in xrange(nbf):
                Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
                for i in xrange(nocc):
                    for a in xrange(nocc,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
        # This should actually be a pseudoinverse...
        b = solve(X,c)
    # BUGFIX: was logger.info -- 'logger' is undefined in this module;
    # the rest of the file uses the logging module directly.
    logging.info("Final OEP energy = %f" % energy)
    return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**kwargs):
    """oep_uhf_an - Optimized effective potential for HF exchange,
    unrestricted (spin-polarized) reference.
    Implementation of Wu and Yang's Approximate Newton Scheme
    from J. Theor. Comp. Chem. 2, 627 (2003).

    oep_uhf_an(atoms,orbsa,orbsb,**kwargs)

    atoms         A Molecule object containing a list of the atoms
    orbsa,orbsb   Matrices of guess alpha/beta orbitals

    Returns (energy, (orbea,orbeb), (orbsa,orbsb)).

    Options
    -------
    bfs           None    The basis functions to use for the wfn
    pbfs          None    The basis functions to use for the pot
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints
    """
    maxiter = kwargs.get('maxiter',settings.OEPIters)
    tol = kwargs.get('tol',settings.OEPTolerance)
    ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
    bfs = getbasis(atoms,**kwargs)
    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)
    S,h,Ints = getints(bfs,atoms,**kwargs)
    nel = atoms.get_nel()
    nclosed,nopen = atoms.get_closedopen()
    nalpha,nbeta = nclosed+nopen,nclosed
    Enuke = atoms.get_enuke()
    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    norb = nbf
    # Separate coefficient vectors for the alpha and beta potentials.
    ba = zeros(npbf,'d')
    bb = zeros(npbf,'d')
    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij
    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
    J0 = getJ(Ints,D0)
    Vfa = ((nel-1.)/nel)*J0
    H0 = h + Vfa
    eold = 0
    for iter in xrange(maxiter):
        Hoepa = get_Hoep(ba,H0,Gij)
        # BUGFIX: the beta Hamiltonian was built from 'ba' (copy-paste
        # error), so the beta potential coefficients were never used.
        Hoepb = get_Hoep(bb,H0,Gij)
        orbea,orbsa = geigh(Hoepa,S)
        orbeb,orbsb = geigh(Hoepb,S)
        if ETemp:
            # Finite-temperature occupations and entropy per spin channel.
            efermia = get_efermi(2*nalpha,orbea,ETemp)
            occsa = get_fermi_occs(efermia,orbea,ETemp)
            Da = mkdens_occs(orbsa,occsa)
            efermib = get_efermi(2*nbeta,orbeb,ETemp)
            occsb = get_fermi_occs(efermib,orbeb,ETemp)
            Db = mkdens_occs(orbsb,occsb)
            entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
        else:
            Da = mkdens(orbsa,0,nalpha)
            Db = mkdens(orbsb,0,nbeta)
        J = getJ(Ints,Da) + getJ(Ints,Db)
        Ka = getK(Ints,Da)
        Kb = getK(Ints,Db)
        energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
                 +Enuke
        if ETemp: energy += entropy
        if abs(energy-eold) < tol:
            break
        else:
            eold = energy
        logging.debug("OEP AN Opt: %d %f" % (iter,energy))
        # Do alpha and beta separately
        # Alphas
        # NOTE(review): these MO transforms use C*V*C^T while the
        # restricted routine uses C^T*V*C -- confirm the orbital
        # matrix orientation before "fixing" either one.
        dV_ao = J-Ka-Vfa
        dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        for k in xrange(nbf):
            Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
                                                     transpose(orbsa)))
            for i in xrange(nalpha):
                for a in xrange(nalpha,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
            for l in xrange(nbf):
                Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
                                                         transpose(orbsa)))
                for i in xrange(nalpha):
                    for a in xrange(nalpha,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
        # This should actually be a pseudoinverse...
        ba = solve(X,c)
        # Betas
        dV_ao = J-Kb-Vfa
        dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        for k in xrange(nbf):
            Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
                                                     transpose(orbsb)))
            for i in xrange(nbeta):
                for a in xrange(nbeta,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
            for l in xrange(nbf):
                Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
                                                         transpose(orbsb)))
                for i in xrange(nbeta):
                    for a in xrange(nbeta,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
        # This should actually be a pseudoinverse...
        bb = solve(X,c)
    # BUGFIX: was logger.info -- 'logger' is undefined in this module.
    logging.info("Final OEP energy = %f" % energy)
    return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
    """Legacy smoke test: RHF on LiH followed by EXX (Python 2 only --
    uses print statements)."""
    from PyQuante.Molecule import Molecule
    from PyQuante.Ints import getbasis,getints
    from PyQuante.hartree_fock import rhf
    logging.basicConfig(level=logging.DEBUG,format="%(message)s")
    #mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
    # units='Angstrom')
    mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
    bfs = getbasis(mol)
    S,h,Ints = getints(bfs,mol)
    print "after integrals"
    # Converged RHF orbitals seed the EXX/OEP optimization.
    E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
    print "RHF energy = ",E_hf
    E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
    return
def test():
    """Restricted EXX smoke test on He (Python 2 only -- print statements)."""
    from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
    logging.basicConfig(level=logging.DEBUG,format="%(message)s")
    mol = Molecule("He",[(2,(0,0,0))])
    solver = HFSolver(mol)
    solver.iterate()
    print "HF energy = ",solver.energy
    dft_solver = DFTSolver(mol)
    dft_solver.iterate()
    print "DFT energy = ",dft_solver.energy
    # Seed the EXX/OEP solver with the converged HF calculation.
    oep = EXXSolver(solver)
    # Testing 0 temp
    oep.iterate()
    # Testing finite temp
    oep.iterate(etemp=40000)
    return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
    # Exercise both the restricted and the unrestricted EXX drivers.
    test()
    utest()
|
from balebot.models.messages.json_message import JsonMessage
from balebot.filters.filter import Filter
class JsonFilter(Filter):
    """Message filter that accepts only JSON messages."""

    def match(self, message):
        # True when the incoming message is a JsonMessage (or subclass).
        return isinstance(message, JsonMessage)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
    """Declare the non-Python data files shipped with gammapy.irf tests.

    Returns a setuptools-style package_data mapping with one glob
    pattern per supported data-file extension.
    """
    extensions = ["fits", "root", "xml", "json", "conf", "txt", "csv"]
    patterns = ["data/*.{}".format(ext) for ext in extensions]
    return {"gammapy.irf.tests": patterns}
|
from os.path import join, dirname
from ovos_plugin_common_play.ocp import MediaType, PlaybackType
from ovos_utils.parse import fuzzy_match, MatchStrategy
from ovos_workshop.skills.common_play import OVOSCommonPlaybackSkill, \
ocp_search
from tutubo import YoutubeSearch
from tutubo.models import *
class YoutubeFullMoviesSkill(OVOSCommonPlaybackSkill):
    """OVOS common-play skill that searches YouTube for full-length movies."""

    def __init__(self):
        super(YoutubeFullMoviesSkill, self).__init__("YoutubeFullMovies")
        # Handle generic playback queries as well as explicit movie requests.
        self.supported_media = [MediaType.GENERIC, MediaType.MOVIE]
        self.skill_icon = join(dirname(__file__), "ui", "logo.png")

    # --- scoring -----------------------------------------------------
    def calc_score(self, phrase, match, idx=0, base_score=0):
        """Confidence (0-100) that a search result matches the query."""
        # NOTE(review): returning 0 when the *result title* matches the
        # "full_movie" vocabulary looks inverted (such titles would seem
        # to be exactly what we want) -- confirm the intent.
        if self.voc_match(match.title, "full_movie"):
            return 0
        # idx represents the order from youtube
        score = base_score - idx  # - 1% as we go down the results list
        score += 100 * fuzzy_match(phrase.lower(), match.title.lower(),
                                   strategy=MatchStrategy.TOKEN_SET_RATIO)
        return min(100, score)

    # --- common play -------------------------------------------------
    @ocp_search()
    def search_youtube(self, phrase, media_type):
        """Yield playable movie results for a common-play search phrase."""
        base_score = 0
        if self.voc_match(phrase, "full_movie"):
            base_score += 10
            phrase = self.remove_voc(phrase, "full_movie")
        elif self.voc_match(phrase, "movie"):
            base_score += 5
            phrase = self.remove_voc(phrase, "movie")
        elif media_type != MediaType.MOVIE:
            # only search db if user explicitly requested movies
            return
        if self.voc_match(phrase, "youtube"):
            # explicitly requested youtube
            base_score += 40
            phrase = self.remove_voc(phrase, "youtube")
        idx = 0
        for v in YoutubeSearch(phrase + " full movie").iterate_youtube(
                max_res=10):
            if isinstance(v, Video) or isinstance(v, VideoPreview):
                if v.length < 3600:
                    continue  # not a full movie if len < 1 hour
                score = self.calc_score(phrase, v, idx, base_score=base_score)
                if score < 50:
                    continue
                # return as a video result (single track dict)
                # NOTE(review): "youtube//" looks like it should be
                # "youtube://" -- confirm against the OCP URI scheme.
                yield {
                    "match_confidence": score,
                    "media_type": MediaType.MOVIE,
                    "length": v.length * 1000,
                    "uri": "youtube//" + v.watch_url,
                    "playback": PlaybackType.VIDEO,
                    "image": v.thumbnail_url,
                    "bg_image": v.thumbnail_url,
                    "skill_icon": self.skill_icon,
                    "title": v.title,
                    "skill_id": self.skill_id
                }
                idx += 1
            else:
                continue
def create_skill():
    """Factory entry point used by the skill loader."""
    return YoutubeFullMoviesSkill()
|
class Base(object):
    """Default build-configuration provider.

    Every accessor returns an empty collection, so subclasses only need
    to override the pieces that apply to them.
    """

    def configure(self):
        """Hook for subclasses; the base implementation does nothing."""
        pass

    def get_include_paths(self):
        return []

    def get_lib_paths(self):
        return []

    def get_static_lib_paths(self):
        return []

    def get_libs(self):
        return []

    def get_static_libs(self):
        return []

    def get_linkflags(self):
        return []

    def get_static_linkflags(self):
        return []

    def as_dict(self, **kwargs):
        """Collapse the accessors into an SCons-style construction dict.

        Extra keyword arguments are accepted for interface compatibility
        but currently ignored.
        """
        lib_dirs = list(self.get_lib_paths())
        return dict(
            LIBPATH=lib_dirs,
            CPPPATH=self.get_include_paths(),
            LINKFLAGS=self.get_linkflags()
        )
|
from . import __version__, client

# Startup banner: 256-color orange (38;5;208) + bold, reset at the end.
print('\u001b[38;5;208m\033[1mGroupManagerBot is running.. version %s\033[0m' % __version__)
# Blocks until the client shuts down.
client.run()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class PreCreateEnsServiceRequest(RpcRequest):
    """Generated RPC request wrapper for the ENS `PreCreateEnsService`
    API (version 2017-11-10).

    Each getter/setter pair maps one query parameter by its exact wire
    name.  Note: 'InstanceBandwithdLimit' is presumably a misspelling of
    'InstanceBandwidthLimit', but it matches the server-side parameter
    name and must not be changed here.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Ens', '2017-11-10', 'PreCreateEnsService','ens')
        self.set_method('POST')

    def get_BandwidthType(self):
        return self.get_query_params().get('BandwidthType')

    def set_BandwidthType(self,BandwidthType):
        self.add_query_param('BandwidthType',BandwidthType)

    def get_SchedulingPriceStrategy(self):
        return self.get_query_params().get('SchedulingPriceStrategy')

    def set_SchedulingPriceStrategy(self,SchedulingPriceStrategy):
        self.add_query_param('SchedulingPriceStrategy',SchedulingPriceStrategy)

    def get_ImageId(self):
        return self.get_query_params().get('ImageId')

    def set_ImageId(self,ImageId):
        self.add_query_param('ImageId',ImageId)

    def get_InstanceSpec(self):
        return self.get_query_params().get('InstanceSpec')

    def set_InstanceSpec(self,InstanceSpec):
        self.add_query_param('InstanceSpec',InstanceSpec)

    def get_KeyPairName(self):
        return self.get_query_params().get('KeyPairName')

    def set_KeyPairName(self,KeyPairName):
        self.add_query_param('KeyPairName',KeyPairName)

    def get_UserData(self):
        return self.get_query_params().get('UserData')

    def set_UserData(self,UserData):
        self.add_query_param('UserData',UserData)

    def get_Password(self):
        return self.get_query_params().get('Password')

    def set_Password(self,Password):
        self.add_query_param('Password',Password)

    def get_BuyResourcesDetail(self):
        return self.get_query_params().get('BuyResourcesDetail')

    def set_BuyResourcesDetail(self,BuyResourcesDetail):
        self.add_query_param('BuyResourcesDetail',BuyResourcesDetail)

    def get_SystemDiskSize(self):
        return self.get_query_params().get('SystemDiskSize')

    def set_SystemDiskSize(self,SystemDiskSize):
        self.add_query_param('SystemDiskSize',SystemDiskSize)

    def get_InstanceBandwithdLimit(self):
        return self.get_query_params().get('InstanceBandwithdLimit')

    def set_InstanceBandwithdLimit(self,InstanceBandwithdLimit):
        self.add_query_param('InstanceBandwithdLimit',InstanceBandwithdLimit)

    def get_EnsServiceName(self):
        return self.get_query_params().get('EnsServiceName')

    def set_EnsServiceName(self,EnsServiceName):
        self.add_query_param('EnsServiceName',EnsServiceName)

    def get_Version(self):
        return self.get_query_params().get('Version')

    def set_Version(self,Version):
        self.add_query_param('Version',Version)

    def get_NetLevel(self):
        return self.get_query_params().get('NetLevel')

    def set_NetLevel(self,NetLevel):
        self.add_query_param('NetLevel',NetLevel)

    def get_SchedulingStrategy(self):
        return self.get_query_params().get('SchedulingStrategy')

    def set_SchedulingStrategy(self,SchedulingStrategy):
        self.add_query_param('SchedulingStrategy',SchedulingStrategy)

    def get_DataDiskSize(self):
        return self.get_query_params().get('DataDiskSize')

    def set_DataDiskSize(self,DataDiskSize):
        self.add_query_param('DataDiskSize',DataDiskSize)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Loic Jaquemet loic.jaquemet+python@gmail.com
#
import logging
log = logging.getLogger('view')
from PyQt4 import QtGui, QtCore, QtOpenGL
from PyQt4.Qt import Qt
from .. import model
LINE_SIZE = 512
PAGE_SIZE = 4096
# LINE_SIZE=512*4
# PAGE_SIZE=4096*16
class MemoryMappingScene(QtGui.QGraphicsScene):
    '''
    Binds a MemoryHandler mapping to a QGraphicsScene
    '''

    def __init__(self, mapping, parent=None):
        QtGui.QGraphicsScene.__init__(self, parent)
        # Keep a reference to the memory mapping being visualized.
        self.mapping = mapping
class MemoryMappingView(QtGui.QGraphicsView):
    '''
    We need to define our own QGraphicsView to play with.
    zoom-able QGraphicsView with smooth panning, displaying one memory
    mapping as LINE_SIZE-byte rows.
    from http://www.qtcentre.org/wiki/index.php?title=QGraphicsView:_Smooth_Panning_and_Zooming
    '''
    # Holds the current centerpoint for the view, used for panning and zooming
    CurrentCenterPoint = QtCore.QPointF()
    # From panning the view
    LastPanPoint = QtCore.QPoint()

    def __init__(self, parent=None):
        QtGui.QGraphicsView.__init__(self, parent)
        self.setRenderHints(
            QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
        # opengl ? !
        # self.setViewport(QtOpenGL.QGLWidget(QtOpenGL.QGLFormat(QtOpenGL.QGL.SampleBuffers)))
        # self.setCursor(Qt.OpenHandCursor)
        self.setCursor(Qt.ArrowCursor)
        self.SetCenter(
            QtCore.QPointF(
                0.0,
                0.0))  # A modified version of centerOn(), handles special cases

    def loadMapping(self, mapping):
        """Attach a memory mapping and (re)build the scene for it."""
        # Set-up the scene
        scene = MemoryMappingScene(mapping, parent=self)
        self.setScene(scene)
        self.mapping = mapping
        # Set-up the view
        if mapping:
            # Populate the scene
            # self._debugFill(scene)
            self.drawPages(mapping)
            self.setSceneRect(0, 0, LINE_SIZE, (len(mapping) // LINE_SIZE) + 1)
            # draw a square around
            self.scene().addRect(
                0,
                0,
                LINE_SIZE,
                (len(mapping) // LINE_SIZE) + 1,
                QtGui.QPen(
                    Qt.SolidLine))
            log.debug(
                'set sceneRect to %d,%d' %
                (LINE_SIZE, (len(mapping) // LINE_SIZE) + 1))
        else:
            self.setSceneRect(0, 0, LINE_SIZE, LINE_SIZE)
        self.SetCenter(
            QtCore.QPointF(
                0.0,
                0.0))  # A modified version of centerOn(), handles special cases
        return

    def drawPages(self, mapping):
        ''' draw a page delimitor every PAGE_SIZE '''
        pageSize = PAGE_SIZE
        # One dotted line per page boundary, expressed in row units.
        for y in xrange(
                PAGE_SIZE // LINE_SIZE, (len(mapping) // LINE_SIZE) - 1, PAGE_SIZE // LINE_SIZE):
            self.scene().addLine(0, y, LINE_SIZE, y, QtGui.QPen(Qt.DotLine))

    def _debugFill(self, scene):
        """Debug helper: fill the scene with a labeled grid of dots."""
        for x in xrange(0, LINE_SIZE, 25):
            for y in xrange(0, LINE_SIZE, 25):
                if (x % 100 == 0)and (y % 100 == 0):
                    scene.addRect(x, y, 2, 2)
                    pointString = QtCore.QString()
                    stream = QtCore.QTextStream(pointString)
                    stream << "(" << x << "," << y << ")"
                    item = scene.addText(pointString)
                    item.setPos(x, y)
                else:
                    scene.addRect(x, y, 1, 1)

    def GetScene(self):
        return self.scene()

    def GetCenter(self):
        return self.CurrentCenterPoint

    def SetCenter(self, centerPoint):
        '''
        Sets the current centerpoint. Also updates the scene's center point.
        Unlike centerOn, which has no way of getting the floating point center
        back, SetCenter() stores the center point. It also handles the special
        sidebar case. This function will clamp the centerPoint to sceneRect,
        i.e. the centerPoint must be within the sceneRect.
        '''
        # Get the rectangle of the visible area in scene coords
        visibleArea = self.mapToScene(self.rect()).boundingRect()
        # Get the scene area
        sceneBounds = self.sceneRect()
        boundX = visibleArea.width() / 2.0
        boundY = visibleArea.height() / 2.0
        boundWidth = sceneBounds.width() - 2.0 * boundX
        boundHeight = sceneBounds.height() - 2.0 * boundY
        # The max boundary that the centerPoint can be to
        bounds = QtCore.QRectF(boundX, boundY, boundWidth, boundHeight)
        if (bounds.contains(centerPoint)):
            # We are within the bounds
            self.CurrentCenterPoint = centerPoint
        else:
            # We need to clamp or use the center of the screen
            if(visibleArea.contains(sceneBounds)):
                # Use the center of scene ie. we can see the whole scene
                self.CurrentCenterPoint = sceneBounds.center()
            else:
                self.CurrentCenterPoint = centerPoint
                # We need to clamp the center. The centerPoint is too large
                if(centerPoint.x() > bounds.x() + bounds.width()):
                    self.CurrentCenterPoint.setX(bounds.x() + bounds.width())
                elif (centerPoint.x() < bounds.x()):
                    self.CurrentCenterPoint.setX(bounds.x())
                if(centerPoint.y() > bounds.y() + bounds.height()):
                    self.CurrentCenterPoint.setY(bounds.y() + bounds.height())
                elif (centerPoint.y() < bounds.y()):
                    self.CurrentCenterPoint.setY(bounds.y())
        # Update the scrollbars
        self.centerOn(self.CurrentCenterPoint)
        return

    def mousePressEvent(self, event):
        '''
        Handles when the mouse button is pressed.

        TODO: weird -- when pointers and null words are displayed, the
        pointer can no longer be selected; the click lands on the item
        group of the null words.
        '''
        # For panning the view
        self.LastPanPoint = event.pos()
        self.setCursor(Qt.ClosedHandCursor)
        item = self.itemAt(event.pos())
        log.debug('Mouse press on ' + str(item))
        if item is None:
            return
        item.setSelected(True)
        pitem = item.parentItem()
        if pitem is None:
            # no parent item, that must be lonely....
            if self.mapping:
                # read mapping value under the cursor
                addr = event.pos().y() * LINE_SIZE + event.pos().x()
                value = self.mapping.read_word(self.mapping.start + addr)
                log.debug('@0x%x: 0x%x' % (self.mapping.start + addr, value))
        else:
            # parent item, check for haystack types
            log.debug('Mouse press on parent item ' + str(pitem))
            if hasattr(pitem, 'value') and model.isRegistered(pitem.value):
                log.debug('showing info for %s' % (pitem))
                # update info view
                self.parent().showInfo(pitem)
            elif hasattr(pitem, 'onSelect'):
                # print status for pointers and nulls
                log.debug('running parent onSelect')
                pitem.onSelect()
            elif hasattr(item, 'onSelect'):
                log.debug('running item onSelect')
                # BUGFIX: this branch used to call pitem.onSelect() even
                # though the hasattr check (and the log line) refer to
                # 'item' -- that raised AttributeError whenever only the
                # child item had an onSelect handler.
                item.onSelect()
            else:
                log.debug('%s has no onSelect method' % item)
        return

    def mouseReleaseEvent(self, event):
        '''Handles when the mouse button is released: stop panning.'''
        # self.setCursor(Qt.OpenHandCursor)
        self.setCursor(Qt.ArrowCursor)
        self.LastPanPoint = QtCore.QPoint()
        return

    def mouseMoveEvent(self, event):
        '''Handles the mouse move event (drag-panning).'''
        if (not self.LastPanPoint.isNull()):
            # Get how much we panned
            delta = self.mapToScene(
                self.LastPanPoint) - self.mapToScene(event.pos())
            self.LastPanPoint = event.pos()
            # Update the center ie. do the pan
            self.SetCenter(self.GetCenter() + delta)
        return

    def wheelEvent(self, event):
        '''Zoom the view in and out around the cursor position.'''
        # Get the position of the mouse before scaling, in scene coords
        pointBeforeScale = QtCore.QPointF(self.mapToScene(event.pos()))
        # Get the original screen centerpoint
        # CurrentCenterPoint; //(visRect.center());
        screenCenter = self.GetCenter()
        # Scale the view ie. do the zoom
        scaleFactor = 1.15  # How fast we zoom
        if(event.delta() > 0):
            # Zoom in
            self.scale(scaleFactor, scaleFactor)
        else:
            # Zooming out
            self.scale(1.0 / scaleFactor, 1.0 / scaleFactor)
        # Get the position after scaling, in scene coords
        pointAfterScale = QtCore.QPointF(self.mapToScene(event.pos()))
        # Get the offset of how the screen moved
        offset = pointBeforeScale - pointAfterScale
        # Adjust to the new center for correct zooming
        newCenter = screenCenter + offset
        self.SetCenter(newCenter)  # QPointF
        return

    def resizeEvent(self, event):
        '''
        Need to update the center so there is no jolt in the
        interaction after resizing the widget.
        '''
        # Get the rectangle of the visible area in scene coords
        visibleArea = self.mapToScene(self.rect()).boundingRect()
        self.SetCenter(visibleArea.center())
        # Call the subclass resize so the scrollbars are updated correctly
        # NOTE(review): super(QtGui.QGraphicsView, self) deliberately(?)
        # skips QGraphicsView.resizeEvent -- the usual idiom would be
        # super(MemoryMappingView, self). Confirm before changing.
        super(QtGui.QGraphicsView, self).resizeEvent(event)
        return
|
#
# Post social media broadcast about news.
#
# This doesn't actually make a post -- it just places them in the
# outbound queue for the global social media script to handle.
#
# Copyright (C) 2019, PostgreSQL Europe
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils import timezone
from django.template.defaultfilters import slugify
from django.conf import settings
from datetime import timedelta
from postgresqleu.confreg.models import ConferenceNews
from postgresqleu.confreg.twitter import post_conference_social
class Command(BaseCommand):
    """Management command: queue social-media posts for pending
    conference news items.

    Posts are not sent directly -- they are placed in the outbound
    queue handled by the global social media script.
    """
    help = 'Schedule social media posts about conference news'

    class ScheduledJob:
        # Job-runner metadata: poll every 5 minutes, internal-only job.
        scheduled_interval = timedelta(minutes=5)
        internal = True

        @classmethod
        def should_run(cls):
            # FIX: first parameter renamed from the misleading 'self' --
            # this is a classmethod, so it receives the class.
            # Any unposted news from a conference where the news is dated in the past (so that it
            # is actually visible), but not more than 7 days in the past (in which case we skip it).
            return ConferenceNews.objects.filter(tweeted=False, datetime__lt=timezone.now(), datetime__gt=timezone.now() - timedelta(days=7)).exists()

    @transaction.atomic
    def handle(self, *args, **options):
        """Queue one post per pending news item and mark each as posted."""
        for n in ConferenceNews.objects.filter(tweeted=False, datetime__lt=timezone.now(), datetime__gt=timezone.now() - timedelta(days=7)):
            # Title is truncated to leave ~40 characters of room for the
            # URL within a 250-character status budget.
            statusstr = "{0} {1}/events/{2}/news/{3}-{4}/".format(
                n.title[:250 - 40],
                settings.SITEBASE,
                n.conference.urlname,
                slugify(n.title),
                n.id,
            )
            post_conference_social(n.conference, statusstr, approved=True)
            n.tweeted = True
            n.save()
|
#!C:/Users/Kushang Darbar/AppData/Local/Programs/Python/Python38-32/python
#print("Content-Type:text/html; charset=utf-8\n\n")
print()
import cgi
#!/usr/bin/env python
# coding: utf-8
import cv2
import os
import numpy as np
import FaceRecognition as fr
import retrieve as rt
import cgitb
cgitb.enable()  # render tracebacks in the browser while debugging the CGI

form = cgi.FieldStorage()
photo = form['Photo']
if photo.filename:
    # Save the uploaded photo into the web server's tmp folder.
    fn = os.path.basename(photo.filename)
    # FIX: use a context manager so the handle is closed after writing
    # (previously a bare open().write() leaked the file handle).
    with open('C:/xampp/htdocs/Criminal_Identification_System/tmp/'+fn, 'wb') as upload:
        upload.write(photo.file.read())
else:
    print('\nNo file uploaded')
    # FIX: execution used to fall through with 'fn' undefined and crash
    # below with a NameError; stop cleanly instead.
    raise SystemExit

# Read the stored image back from disk and perform face recognition.
test_img = cv2.imread('C:/xampp/htdocs/Criminal_Identification_System/tmp/'+fn)
faces_detected, gray_img = fr.faceDetection(test_img)
print("faces_detected:", faces_detected)

# One-off training (run once; it writes trainingData.yml to disk):
# faces,faceID=fr.labels_for_training_data(r'C:\Users\Kushang Darbar\PROJECT\trainingImages')
# face_recognizer = fr.train_classifier(faces,faceID)
# face_recognizer.save(r'C:\Users\Kushang Darbar\PROJECT\trainingData.yml')

# Subsequent runs: load the previously trained recognizer from disk.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read(r'C:\Users\Kushang Darbar\PROJECT\trainingData.yml')
name = {0: "Kushang", 1: "Milind"}  # label -> person name

for face in faces_detected:
    (x, y, w, h) = face
    # FIX: the horizontal slice used 'h' instead of 'w'; non-square
    # detections produced a wrongly shaped region of interest.
    roi_gray = gray_img[y:y+h, x:x+w]
    label, confidence = face_recognizer.predict(roi_gray)  # predict the label of this face
    print("confidence:", confidence)
    print("label:", label)
    fr.draw_rect(test_img, face)
    predicted_name = name[label]
    # Lower LBPH confidence means a better match: only accept predictions
    # below the threshold of 35.
    if confidence < 35:
        fr.put_text(test_img, predicted_name, x, y)
        rt.retrieveData(predicted_name)
    else:
        print("No data found in database")

resized_img = cv2.resize(test_img, (800, 800))
cv2.imshow("face recognition", resized_img)
cv2.waitKey(0)  # wait indefinitely until a key is pressed
cv2.destroyAllWindows()
import random
import string
from unittest import mock
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import SimpleTestCase
from django.urls import reverse
from django.utils.crypto import get_random_string
from mtp_common.auth.api_client import MoJOAuth2Session
from mtp_common.auth.test_utils import generate_tokens
from prisoner_location_admin import required_permissions
TEST_PRISONS = ['IXB', 'INP']
class PrisonerLocationUploadTestCase(SimpleTestCase):
    """Base test case for prisoner-location upload views.

    Patches out notification banners and the security model cache for every
    test, and provides a login() helper that authenticates through the real
    login view against a mocked auth backend.
    """

    def setUp(self):
        super().setUp()
        # Silence notification banners rendered by the base templates.
        self.notifications_mock = mock.patch(
            'mtp_common.templatetags.mtp_common.notifications_for_request',
            return_value=[],
        )
        self.notifications_mock.start()
        # Disable the security model cache so nothing leaks between tests.
        self.disable_cache = mock.patch('security.models.cache')
        self.disable_cache.start().get.return_value = None

    def tearDown(self):
        self.notifications_mock.stop()
        self.disable_cache.stop()
        super().tearDown()

    @mock.patch('mtp_common.auth.backends.api_client')
    def login(self, mock_api_client):
        """Log in via the login view; returns the mocked credential payload."""
        credentials = {
            'pk': 5,
            'token': generate_tokens(),
            'user_data': {
                'first_name': 'Sam',
                'last_name': 'Hall',
                'username': 'shall',
                'email': 'sam@mtp.local',
                'permissions': required_permissions,
                'prisons': [],
                'flags': [],
                'roles': ['prisoner-location-upload'],
            },
        }
        mock_api_client.authenticate.return_value = credentials
        response = self.client.post(
            reverse('login'),
            data={'username': 'shall', 'password': 'pass'},
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        return credentials

    def setup_mock_get_authenticated_api_session(self, mock_api_client):
        """Make get_authenticated_api_session() return a session with fresh tokens."""
        session = MoJOAuth2Session()
        session.token = generate_tokens()
        mock_api_client.get_authenticated_api_session.return_value = session
def get_csv_data_as_file(data, filename='example.csv'):
    """Wrap a CSV text payload in an uploaded-file object for view tests."""
    payload = data.encode('utf-8')
    return SimpleUploadedFile(filename, payload, content_type='text/csv')
def random_prisoner_name():
    """Return a (first_name, surname) pair of randomised capitalised names."""
    def _capitalised(max_tail):
        initial = get_random_string(allowed_chars=string.ascii_uppercase, length=1)
        tail = get_random_string(
            allowed_chars=string.ascii_lowercase,
            length=random.randint(3, max_tail),
        )
        return initial + tail

    return _capitalised(6), _capitalised(9)
def random_prisoner_num():
    """Return a fake prisoner number shaped like 'A1234BC'."""
    letters = string.ascii_uppercase
    return '{}{}{}{}'.format(
        random.choice(letters),
        random.randint(1000, 9999),
        random.choice(letters),
        random.choice(letters),
    )
def random_dob():
    """Return a date of birth as (expected ISO form, raw file form).

    The file form randomly carries one of the time suffixes seen in real
    exports: ' 00:00', ' 0:00:00' or nothing.
    """
    day = random.randint(1, 28)
    month = random.randint(1, 12)
    year = random.randint(1920, 1997)
    suffixes = {0: ' 00:00', 1: ' 0:00:00', 2: ''}
    time_suffix = suffixes[random.randint(0, 2)]
    iso_form = f'{year}-{month:02d}-{day:02d}'
    file_form = f'{day}/{month}/{year}{time_suffix}'
    return iso_form, file_form
def generate_testable_location_data(length=20, extra_rows=None, excel_csv=False):
    """Build a prisoner-location CSV and the parsed rows expected from it.

    Returns (csv_text, expected_rows). extra_rows are inserted verbatim after
    the header and deliberately do not appear in expected_rows. When excel_csv
    is set, the two footer lines get the trailing commas Excel appends.
    """
    file_data = ['NOMS Number,Offender Surname,Offender Given Name 1,Date of Birth,Establishment Code']
    expected_data = []
    if extra_rows:
        file_data.extend(extra_rows)
    for _ in range(length):
        firstname, surname = random_prisoner_name()
        number = random_prisoner_num()
        expected_dob, file_dob = random_dob()
        prison = random.choice(TEST_PRISONS)
        file_data.append(','.join([number, surname, firstname, file_dob, prison]))
        expected_data.append({
            'prisoner_number': number,
            'prisoner_name': '%s %s' % (firstname, surname),
            'prisoner_dob': expected_dob,
            'prison': prison,
        })
    footer = 'Latest Business Data Available'
    footer_date = random_dob()[1]
    if excel_csv:
        footer += ',,,,'
        footer_date += ',,,,'
    file_data.append(footer)
    file_data.append(footer_date)
    return '\n'.join(file_data), expected_data
|
import os
import sys
import time
import subprocess
from tornado import gen
from traitlets import Dict
from jupyterhub.utils import random_port
from jupyterhub.spawner import Spawner
__all__ = ['SGESpawner']
class SGESpawner(Spawner):
    """JupyterHub spawner that launches single-user servers through Sun Grid Engine.

    Jobs are submitted with ``qsub`` run as the target user via sudo, and
    monitored by parsing ``qstat -t`` output. The SGE job id is kept in the
    spawner state so running servers survive hub restarts.
    """

    # Extra environment passed into the job on top of the Spawner defaults.
    sge_env = Dict({}, config=True,
        help="Extra SGE environment variables to pass through")

    def __init__(self, *args, **kwargs):
        super(SGESpawner, self).__init__(*args, **kwargs)
        # Every queue command is executed as the spawning user via sudo.
        self.cmd_prefix = ['sudo', '-u', self.user.name]

    def qstat_t(self, jobid, column):
        """
        Call qstat -t and extract information about a job.

        Parameters
        ----------
        jobid : `int`
            The numeric ID of the job to search for.

        column : `string`
            The name of the column to extract the information about, can be
            "host" or "state".

        Returns
        -------
        result : `string`
            The value of the column, or None if the job can not be found
        """
        # Column indices of the fields we care about in `qstat -t` output.
        qstat_columns = {'state': 4, 'host': 7}
        ret = subprocess.run(self.cmd_prefix + ['qstat', '-t'],
            stdout=subprocess.PIPE, env=self.env)
        jobinfo = ret.stdout.decode('utf-8')
        state = None
        # NOTE(review): startswith() also matches job ids that merely begin
        # with the same digits (e.g. 12 matches 123) - confirm ids can't collide.
        for line in jobinfo.split('\n'):
            line = line.strip()
            if line.startswith('{}'.format(jobid)):
                state = line.split()[qstat_columns[column]]
        return state

    def load_state(self, state):
        # Restore the persisted SGE job id, if one was saved.
        super(SGESpawner, self).load_state(state)
        if 'jobid' in state:
            self.jobid = state['jobid']

    def get_state(self):
        # Persist the SGE job id alongside the base spawner state.
        state = super(SGESpawner, self).get_state()
        if self.jobid:
            state['jobid'] = self.jobid
        return state

    def clear_state(self):
        # Forget the job id once the job is gone.
        super(SGESpawner, self).clear_state()
        self.jobid = None

    def _env_default(self):
        # Merge the configured extra SGE variables into the default env.
        env = super(SGESpawner, self)._env_default()
        env.update(self.sge_env)
        return env

    @gen.coroutine
    def start(self):
        """
        Submit the job to the queue and wait for it to start
        """
        self.user.server.port = random_port()
        cmd = self.cmd_prefix.copy()
        # -b y: binary job, -j y: join stdout/stderr, -wd: run in the user's home.
        cmd.extend(['qsub', '-b', 'y', '-j', 'y',
                    '-N', 'jupyterhub', '-wd', '/home/{}'.format(self.user.name)])
        cmd.extend([sys.executable, '-m', 'jupyterhub.singleuser'])
        cmd.extend(self.get_args())
        self.log.info("SGE: CMD: {}".format(cmd))
        env = self.env.copy()
        self.proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        # Parse the job id out of qsub's "Your job <id> ..." banner.
        r = self.proc.stdout.read().decode('utf-8')
        self.log.info("SGE: {}".format(r))
        jid = int(r.split('Your job ')[1].split()[0])
        self.jobid = jid
        # Poll until the job reaches the running ('r') state.
        # NOTE(review): time.sleep blocks the event loop inside this
        # coroutine - presumably acceptable here, but confirm.
        state = self.qstat_t(jid, 'state')
        while state != 'r':
            time.sleep(2)
            state = self.qstat_t(jid, 'state')
            self.log.info("SGE: Job State: {}".format(state))
        # Host column looks like queue@host.domain; keep the short host name.
        host = self.qstat_t(jid, 'host')
        host = host.split('@')[1].split('.')[0]
        self.log.info("SGE: The single user server"
                      " is running on: {}".format(host))
        self.user.server.ip = host

    @gen.coroutine
    def stop(self, now=False):
        # Best-effort qdel of the job; does not wait for it to disappear.
        if self.jobid:
            ret = subprocess.Popen(self.cmd_prefix + ['qdel', '{}'.format(self.jobid)],
                env=self.env)
            self.log.info("SGE: {}".format(ret))

    @gen.coroutine
    def poll(self):
        # Spawner contract: None -> running, integer -> exited/not running.
        state = self.qstat_t(self.jobid, 'state')
        if state:
            if state == 'r':
                return None
            else:  # qw is not an option here.
                return 1
        else:
            return 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-28 11:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the TestEnvironment model to ChallengePhase and point it at
    the challenge_phase database table."""

    dependencies = [("challenges", "0006_changed_path_to_upload")]

    operations = [
        # Model rename preserves the data; only the Django-side name changes.
        migrations.RenameModel(
            old_name="TestEnvironment", new_name="ChallengePhase"
        ),
        # Explicit table name so the DB table matches the new model name.
        migrations.AlterModelTable(
            name="challengephase", table="challenge_phase"
        ),
    ]
|
import random
import re
import subprocess
import time
from threading import Thread
import httpx # type: ignore
import pytest
import mosec
# Port the demo "square" service listens on (passed to square_service.py
# by the mosec_service fixture) and the base URL all tests target.
TEST_PORT = "8090"
URI = f"http://localhost:{TEST_PORT}"
@pytest.fixture(scope="module")
def http_client():
    """Yield one shared HTTP client, closed when the module's tests finish."""
    with httpx.Client() as client:
        yield client
@pytest.fixture(scope="session")
def mosec_service():
    """Run the square demo service as a subprocess for the whole session."""
    proc = subprocess.Popen(
        ["python", "tests/square_service.py", "--port", TEST_PORT],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    time.sleep(2)  # wait for service to start
    # A non-None poll() means the process already died; surface its output.
    assert not proc.poll(), proc.stdout.read().decode("utf-8")
    yield proc
    proc.terminate()
def test_square_service(mosec_service, http_client):
    """Smoke-test the HTTP surface: banner, metrics and error responses."""
    index = http_client.get(URI)
    assert index.status_code == 200
    assert index.headers["server"] == f"mosec/{mosec.__version__}"

    metrics = http_client.get(f"{URI}/metrics")
    assert metrics.status_code == 200

    # Wrong JSON key -> unprocessable entity.
    bad_json = http_client.post(f"{URI}/inference", json={"msg": 2})
    assert bad_json.status_code == 422

    # Undecodable payload -> bad request.
    bad_bytes = http_client.post(f"{URI}/inference", content=b"bad-binary-request")
    assert bad_bytes.status_code == 400

    validate_square_service(http_client, 2)
def test_square_service_mp(mosec_service, http_client):
    """Hammer the service from 20 threads, then check batching and a drained queue."""
    workers = []
    for _ in range(20):
        worker = Thread(
            target=validate_square_service,
            args=(http_client, random.randint(-500, 500)),
        )
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    assert_batch_larger_than_one(http_client)
    assert_empty_queue(http_client)
def validate_square_service(http_client, x):
    """The service must respond with the square of the submitted value."""
    reply = http_client.post(f"{URI}/inference", json={"x": x})
    assert reply.json()["x"] == x ** 2
def assert_batch_larger_than_one(http_client):
    """Check the Prometheus histogram proves at least one multi-request batch.

    The cumulative batch_size buckets are non-decreasing, so if the last
    bucket's count exceeds the first's, some batch held more than one request.
    """
    metrics = http_client.get(f"{URI}/metrics").content.decode()
    buckets = re.findall(r"batch_size_bucket.+", metrics)

    # FIX: named lambda (PEP 8 E731, previously silenced with noqa)
    # replaced by a local function.
    def bucket_count(line):
        # A histogram sample line ends with its cumulative count.
        return int(line.split(" ")[-1])

    assert bucket_count(buckets[-1]) > bucket_count(buckets[0])
def assert_empty_queue(http_client):
    """After the load finishes, no tasks may remain queued in the service."""
    metrics = http_client.get(f"{URI}/metrics").content.decode()
    sample = re.findall(r"mosec_service_remaining_task \d+", metrics)[0]
    remaining = int(sample.split(" ")[-1])
    assert remaining == 0
|
#!/usr/bin/env python
import os
import click
import pandas as pd
import numpy as np
@click.command()
@click.argument('orig_csv', type=click.Path())
@click.argument('out_csv', type=click.Path())
def clean_afm_db(orig_csv, out_csv):
    """Clean the raw AFM image-label CSV and write the result to OUT_CSV.

    Keeps only labeled rows, fills missing labels, normalises the noise
    label vocabulary and drops under-populated noise classes.
    """
    df_mg = pd.read_csv(orig_csv)  # open csv to dataframe
    df_mg = df_mg.loc[0:3360]  # take rows that have already been labeled
    # Build image file paths from ids, relative to the CSV's directory.
    base_dir = os.path.split(orig_csv)[0]
    df_mg.imPath = [base_dir + '/' + str(i) + '.tif' for i in df_mg['id'].tolist()]
    # Fill NaNs
    df_mg['noise'] = df_mg['noise'].fillna(value='c')  # c for 'clean'
    df_mg['fiber'] = df_mg['fiber'].fillna(value='n')  # n for 'not fiber'
    df_mg['channel'] = df_mg['channel'].str.replace('ZSensor', 'Height')  # these channels are essentially equivalent
    # Simplify the noise category to 'noise' or 'clean'
    df_mg['noise_simple'] = df_mg['noise'].copy()
    df_mg.loc[df_mg['noise_simple'] != 'c', 'noise_simple'] = 'n'
    # Clean up the noise labels - some have a superfluous 'h'/'v' prefix.
    # FIX: the 'hp' replacement was a duplicated chained assignment
    # (df['noise'] = df['noise'] = ...); all replacements now share one loop.
    for old, new in (('hb', 'b'), ('hl', 'l'), ('hp', 'p'), ('hs', 's'), ('vg', 'g')):
        df_mg['noise'] = df_mg['noise'].str.replace(old, new)
    # Remove horizontal gradient, vertical line and 'X' classes - not enough images.
    df_mg = df_mg[~df_mg['noise'].isin(['hg', 'vl', 'x'])]
    # Write out the file
    df_mg.to_csv(path_or_buf=out_csv, index=False)


if __name__ == '__main__':
    clean_afm_db()
|
# For each of t test cases, read "<count> <string>" and print the string
# with every character repeated <count> times.
t = int(input())
for _ in range(t):
    count_str, text = input().split()
    times = int(count_str)
    print(''.join(ch * times for ch in text))
|
class Pattern_Twenty_Nine:
    '''Print a 7-row ring pattern built from a marker character:
     ***
    *   *
    *   *
    *   *
    *   *
    *   *
     ***
    '''

    def __init__(self, strings='*'):
        # Coerce non-string markers (e.g. numbers) to their text form.
        if not isinstance(strings, str):
            strings = str(strings)
        top_and_bottom = f' {strings * 3}'
        middle = f'{strings}   {strings}'
        for row in range(7):
            print(top_and_bottom if row in (0, 6) else middle)


if __name__ == '__main__':
    Pattern_Twenty_Nine()
|
"""
The project is developed as part of Computer Architecture class.
Project Name: Functional Simulator for subset of RISC-V Processor
-------------------------------------------------
| Developer's Name | Developer's Email ID |
|-----------------------------------------------|
| Akhil Arya | 2019csb1066@iitrpr.ac.in |
| Harshwardhan Kumar | 2019csb1089@iitrpr.ac.in |
| Krithika Goyal | 2019csb1094@iitrpr.ac.in |
| Rhythm Jain | 2019csb1111@iitrpr.ac.in |
| Tarun Singla | 2019csb1126@iitrpr.ac.in |
-------------------------------------------------
"""
# main.py
# Purpose of this file: This file controls the overall functioning of the Simulator.
from Gui import display, take_input
from myRISCVSim import State, Processor, BTB, HDU
from memory import Memory
import time
# Human-readable labels for the run statistics; the value of s[i] is
# appended to stats[i] before writing stats.txt.
stats = [
    "Total number of cycles: ",
    "Total instructions executed: ",
    "CPI: ",
    "Number of data-transfer(load and store): ",
    "Number of ALU instructions executed: ",
    "Number of Control instructions: ",
    "Number of stalls/bubbles in the pipeline: ",
    "Number of data hazards: ",
    "Number of control hazards: ",
    "Number of branch mispredictions: ",
    "Number of stalls due to data hazards: ",
    "Number of stalls due to control hazards: "
]
# Labels for instruction-cache counters (paired with ic below).
instruction_cache_stats = [
    "Number of read accesses: ",
    "Number of read hits: ",
    "Number of read misses: ",
    "Number of write-through no-write allocates: ",
]
# Labels for data-cache counters (paired with dc below).
data_cache_stats = [
    "Number of read accesses: ",
    "Number of read hits: ",
    "Number of read misses: ",
    "Number of write-through no-write allocates: ",
]
s = [0]*12   # numeric run statistics, indexed like `stats`
ic = [0]*4   # instruction-cache counters
dc = [0]*4   # data-cache counters
l = []       # per-cycle instruction text for each pipeline stage (GUI)
l_dash = []  # same as l, annotated with data-hazard forwarding info
pc_tmp = []  # per-cycle PCs in [WB, MEM, EX, ID, IF] order (-1/"bubble" when empty)
data_hazard_pairs = []      # per-cycle {'who': stage, 'from_whom': stage} records
control_hazard_signals = [] # per-cycle GUI colour code: 0 none, 1 red, 2 yellow, 3 green
stage = {1: "fetch", 2: "decode", 3: "execute", 4: "memory", 5: "write_back"}
# phase 3
memory_table = []  # per-cycle [instruction-cache access, data-cache access] GUI records
# Function for pipelined execution
def evaluate(processor, pipeline_ins):
    """Advance the 5-stage pipeline by one clock tick.

    pipeline_ins holds the in-flight State objects ordered
    [WB, MEM, EX, ID, IF]; stages run oldest-first so each reads results
    produced in earlier cycles. Returns (pipeline shifted to 4 States,
    control_hazard flag, redirect PC). Appends to the module globals
    control_hazard_signals and memory_table, and reads the global btb.
    """
    processor.write_back(pipeline_ins[0])
    gui_mem = processor.mem(pipeline_ins[1])
    processor.execute(pipeline_ins[2])
    control_hazard, control_pc, entering, color = processor.decode(pipeline_ins[3], btb)
    # Record the GUI colour for this cycle: keep showing 2 (yellow) while a
    # branch entered earlier is still being resolved through a bubble.
    if entering:
        control_hazard_signals.append(2)
    elif pipeline_ins[2].is_dummy and color != 0 and len(control_hazard_signals) > 0 and control_hazard_signals[-1] == 2:
        control_hazard_signals.append(control_hazard_signals[-1])
    else:
        control_hazard_signals.append(color)
    gui_fetch = processor.fetch(pipeline_ins[4], btb)
    # Shift the pipeline: the WB-stage instruction retires.
    pipeline_ins = [pipeline_ins[1], pipeline_ins[2], pipeline_ins[3], pipeline_ins[4]]
    memory_table.append([gui_fetch,gui_mem])
    return pipeline_ins, control_hazard, control_pc
if __name__ == '__main__':
    # set .mc file, input knobs and cache inputs
    prog_mc_file, pipelining_enabled, forwarding_enabled, print_registers_each_cycle, print_pipeline_registers, print_specific_pipeline_registers, cache_in = take_input()
    # Knobs
    # pipelining_enabled = True # Knob1
    # forwarding_enabled = False # Knob2
    # print_registers_each_cycle = False # Knob3
    # print_pipeline_registers = False # Knob4
    # print_specific_pipeline_registers = [False, 10] # Knob5
    # Data cache inputs
    data_cache_size = int(cache_in[0])
    data_cache_block_size = int(cache_in[1]) # Word is 4B
    data_cache_associativity = int(cache_in[2]) # 0/1/2[FA/DM/SA]
    data_cache_ways = int(cache_in[3])
    # Instruction cache inputs
    instruction_cache_size = int(cache_in[4])
    instruction_cache_block_size = int(cache_in[5]) # Word is 4B
    instruction_cache_associativity = int(cache_in[6]) # 0/1/2[FA/DM/SA]
    instruction_cache_ways = int(cache_in[7])
    # invoke classes: the two caches, the processor, the hazard-detection
    # unit and the branch-target buffer
    data_cache = Memory(data_cache_size, data_cache_block_size, data_cache_associativity, data_cache_ways)
    instruction_cache = Memory(instruction_cache_size, instruction_cache_block_size, instruction_cache_associativity, instruction_cache_ways)
    processor = Processor(prog_mc_file, data_cache, instruction_cache)
    hdu = HDU()
    btb = BTB()
    # Signals
    PC = 0
    clock_cycles = 0
    prog_end = False
    # Various Counts
    number_of_stalls_due_to_control_hazards = 0
    number_of_data_hazards = 0
    number_of_stalls_due_to_data_hazards = 0
    total_number_of_stalls = 0  # NOTE(review): never updated below - confirm it is intentionally unused
    if not pipelining_enabled:
        # Multi-cycle: each instruction walks fetch->decode->execute->mem->WB
        # sequentially, taking 5 clock cycles and no overlap.
        processor.pipelining_enabled = False
        while True:
            instruction = State(PC)
            # --- fetch cycle ---
            gui_read = processor.fetch(instruction)
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            pc_tmp.append([-1, -1, -1, -1, instruction.PC])
            data_hazard_pairs.append({'who': -1, 'from_whom': -1})
            memory_table.append([gui_read,False])
            # --- decode cycle ---
            processor.decode(instruction)
            pc_tmp.append([-1, -1, -1, instruction.PC, -1])
            data_hazard_pairs.append({'who': -1, 'from_whom': -1})
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            # Decode detects the exit instruction and ends the run.
            if processor.terminate:
                prog_end = True
                break
            memory_table.append([False,False])
            # --- execute cycle ---
            processor.execute(instruction)
            pc_tmp.append([-1, -1, instruction.PC, -1, -1])
            data_hazard_pairs.append({'who': -1, 'from_whom': -1})
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            memory_table.append([False,False])
            # --- memory cycle ---
            gui_data = processor.mem(instruction)
            pc_tmp.append([-1, instruction.PC, -1, -1, -1])
            data_hazard_pairs.append({'who': -1, 'from_whom': -1})
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            memory_table.append([False,gui_data])
            # --- write-back cycle ---
            processor.write_back(instruction)
            pc_tmp.append([instruction.PC, -1, -1, -1, -1])
            data_hazard_pairs.append({'who': -1, 'from_whom': -1})
            control_hazard_signals += [0,0,0,0,0]
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            PC = processor.next_PC
            memory_table.append([False,False])
    else:
        # Pipelined execution: five States in flight, ordered [WB, MEM, EX, ID, IF].
        processor.pipelining_enabled = True
        pipeline_instructions = [State(0) for _ in range(5)]
        # Only the IF slot starts with a real instruction; the rest are bubbles.
        for i in range(4):
            pipeline_instructions[i].is_dummy = True
        while not prog_end:
            if not forwarding_enabled:
                # Stall-only hazard resolution (no operand forwarding).
                data_hazard = hdu.data_hazard_stalling(pipeline_instructions)
                old_states = pipeline_instructions
                pipeline_instructions, control_hazard, control_pc = evaluate(processor, pipeline_instructions)
                # Record per-stage PCs ("bubble" for dummy slots) for the GUI.
                tmp = []
                for i in range(5):
                    if(old_states[i].is_dummy):
                        tmp.append("bubble")
                    else:
                        tmp.append(old_states[i].PC)
                pc_tmp.append(tmp)
                data_hazard_pairs.append(data_hazard[2])
                branch_taken = pipeline_instructions[3].branch_taken
                branch_pc = pipeline_instructions[3].next_pc
                PC += 4
                if branch_taken and not data_hazard[0]:
                    PC = branch_pc
                # Control hazard: redirect fetch and squash the wrongly fetched slot.
                if control_hazard and not data_hazard[0]:
                    number_of_stalls_due_to_control_hazards += 1
                    PC = control_pc
                    pipeline_instructions.append(State(PC))
                    pipeline_instructions[-2].is_dummy = True
                # Data hazard: insert a bubble at EX and refetch the same PC.
                if data_hazard[0]:
                    number_of_data_hazards += data_hazard[1]
                    number_of_stalls_due_to_data_hazards += 1
                    pipeline_instructions = pipeline_instructions[:2] + [State(0)] + old_states[3:]
                    pipeline_instructions[2].is_dummy = True
                    PC -= 4
                if not control_hazard and not data_hazard[0]:
                    pipeline_instructions.append(State(PC))
                    pipeline_instructions[-2].next_pc = PC
                # The run ends once every slot except IF is a bubble.
                prog_end = True
                for i in range(4):
                    x = pipeline_instructions[i]
                    if not x.is_dummy:
                        prog_end = False
                        break
            else:
                # Hazard resolution with operand forwarding.
                data_hazard, if_stall, stall_position, pipeline_instructions, gui_pair = hdu.data_hazard_forwarding(pipeline_instructions)
                old_states = pipeline_instructions
                pipeline_instructions, control_hazard, control_pc = evaluate(processor, pipeline_instructions)
                # Record per-stage PCs ("bubble" for dummy slots) for the GUI.
                tmp = []
                for i in range(5):
                    if(old_states[i].is_dummy):
                        tmp.append("bubble")
                    else:
                        tmp.append(old_states[i].PC)
                pc_tmp.append(tmp)
                data_hazard_pairs.append(gui_pair)
                branch_taken = pipeline_instructions[3].branch_taken
                branch_pc = pipeline_instructions[3].next_pc
                PC += 4
                if branch_taken and not if_stall:
                    PC = branch_pc
                # Control hazard: redirect fetch and squash the wrongly fetched slot.
                if control_hazard and not if_stall:
                    number_of_stalls_due_to_control_hazards += 1
                    PC = control_pc
                    pipeline_instructions.append(State(PC))
                    pipeline_instructions[-2].is_dummy = True
                # Load-use style stall: bubble inserted at the position the HDU chose.
                if if_stall:
                    number_of_stalls_due_to_data_hazards += 1
                    if stall_position == 0:
                        pipeline_instructions = pipeline_instructions[:1] + [State(0)] + old_states[2:]
                        pipeline_instructions[1].is_dummy = True
                        PC -= 4
                    elif stall_position == 1:
                        pipeline_instructions = pipeline_instructions[:2] + [State(0)] + old_states[3:]
                        pipeline_instructions[2].is_dummy = True
                        PC -= 4
                number_of_data_hazards += data_hazard
                if not control_hazard and not if_stall:
                    pipeline_instructions.append(State(PC))
                    pipeline_instructions[-2].next_pc = PC
                # Clear per-cycle forwarding flags before the next tick.
                for inst in pipeline_instructions:
                    inst.decode_forwarding_op1 = False
                    inst.decode_forwarding_op2 = False
                # The run ends once every slot except IF is a bubble.
                prog_end = True
                for i in range(4):
                    x = pipeline_instructions[i]
                    if not x.is_dummy:
                        prog_end = False
                        break
            clock_cycles += 1
            if print_registers_each_cycle:
                print("CLOCK CYCLE:", clock_cycles)
                print("Register Data:-")
                for i in range(32):
                    print("R" + str(i) + ":", processor.R[i], end=" ")
                print("\n")
            # Print specific pipeline register
            if print_specific_pipeline_registers[0]:
                for inst in pipeline_instructions:
                    if inst.PC/4 == print_specific_pipeline_registers[1]:
                        if not print_registers_each_cycle:
                            print("CLOCK CYCLE:", clock_cycles)
                        print("Pipeline Registers:-")
                        print("Fetch # Decode =>", "Instruction:", pipeline_instructions[3].instruction_word)
                        print("Decode # Execute => ", "Operand1: ", pipeline_instructions[2].operand1, ", Operand2: ", pipeline_instructions[2].operand2, sep="")
                        print("Execute # Memory => ", "Data: ", pipeline_instructions[1].register_data, sep="")
                        print("Memory # WriteBack => ", "Data: ", pipeline_instructions[0].register_data, sep="")
                        print("\n")
            # Print pipeline registers
            elif print_pipeline_registers:
                if not print_registers_each_cycle:
                    print("CLOCK CYCLE:", clock_cycles)
                print("Pipeline Registers:-")
                print("Fetch # Decode =>", "Instruction:", pipeline_instructions[3].instruction_word)
                print("Decode # Execute => ", "Operand1: ", pipeline_instructions[2].operand1, ", Operand2: ", pipeline_instructions[2].operand2, sep="")
                print("Execute # Memory => ", "Data: ", pipeline_instructions[1].register_data, sep="")
                print("Memory # WriteBack => ", "Data: ", pipeline_instructions[0].register_data, sep="")
                print("\n")
    # Print Statistics
    s[0] = clock_cycles
    s[1] = processor.count_total_inst
    s[2] = s[0]/s[1]
    s[3] = processor.count_mem_inst
    s[4] = processor.count_alu_inst
    s[5] = processor.count_control_inst
    s[7] = number_of_data_hazards
    # Hazard/stall statistics only apply to the pipelined mode.
    if pipelining_enabled:
        s[8] = s[5]
        s[9] = processor.count_branch_mispredictions
        s[10] = number_of_stalls_due_to_data_hazards
        s[11] = number_of_stalls_due_to_control_hazards
        s[6] = s[10] + s[11]
    ic[0] = instruction_cache.count_reads
    ic[1] = instruction_cache.count_read_hits
    ic[2] = instruction_cache.count_read_misses
    ic[3] = instruction_cache.count_writes
    dc[0] = data_cache.count_reads
    dc[1] = data_cache.count_read_hits
    dc[2] = data_cache.count_read_misses
    dc[3] = data_cache.count_writes
    # Dump memory and the collected statistics to stats.txt.
    if prog_end:
        processor.write_data_memory()
        statfile = open("stats.txt", "w")
        for i in range(12):
            stats[i] += str(s[i]) + '\n'
        statfile.writelines(stats)
        statfile.write("\nInstruction Cache: \n")
        for i in range(4):
            instruction_cache_stats[i] += str(ic[i]) + '\n'
        statfile.writelines(instruction_cache_stats)
        statfile.write("\nData Cache: \n")
        for i in range(4):
            data_cache_stats[i] += str(dc[i]) + '\n'
        statfile.writelines(data_cache_stats)
        statfile.close()
    # Translate per-cycle PCs to instruction text for the GUI; l_dash also
    # carries forwarding annotations when forwarding was enabled.
    for i in range(len(pc_tmp)):
        tmp = [str(processor.get_code[x]) for x in pc_tmp[i]]
        l.append(tmp)
        tmp = []
        for j in range(5):
            if forwarding_enabled and pipelining_enabled:
                if data_hazard_pairs[i]['from'][j] != '':
                    tmp.append(str(processor.get_code[pc_tmp[i][j]]) + "\n" + data_hazard_pairs[i]['from'][j])
                else:
                    tmp.append(str(processor.get_code[pc_tmp[i][j]]))
            else:
                tmp.append(str(processor.get_code[pc_tmp[i][j]]))
        l_dash.append(tmp + [data_hazard_pairs[i]])
    # resolving the combined control + data hazard case
    for i in range(len(l)):
        if data_hazard_pairs[i]['who'] == 3:
            control_hazard_signals[i] = 0
    # Build per-cycle cache-access messages and hit/miss flags for the GUI.
    # NOTE(review): the local string `s` below shadows the statistics list `s`
    # used above; harmless here since stats were already written, but fragile.
    mem_gui = []
    for i in range(len(memory_table)):
        tmp = ["","",[1,1]]
        if memory_table[i][0]:
            d = memory_table[i][0]
            if d['action'] == 'read':
                s = "reading from set: " + str(d['index'] ) + " victim: " + str(d.get('victim', "-1"))
                if d['status'] == 'found':
                    tmp[2][0] = 1
                    # s += 'READ HIT'
                # elif d['status'] == 'added':
                #     tmp[2][0] = 0
                #     s += 'READ MISS: added from main memory'
                else:
                    tmp[2][0] = 0
                    # s += 'READ MISS: replaced victim of tag: ' + str(d['victim'])
            elif d['action'] == 'write':
                s = "writing in set: " + str(d['index'] ) + " victim: " + str(d.get('victim', "-1"))
                if d['status'] == 'found':
                    # s += 'WRITE HIT'
                    tmp[2][0] = 1
                else:
                    tmp[2][0] = 0
                    # s += 'WRITE MISS: writing through in main memory '
            tmp[0] = s
        if memory_table[i][1]:
            d = memory_table[i][1]
            if d['action'] == 'read':
                s = "reading from set: " + str(d['index'] ) + " victim: " + str(d.get('victim', "-1"))
                if d['status'] == 'found':
                    tmp[2][1] = 1
                    # s += 'READ HIT'
                # elif d['status'] == 'added':
                #     tmp[2][1] = 0
                #     s += 'READ MISS: added from main memory'
                else:
                    tmp[2][1] = 0
                    # s += 'READ MISS: replaced victim of tag: ' + str(d['victim'])
            elif d['action'] == 'write':
                s = "writing in set: " + str(d['index'] ) # + " victim: " + str(d.get('victim', "-1"))
                if d['status'] == 'found':
                    tmp[2][1] = 1
                    # s += 'WRITE HIT'
                else:
                    tmp[2][1] = 0
                    # s += 'WRITE MISS: writing through in main memory '
            tmp[1] = s
        mem_gui.append(tmp)
    # control_hazard_signals is a list on integers 0=> nothing; 1=> red ; 2 => yellow; 3=> green
    # mem_gui is list of list of 3 elements [fetch message, mem message, [1,0]] 1=>hit 0=> miss
    # data_cache = [[['111111', '00000', 0, 3, '1111111'], ['111111', '00000', 1, 3, '1111111'], ['111111', '00000', 1, 3, '1111111']]]
    # display(l, control_hazard_signals, l_dash, mem_gui, data_cache)
    # icache and dcache are lists of list of list: [address, hexdata, dirtybit, recency, binary data]
    icache = instruction_cache.make_table()
    dcache = data_cache.make_table()
    display(l, control_hazard_signals, l_dash, mem_gui, dcache, icache)
|
# coding=utf-8
import cv2
import numpy as np
import os
import glob
from img_tool import *
# args
MPIIGAZE_path='D:\\winpython\\py36\\work\\eye-gaze\\MPIIGaze'
# TO DO: select which mode below runs
todo=1
# mode 0: test a single image
TXT= "p01"
INDEX=11
# mode 1: output darknet dataset format (RE_size=0 keeps the original crop size)
RE_size=0
OUTPUT_folder = "C:\\dataset\\gaze_o"
# mode 2: test output darknet txt
TEST_folder= "C:\\dataset\\gaze\\MPIIGaze_p00\\images"
TEST_name="day02_0017_right"
# Annotation Subset landmark order (translated): left-eye left corner, left-eye
# right corner, right-eye left corner, right-eye right corner, mouth left corner,
# mouth right corner, left-eye center, left-eye right
# NOTE(review): the last entry in the original comment repeats "left-eye right";
# presumably it should be "right-eye center" - confirm against the dataset docs.
original_path=os.path.join(MPIIGAZE_path,'Data','Original')
annotation_subset_path=os.path.join(MPIIGAZE_path,'Annotation Subset')
def get_landmarks(points, label):
    """Return the 3x2 landmark stack for the requested eye.

    Rows are (outer corner, eye center, inner corner) picked from the
    8-point annotation; an unknown label yields a 3x2 int16 zero array.
    """
    if label == 'left':
        return np.vstack([points[0], points[6], points[1]])
    if label == 'right':
        return np.vstack([points[2], points[7], points[3]])
    return np.zeros(shape=(3, 2), dtype=np.int16)
def get_crop_rect(landmarks):
    """Return (center_x, center_y, size) of a square crop around the eye.

    The crop spans the eye-corner distance plus an ~8% border on each side,
    centred midway between the two corner landmarks.
    """
    long_side = landmarks[2, 0] - landmarks[0, 0]
    margin = int(round(long_side * 0.08))
    size = long_side + margin * 2
    center_x = int(round((landmarks[2, 0] + landmarks[0, 0]) / 2.))
    center_y = int(round((landmarks[2, 1] + landmarks[0, 1]) / 2.))
    return center_x, center_y, size
def get_rect(img, landmarks):
    """Return (xmin, ymin, xmax, ymax) of a box centred on the eye middle.

    Half-width is 20% of the eye-corner distance; `img` is accepted for
    interface compatibility but not used.
    """
    ratio = 0.2
    half = int((landmarks[2, 0] - landmarks[0, 0]) * ratio)
    cx = landmarks[1, 0]
    cy = landmarks[1, 1]
    return cx - half, cy - half, cx + half, cy + half
def read_annotation(txt_path):
    """Parse an MPIIGaze annotation file.

    Each line looks like 'dayNN/img.jpg x1 y1 x2 y2 ...'; returns a list of
    {'name': 'dayNN/img.jpg', 'data': Nx2 int array of landmark points}.
    """
    with open(txt_path, 'r', encoding='utf8') as f:
        raw = f.read()
    records = []
    for chunk in raw.split('day')[1:]:
        fields = chunk.split('\n')[0].split(' ')
        records.append({
            'name': 'day' + fields[0],
            'data': np.array([int(v) for v in fields[1:]]).reshape(-1, 2),
        })
    return records
def get_nobj_crop_rect(center_x, center_y, landmarks):
    """Return four (x, y, size) crop centres near the eye that avoid it.

    The crop is slightly smaller than the eye span (negative border), and
    the four candidates sit left, right, below and above the eye centre -
    used to harvest negative ("no object") samples.
    """
    long_side = landmarks[2, 0] - landmarks[0, 0]
    shrink = -int(round(long_side * 0.08))
    size = long_side + shrink * 2
    left = (center_x - size * 0.9, center_y, size)
    right = (center_x + size * 0.9, center_y, size)
    below = (center_x, center_y + size * 0.7, size)
    above = (center_x, center_y - size * 0.8, size)
    return left, right, below, above
def process_dataset(txt_list):
    """Convert MPIIGaze annotation files into per-eye training crops.

    For every annotation txt file: load the referenced day images, crop the
    left and right eye regions plus four shifted "no-object" crops, skip
    crops that are too dark, and write images + YOLO-style label txt files
    under OUTPUT_folder.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the placement of the no_image resize calls inside the else
    branch should be confirmed against the original file.
    """
    annotations = ''  # appears unused; kept for the commented-out writer below
    for txt_file in txt_list:
        cleanlist = []
        # participant id, e.g. "p00", taken from the annotation file name
        p_list = txt_file.split('.txt')[0].split('\\')[-1]
        cleanlist = read_annotation(txt_file)
        for item in cleanlist:
            points = item['data']
            _file = item['name'].split('/')
            filename = os.path.split(_file[1])[1].split(".")[0]
            day_list = _file[0]
            image = cv2.imread(os.path.join(original_path, p_list, day_list, _file[1]))
            oh, ow, _ = image.shape
            os.makedirs(os.path.join(OUTPUT_folder, 'MPIIGaze_' + p_list, 'images'), mode=0o777, exist_ok=True)
            for act in ['left', 'right']:
                landmarks = get_landmarks(points, act)
                center_x, center_y, crop_size = get_crop_rect(landmarks)
                act_image = crop(image, center_x, center_y, crop_size, crop_size)
                # Four shifted crops that should NOT contain the eye.
                pt1, pt2, pt3, pt4 = get_nobj_crop_rect(center_x, center_y, landmarks)
                no_image1 = crop2(image, pt1)
                no_image2 = crop2(image, pt2)
                no_image3 = crop2(image, pt3)
                no_image4 = crop2(image, pt4)
                if crop_size == RE_size or RE_size == 0:
                    # Already the target size (or resizing disabled).
                    image_size = crop_size
                else:
                    act_image = resize(act_image, RE_size, RE_size)
                    image_size = RE_size
                    no_image1 = resize(no_image1, RE_size, RE_size)
                    no_image2 = resize(no_image2, RE_size, RE_size)
                    no_image3 = resize(no_image3, RE_size, RE_size)
                    no_image4 = resize(no_image4, RE_size, RE_size)
                # Top-left corner of the crop in original image coordinates.
                offset_w = center_x - int(crop_size / 2)
                offset_h = center_y - int(crop_size / 2)
                xmin, ymin, xmax, ymax = get_rect(act_image, landmarks)
                #draw_rect(act_image,xmin-offset_w,ymin-offset_h,xmax-offset_w,ymax-offset_h)
                # Mean brightness (HSV value channel) used to reject dark crops.
                hsvI = cv2.cvtColor(act_image, cv2.COLOR_BGR2HSV)
                V = np.sum(hsvI[:, :, 2]) / crop_size / crop_size
                #if V<35:
                if V < 15:
                    # Too dark: save into a "skip" folder for inspection and move on.
                    #print('skip %s %s'%(os.path.join(original_path,p_list,day_list,_file[1]),act))
                    os.makedirs(os.path.join(OUTPUT_folder, 'skip_' + p_list), mode=0o777, exist_ok=True)
                    cv2.imwrite(os.path.join(OUTPUT_folder, 'skip_' + p_list, day_list + '_' + filename + '_' + act + '.jpg'), act_image)
                    continue
                cv2.imwrite(os.path.join(OUTPUT_folder, 'MPIIGaze_' + p_list, 'images', day_list + '_' + filename + '_' + act + '.jpg'), act_image)
                act_image = None
                # Negative samples get an empty label file each.
                os.makedirs(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images'), mode=0o777, exist_ok=True)
                cv2.imwrite(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_1.jpg'), no_image1)
                with open(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_1.txt'), 'w', encoding='utf8') as f:
                    f.write('')
                no_image1 = None
                cv2.imwrite(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_2.jpg'), no_image2)
                with open(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_2.txt'), 'w', encoding='utf8') as f:
                    f.write('')
                no_image2 = None
                cv2.imwrite(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_3.jpg'), no_image3)
                with open(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_3.txt'), 'w', encoding='utf8') as f:
                    f.write('')
                no_image3 = None
                cv2.imwrite(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_4.jpg'), no_image4)
                with open(os.path.join(OUTPUT_folder, 'no_' + p_list, 'images', day_list + '_' + filename + '_' + act + '_4.txt'), 'w', encoding='utf8') as f:
                    f.write('')
                no_image4 = None
                # YOLO label: class 0 plus normalized box centre/size.
                x, y, w, h = convert(crop_size, crop_size, xmin - offset_w, ymin - offset_h, xmax - offset_w, ymax - offset_h)
                out_box_txt = '%s %s %s %s %s' % (
                    0,
                    x,
                    y,
                    w,
                    h,
                )
                with open(os.path.join(OUTPUT_folder, 'MPIIGaze_' + p_list, 'images', day_list + '_' + filename + '_' + act + '.txt'), 'w', encoding='utf8') as f:
                    f.write(out_box_txt)
            image = None
    """with open(os.path.join(OUTPUT_folder,'MPIIGaze_annotations.txt'), 'w', encoding='utf8') as e:
    e.write(annotations)"""
if __name__ == '__main__':
    # todo selects the mode: 0 = visualise one annotation, 1 = process the
    # whole dataset, 2 = inspect a previously generated output crop.
    if todo == 0:
        cleanlist = read_annotation(os.path.join(annotation_subset_path, TXT + '.txt'))
        points = cleanlist[INDEX]['data']
        _file = cleanlist[INDEX]['name'].split('/')
        image = cv2.imread(os.path.join(original_path, TXT, _file[0], _file[1]))
        for act in ['left', 'right']:
            item = get_landmarks(points, act)
            draw_points(image, item)
            xmin, ymin, xmax, ymax = get_rect(image, item)
            draw_rect(image, xmin, ymin, xmax, ymax)
        # Show the image once with both eyes annotated.
        cv2.imshow("image", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if todo == 1:
        txt_list = glob.glob(annotation_subset_path + '/*.txt')
        process_dataset(txt_list)
        print('Successfully')
    if todo == 2:
        image = test_a_output(TEST_folder, TEST_name)
        cv2.imshow("image", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
|
import linear_algebra.linear_algebra_core as core
import modeling.helpers.regression_metrics as reg_met
import modeling.helpers.regression_evaluation as reg_eval
import modeling.helpers.nn_distances as nn_dist
from tabulate import tabulate
from scipy.stats import f, t
# TODO: add preprocessing for categorical variables
# TODO: document all functions
class Model:
    """Abstract base class for predictive models.

    Subclasses must override ``fit``, ``predict``, ``evaluate`` and the
    parameter accessors.  The base implementations only raise, signalling a
    missing override.  ``NotImplementedError`` (a subclass of ``Exception``)
    replaces the original generic ``Exception`` so existing callers that
    catch ``Exception`` keep working while the intent becomes explicit.
    """

    def fit(self, x, y_true):
        """Train the model on predictors ``x`` and targets ``y_true``."""
        raise NotImplementedError('Fitting not implemented')

    def predict(self, x):
        """Return predictions for predictors ``x``."""
        raise NotImplementedError('Predicting not implemented')

    def evaluate(self, x, y_true):
        """Report performance metrics of the model on ``x``/``y_true``."""
        raise NotImplementedError('Evaluation not implemented')

    def set_params(self, parameters):
        """Set model hyper-parameters."""
        raise NotImplementedError('Not implemented')

    def get_params(self):
        """Return model hyper-parameters."""
        raise NotImplementedError('Not implemented')
class LinearRegression(Model):
    """Ordinary linear regression fitted via the normal equations."""

    def __init__(self, criterion='LS'):
        # criterion: fitting criterion; only 'LS' (least squares) is supported.
        self.criterion = criterion
        # Column vector of fitted coefficients; None until fit() is called.
        self.coefficients = None

    def fit(self, x, y_true):
        """
        Fits the parameters of X to predict the value y with model criterion
        :param x: Matrix with equal amounts of rows as y
        :param y_true: Column Vector with length equal to rows in X
        """
        if self.criterion != 'LS':
            raise Exception('Only Least Squares is implemented')
        reg_eval.regression_data_check(x, y_true)
        # enhance_matrix presumably prepends the intercept column — TODO confirm.
        x = core.enhance_matrix(x)
        # Normal equations: b = (X'X)^-1 X'y.
        self.coefficients = (x.transpose() * x).inverse() * x.transpose() * y_true

    def predict(self, x):
        """Return predictions ``X * b`` for the fitted coefficients."""
        if self.coefficients is None:
            raise Exception('Model has not been fitted yet')
        x = core.enhance_matrix(x)
        reg_eval.regression_data_check(x, width=len(self.coefficients))
        return x * self.coefficients

    def score(self, x, y_true, metric=reg_met.r_squared, number_type=float):
        """Score predictions for ``x`` against ``y_true`` using ``metric``."""
        # NOTE(review): x is enhanced here and again inside predict(); if
        # enhance_matrix is not idempotent this adds the intercept column
        # twice — confirm against linear_algebra_core.
        x = core.enhance_matrix(x)
        if self.coefficients is None:
            raise Exception('Model has not been fitted yet')
        return metric(y_true, self.predict(x), number_type)

    def evaluate(self, x, y_true):
        """
        Evaluates the performance of the trained model on a global and variable
        level. For global, RSE, R^2 and F-statistic are standard. For variables
        the SE and t-statistic is used.
        :param x: Matrix of predictors
        :param y_true: Vector of true y values
        :return: (glob_outcomes, var_outcomes) dicts of the computed metrics
        """
        # NOTE(review): same potential double enhance_matrix as in score().
        x = core.enhance_matrix(x)
        y_pred = self.predict(x)
        # [name, callable] pairs; the bare ['p-value'] entries are derived
        # from the statistic computed just before them.
        global_metrics = [['RSE', reg_eval.residual_standard_error],
                          ['R^2', reg_met.r_squared],
                          ['F-statistic', reg_eval.f_statistic],
                          ['p-value']]
        var_metrics = [['SE', reg_eval.standard_error_coefs],
                       ['t-statistic', reg_eval.t_statistic],
                       ['p-value']]
        glob_outcomes = {'Metric': [], 'Value': []}
        for i in global_metrics:
            if len(i) > 1:
                glob_outcomes['Metric'].append(i[0])
                glob_outcomes['Value'].append(i[1](x=x, y_true=y_true, y_pred=y_pred,
                                                   num_predictors=x.n_cols))
            elif i[0] == 'p-value':
                # Survival function of the F distribution at the F-statistic
                # (stored at index 2 by the loop above).
                glob_outcomes['Metric'].append(i[0])
                glob_outcomes['Value'].append(f.sf(glob_outcomes['Value'][2],
                                                   dfn=len(y_pred), dfd=x.n_cols - 1))
            else:
                raise Exception('Single value metric not implemented')
        var_outcomes = {'Column': list(range(x.n_cols)),
                        'Coefficient': self.coefficients.data}
        for i in var_metrics:
            if len(i) > 1:
                var_outcomes[i[0]] = i[1](x=x, y_true=y_true, y_pred=y_pred,
                                          coefs=var_outcomes['Coefficient'])
            elif i[0] == 'p-value':
                # Two-sided p-value from the t distribution.
                var_outcomes[i[0]] = [2 * t.sf(abs(float(score)), len(y_pred) - x.n_cols)
                                      for score in var_outcomes['t-statistic']]
        print(tabulate(glob_outcomes, headers='keys'))
        print(tabulate(var_outcomes, headers='keys'))
        return glob_outcomes, var_outcomes

    def get_params(self):
        """Return the model hyper-parameters as a dict."""
        return {'criterion': self.criterion}

    def set_params(self, criterion='LS'):
        """Set the fitting criterion."""
        self.criterion = criterion
class KNNRegression(Model):
    """k-nearest-neighbour regression with equal or inverse-distance weights."""

    def __init__(self, weight='equal', measure='euclidean'):
        # weight: 'equal' or 'distance' (inverse-distance weighting).
        self.weight = weight
        # measure: distance measure name passed through to nn_distances.
        self.measure = measure
        # Set by fit(): neighbour count and the memorised training data.
        self.k = None
        self.x = None
        self.y_true = None

    def fit(self, x, y_true, k=5):
        """Memorise the training data and the neighbour count ``k``."""
        self.x = x
        self.y_true = y_true
        if k > x.n_rows:
            raise Exception('k is larger than the amount of data points in x')
        self.k = k

    def predict(self, x):
        """Predict each row of ``x`` as the (weighted) mean of the targets of
        its k nearest training rows."""
        # [distance, train_row_index] for every (new row, training row) pair.
        distances = [[[nn_dist.distance_continuous(x.row(new_row),
                                                   self.x.row(train_row),
                                                   self.measure),
                       train_row] for train_row in range(self.x.n_rows)]
                     for new_row in range(x.n_rows)]
        # Keep only the k closest neighbours per new row.
        distances = [sorted(row)[:self.k] for row in distances]
        if self.weight == 'equal':
            predictions = [sum(self.y_true.data[i[1]] for i in row) / self.k
                           for row in distances]
        elif self.weight == 'distance':
            # Inverse-distance weighted mean; raises ZeroDivisionError on an
            # exact duplicate point (distance 0) — presumably acceptable here.
            predictions = [sum(self.y_true.data[i[1]] * i[0] ** -1 for i in row) /
                           sum(i[0] ** -1 for i in row) for row in distances]
        else:
            raise Exception('Weight type not implemented')
        return core.Vector(predictions)

    def score(self, x, y_true, metric=reg_met.r_squared, number_type=float):
        """Score predictions for ``x`` against ``y_true`` using ``metric``."""
        return metric(y_true, self.predict(x), number_type)

    def evaluate(self, x, y_true, number_type=float):
        """Print the RMSE of the fitted model on ``x``/``y_true``."""
        y_pred = self.predict(x)
        error = reg_met.root_mean_squared_error(y_true, y_pred, number_type)
        print("With {:d} neighbours, the RMSE is {:.2f}".format(self.k, error))

    def get_params(self):
        """Return hyper-parameters and the memorised training data."""
        return {'weight': self.weight, 'measure': self.measure,
                'k': self.k, 'x': self.x, 'y_true': self.y_true}

    def set_params(self, weight='equal', measure='euclidean'):
        """Set the weighting scheme and distance measure."""
        self.weight = weight
        self.measure = measure
if __name__ == "__main__":
    # Tiny smoke-test data set for manual experimentation.
    x = core.Matrix([[1, 2], [2, 2], [3, 1], [4, 2], [6, 6]])
    y = core.Vector([4, 6, 6.5, 9, 19])
    x_test = core.Matrix([[1, 1], [2, 2], [3, 3]])
    y_test = core.Vector([3, 6.5, 9])
    # model = LinearRegression()
    # model.fit(x, y)
    # print(model.coefficients)
    # print(model.predict(core.Matrix([[2, 3], [3, 3]])))
    # print(model.score(x, y))
    # print(reg_eval.correlation(x.col(0), y))
    # model.evaluate(x, y)
    # Compare KNN error for a few neighbour counts.
    knn = KNNRegression()
    for k in range(1, 4):
        knn.fit(x, y, k=k)
        y_pred = knn.predict(x_test)
        print(reg_met.mean_absolute_error(y_test, y_pred))
|
import os
import subprocess
# JupyterHub configuration: GitHub OAuth login + per-user Docker containers.
config = get_config()  # injected by JupyterHub when it loads this file
# Use GitHub OAuth to authenticate users
config.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
config.GitHubOAuthenticator.oauth_callback_url = 'https://206.12.96.12/hub/oauth_callback'
# SECURITY(review): OAuth client credentials are hard-coded in this file;
# they should be loaded from environment variables and these values rotated.
config.GitHubOAuthenticator.client_id = 'a4b7e7154c6f9d8497fc'
config.GitHubOAuthenticator.client_secret = '83784c5306df793900143b8d44aef24a379041ce'
# set of usernames of admin users
config.Authenticator.admin_users = ["fherwig", "syang", "lsiemens"]
# only the admin users may log in
config.Authenticator.whitelist = config.Authenticator.admin_users
# Spawn users in dockers
network_name = os.environ["DOCKER_NETWORK_NAME"]
hub_ip = os.environ["DOCKER_MACHINE_NAME"]
config.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
config.DockerSpawner.container_image = "lsiemens/singleuser"
config.DockerSpawner.use_internal_ip = True
config.DockerSpawner.network_name = network_name
config.DockerSpawner.extra_host_config = {"network_mode":network_name}
config.DockerSpawner.extra_start_kwargs = {"network_mode":network_name}
config.DockerSpawner.remove_containers = True
config.DockerSpawner.hub_ip_connect = hub_ip
# the docker instances need to access the HUB
config.JupyterHub.hub_ip = hub_ip
config.JupyterHub.hub_port = 8000
# using ssl so set to 443
config.JupyterHub.port = 443
config.JupyterHub.ssl_cert = '/srv/jupyterhub/SSL/ssl.crt'
config.JupyterHub.ssl_key = '/srv/jupyterhub/SSL/ssl.key'
# make cookie secret and auth token (strip the trailing newline from openssl)
cookie = subprocess.Popen(["openssl", "rand", "2049"], stdout=subprocess.PIPE)
token = subprocess.Popen(["openssl", "rand", "-hex", "129"], stdout=subprocess.PIPE)
config.JupyterHub.cookie_secret = cookie.communicate()[0][:-1]
config.JupyterHub.proxy_auth_token = token.communicate()[0][:-1]
# Keep the hub database on the persistent data volume.
data_mount_point = os.environ.get("DATA_VOLUME")
config.JupyterHub.db_url = os.path.join("sqlite:///", data_mount_point, "jupyterhub.sqlite")
print(config.JupyterHub.db_url)
|
"""File containing the shared constants of the tests
.. moduleauthor:: Armand BENETEAU <armand.beneteau@iot.bzh>
*Date: 26/07/2019*
*License:*
*Copyright (C) 2019 "IoT.bzh"*
*Licensed under the Apache License, Version 2.0 (the "License");\
you may not use this file except in compliance with the License.\
You may obtain a copy of the License at:*
*http://www.apache.org/licenses/LICENSE-2.0*
*Unless required by applicable law or agreed to in writing, software\
distributed under the License is distributed on an "AS IS" BASIS,\
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\
implied.*
*See the License for the specific language governing permissions and\
limitations under the License.*
"""
# Server URL without port
SRV_URL = "http://localhost:8080"
# Dictionary keys expected in the server's JSON answers
JSON_VERSION_KEY = "version"
JSON_VERBS_LIST_KEY = "verbs_list"
JSON_VERB_KEY = "verb"
JSON_MSG_KEY = "message"
# Software version
VERSION = 'v1'
from more.webassets import WebassetsApp
class MapboxApp(WebassetsApp):
    """ Provides mapbox integration
    :class:`onegov.core.framework.Framework` based applications.

    Doesn't do much except serve the mapbox public token, so we can store it
    in configuration and not with the source. Note that this token is inherently
    unsafe and must be the *public* token.

    Do not use private tokens!

    If we wanted to avoid this we would have to use a mapbox proxy server,
    which seems a bit too much. If we detect abuse of the public token we
    just switch to a new one. If it must be we can even automatically rotate
    the token regularly.
    """

    def configure_mapbox(self, **cfg):
        """ Configures the mapbox.

        The following configuration options are accepted:

        :mapbox_token:
            The public mapbox token to be used for the mapbox api.

        :raises AssertionError: if a non-public (not 'pk'-prefixed) token
            is configured.
        """
        token = cfg.get('mapbox_token', 'pk')
        # Explicit raise instead of `assert`: asserts are stripped when
        # Python runs with -O, which would silently disable this safety
        # check. AssertionError is kept for backward compatibility.
        if not token.startswith('pk'):
            raise AssertionError('Only public mapbox tokens are allowed!')
        self.mapbox_token = cfg.get('mapbox_token', None)
@MapboxApp.webasset_path()
def get_js_path():
    # Directory (relative to this module) searched for javascript assets.
    return 'assets/js'
@MapboxApp.webasset_path()
def get_css_path():
    # Directory (relative to this module) searched for css assets.
    return 'assets/css'
@MapboxApp.webasset('leaflet', filters={'css': ['datauri', 'custom-rcssmin']})
def get_leaflet_asset():
    """ Bundles leaflet with its plugins: stylesheets first, then scripts. """
    yield from (
        'leaflet.css',
        'leaflet-easybutton.css',
        'leaflet-control-geocoder.css',
        'leaflet-integration.css',
        'leaflet.js',
        'leaflet-sleep.js',
        'leaflet-easybutton.js',
        'leaflet-control-geocoder.js',
        'leaflet-integration.js',
    )
@MapboxApp.webasset('proj4js')
def get_proj4js_asset():
    """ Bundles proj4js and its leaflet adapter. """
    yield from ('proj4js.js', 'proj4js-leaflet.js')
@MapboxApp.webasset('geo-mapbox')
def get_geo_mapbox():
    # Plain mapbox maps only need the leaflet bundle.
    yield 'leaflet'
@MapboxApp.webasset('geo-vermessungsamt-winterthur')
def get_geo_vermessungsamt_winterthur():
    """ Asset bundle for the Winterthur land-survey map layer. """
    yield from (
        'leaflet',
        'proj4js',
        'geo-vermessungsamt-winterthur.js',
    )
@MapboxApp.webasset('geo-zugmap-luftbild')
def get_geo_zugmap_luftbild():
    """ Asset bundle for the ZugMap aerial-imagery layer. """
    yield from (
        'leaflet',
        'proj4js',
        'geo-zugmap.js',
        'geo-zugmap-luftbild.js',
    )
@MapboxApp.tween_factory()
def inject_mapbox_api_token_tween_factory(app, handler):
    """ Adds the public mapbox token to the page's body element. """

    # Build the replacement bytes once per factory, not per request.
    token_attribute = '<body data-mapbox-token="{}"'.format(app.mapbox_token)
    token_attribute = token_attribute.encode('utf-8')

    def inject_mapbox_api_token_tween(request):
        response = handler(request)

        if request.app.mapbox_token:
            # only the first <body> tag receives the data attribute
            response.body = response.body.replace(b'<body', token_attribute, 1)

        return response

    return inject_mapbox_api_token_tween
|
import io
import os
import unittest
from stango import Stango
from stango.files import Files
from . import StangoTestCase, make_suite, view_value, view_template
# Shared no-op view for tests that don't care about the rendered content.
dummy_view = view_value('')
class GenerateTestCase(StangoTestCase):
    """Tests for Stango.generate(): rendering views into static files."""

    def setup(self):
        # Fresh temporary output directory and a manager with the default
        # index file name for every test.
        self.tmp = self.tempdir()
        self.manager = Stango()
        self.manager.index_file = 'index.html'

    def test_generate_simple(self):
        # '' maps to the index file; named paths keep their file name.
        self.manager.files += [
            ('', view_value('foobar')),
            ('barfile.txt', view_value('barfoo')),
        ]
        self.manager.generate(self.tmp)
        self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'index.html'])
        with open(os.path.join(self.tmp, 'index.html')) as fobj:
            self.eq(fobj.read(), 'foobar')
        with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
            self.eq(fobj.read(), 'barfoo')

    def test_generate_dest_is_non_dir(self):
        # Generating into an existing regular file must fail cleanly.
        self.manager.files = Files(
            ('', dummy_view),
        )
        dest_path = os.path.join(self.tmp, 'dest.txt')
        with open(dest_path, 'w') as fobj:
            fobj.write('foo')
        exc = self.assert_raises(ValueError, self.manager.generate, dest_path)
        self.eq(str(exc), "'%s' is not a directory" % dest_path)
        # Check the file wasn't modified
        self.eq(os.listdir(self.tmp), ['dest.txt'])
        with open(os.path.join(self.tmp, 'dest.txt'), 'r') as fobj:
            self.eq(fobj.read(), 'foo')

    def test_generate_outdir_exists(self):
        # Create a file and a directory to outdir
        with open(os.path.join(self.tmp, 'foo'), 'w') as fobj:
            fobj.write('bar')
        os.mkdir(os.path.join(self.tmp, 'dummydir'))
        self.eq(sorted(os.listdir(self.tmp)), ['dummydir', 'foo'])
        self.manager.files = Files(
            ('', view_value('baz')),
        )
        self.manager.generate(self.tmp)
        # Check that the old destdir contents were removed
        self.eq(os.listdir(self.tmp), ['index.html'])

    def test_generate_different_index_file(self):
        # The configured index_file name is used for the '' path.
        self.manager.index_file = 'foofile.txt'
        self.manager.files += [
            ('', view_value('foobar')),
            ('barfile.txt', view_value('barfoo')),
        ]
        self.manager.generate(self.tmp)
        self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'foofile.txt'])
        with open(os.path.join(self.tmp, 'foofile.txt')) as fobj:
            self.eq(fobj.read(), 'foobar')
        with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
            self.eq(fobj.read(), 'barfoo')

    def test_view_returns_a_bytes_object(self):
        # bytes results are written verbatim.
        self.manager.files = Files(
            ('', view_value(b'\xde\xad\xbe\xef')),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'\xde\xad\xbe\xef')

    def test_view_returns_a_bytearray_object(self):
        # bytearray results are accepted just like bytes.
        self.manager.files = Files(
            ('', view_value(bytearray(b'\xba\xdc\x0f\xfe'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'\xba\xdc\x0f\xfe')

    def test_view_returns_a_filelike_object_with_str_contents(self):
        # File-like results are read and their str contents written out.
        self.manager.files = Files(
            ('', view_value(io.StringIO('foobar'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
            self.eq(fobj.read(), 'foobar')

    def test_view_returns_a_filelike_object_with_bytes_contents(self):
        # File-like results with bytes contents also work.
        self.manager.files = Files(
            ('', view_value(io.BytesIO(b'barfoo'))),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
            self.eq(fobj.read(), 'barfoo')

    def test_view_renders_a_template(self):
        # Template views render with the supplied context.
        self.manager.template_dirs.insert(0, self.template_path)
        self.manager.files = Files(
            ('', view_template('value.txt'), {'value': 'foobar'})
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html')) as fobj:
            self.eq(fobj.read(), 'value is: foobar')

    def test_no_index_file(self):
        # A directory path requires an index_file to name the output file.
        self.manager.index_file = None
        self.manager.files = Files(
            ('quux/', dummy_view),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "Directory path and no index_file: 'quux/'")

    def test_view_returns_None(self):
        self.manager.files = Files(
            ('', view_value(None)),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "The result of view 'value_returner' for path '' is not a str, bytes or bytearray instance or a file-like object")

    def test_view_returns_an_integer(self):
        self.manager.files = Files(
            ('foo.txt', view_value(1)),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "The result of view 'value_returner' for path 'foo.txt' is not a str, bytes or bytearray instance or a file-like object")

    def test_view_returns_a_filelike_object_with_invalid_contents(self):
        # A read() that yields a non-str/bytes value must be rejected.
        class InvalidFile(object):
            def read(self):
                return 42
        self.manager.files = Files(
            ('', view_value(InvalidFile())),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), "Contents of the file-like object, returned by view 'value_returner' for path '', is not a str, bytes or bytearray instance")

    def test_post_render_hook(self):
        # The hook may transform the rendered bytes before writing.
        def post_render_hook(context, data):
            return data + b' hurr durr'
        self.manager.add_hook('post_render_hook', post_render_hook)
        self.manager.files = Files(
            ('', view_value('foobar')),
        )
        self.manager.generate(self.tmp)
        self.eq(os.listdir(self.tmp), ['index.html'])
        with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
            self.eq(fobj.read(), b'foobar hurr durr')

    def test_post_render_hook_returns_None(self):
        self.manager.add_hook('post_render_hook', lambda x, y: None)
        self.manager.files = Files(
            ('', view_value('foobar')),
        )
        exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
        self.eq(str(exc), 'The result of post_render_hook is not a bytes or bytearray instance for index.html')
def suite():
    # Aggregates this module's test case into a unittest suite.
    return make_suite(GenerateTestCase)
|
# -*- coding: utf-8 -*-
from os import environ
import pytest
import rethinkdb
def pytest_addoption(parser):
    """Register the RethinkDB command-line options and ini settings."""
    group = parser.getgroup('rethinkdb')
    group.addoption(
        '--rethinkdb-host',
        action='store',
        dest='rethinkdb_host',
        default='localhost',
        help='Host of the RethinkDB test instance.',
    )
    group.addoption(
        '--rethinkdb-port',
        action='store',
        dest='rethinkdb_port',
        default=28015,
        help='Port of the RethinkDB test instance.',
    )
    # NOTE(review): the ini values below are registered but not read by the
    # `conn` fixture in this file — confirm whether they are consumed
    # elsewhere before removing them.
    parser.addini('RETHINKDB_HOST',
                  'Host of the RethinkDB test instance',
                  default='localhost')
    parser.addini('RETHINKDB_PORT', 'The driver port', default=28015)
    parser.addini('RETHINKDB_DB',
                  'The database used if not explicitly specified in a query',
                  default='test')
    parser.addini('RETHINKDB_USER',
                  'The user account to connect as',
                  default='admin')
    parser.addini('RETHINKDB_PASSWORD',
                  'The password for the user account to connect as',
                  default='')
    parser.addini('RETHINKDB_TIMEOUT',
                  'Timeout period in seconds for the connection to be opened',
                  default=20)
    parser.addini('RETHINKDB_SSL',
                  'A hash of options to support SSL connections',
                  default=None)
@pytest.fixture
def conn(request):
    """Open a RethinkDB connection from the registered command-line options."""
    options = request.config.option
    return rethinkdb.connect(
        host=options.rethinkdb_host,
        port=options.rethinkdb_port,
    )
|
from .curl_connector import CurlConnector
|
from typing import List
from operations import Operation, tabs
from operations.variable import *
class Split(Operation):
    """The split operation parallelizes a number of operations; the
    execution time is the time of the longest path."""

    # Annotation only — no class-level default.  The original declared
    # `ops: [Operation] = []`, which is both a malformed annotation (a list
    # literal instead of List[Operation]) and a mutable class attribute
    # shared between instances.  __init__ always sets the instance attribute.
    ops: List[Operation]

    def __init__(self, *args):
        # Each positional argument is one parallel branch.
        self.ops = list(args)

    @property
    def exec_time(self) -> int:
        """Execution time of the split: the longest cumulative branch time
        (0 when there are no branches, as before)."""
        return int(max((op.cum_exec_time() for op in self.ops), default=0))

    @property
    def next_operation(self):
        # A split never chains into a following operation (see then()).
        return None

    # implements Operation.count - takes the cumulative count over branches
    def count(self, fn) -> int:
        return sum(op.count(fn) for op in self.ops)

    # implements Operation.sub on every branch
    def sub(self, *args):
        for op in self.ops:
            op.sub(*args)

    # implements Operation.vars
    def vars(self) -> List[Variable]:
        # Fixme here we assume that all branches in a split contain exactly the same variables
        return self.ops[0].vars()

    def print_pseudo(self, indent=0) -> str:
        """Pseudo-code of all branches, each indented one level deeper."""
        return "\n".join(op.print_pseudo(indent=indent + 1) for op in self.ops)

    def then(self, next_operation: Operation) -> Operation:
        raise NameError("attempted to chain a single event after a split")  # a split cannot have a next operation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import pretend
from warehouse.integrations.github import utils, views
class TestGitHubDiscloseToken:
    """Tests for the views.github_disclose_token token-scanning endpoint."""

    def test_github_disclose_token(self, pyramid_request, monkeypatch):
        # Happy path: signature verifies, disclosures are handed to the analyzer.
        pyramid_request.headers = {
            "GITHUB-PUBLIC-KEY-IDENTIFIER": "foo",
            "GITHUB-PUBLIC-KEY-SIGNATURE": "bar",
        }
        metrics = pretend.stub()
        pyramid_request.body = "[1, 2, 3]"
        pyramid_request.json_body = [1, 2, 3]
        pyramid_request.registry.settings = {
            "github.token": "token",
            "github.token_scanning_meta_api.url": "http://foo",
        }
        pyramid_request.find_service = lambda *a, **k: metrics
        http = pyramid_request.http = pretend.stub()
        verify = pretend.call_recorder(lambda **k: True)
        verifier = pretend.stub(verify=verify)
        verifier_cls = pretend.call_recorder(lambda **k: verifier)
        monkeypatch.setattr(utils, "GitHubTokenScanningPayloadVerifier", verifier_cls)
        analyze_disclosures = pretend.call_recorder(lambda **k: None)
        monkeypatch.setattr(utils, "analyze_disclosures", analyze_disclosures)
        response = views.github_disclose_token(pyramid_request)
        assert response.status_code == 204
        # The verifier got the configured session/token/url ...
        assert verifier_cls.calls == [
            pretend.call(
                session=http, metrics=metrics, api_token="token", api_url="http://foo"
            )
        ]
        # ... and verified the raw body against the request headers.
        assert verify.calls == [
            pretend.call(payload="[1, 2, 3]", key_id="foo", signature="bar")
        ]
        assert analyze_disclosures.calls == [
            pretend.call(
                request=pyramid_request,
                disclosure_records=[1, 2, 3],
                origin="github",
                metrics=metrics,
            )
        ]

    def test_github_disclose_token_no_token(self, pyramid_request, monkeypatch):
        # Works even when no github.token is configured.
        pyramid_request.headers = {
            "GITHUB-PUBLIC-KEY-IDENTIFIER": "foo",
            "GITHUB-PUBLIC-KEY-SIGNATURE": "bar",
        }
        metrics = pretend.stub()
        pyramid_request.body = "[1, 2, 3]"
        pyramid_request.json_body = [1, 2, 3]
        pyramid_request.registry.settings = {
            "github.token_scanning_meta_api.url": "http://foo"
        }
        pyramid_request.find_service = lambda *a, **k: metrics
        pyramid_request.http = pretend.stub()
        verify = pretend.call_recorder(lambda **k: True)
        verifier = pretend.stub(verify=verify)
        verifier_cls = pretend.call_recorder(lambda **k: verifier)
        monkeypatch.setattr(utils, "GitHubTokenScanningPayloadVerifier", verifier_cls)
        analyze_disclosures = pretend.call_recorder(lambda **k: None)
        monkeypatch.setattr(utils, "analyze_disclosures", analyze_disclosures)
        response = views.github_disclose_token(pyramid_request)
        assert response.status_code == 204

    def test_github_disclose_token_verify_fail(self, monkeypatch, pyramid_request):
        # A failed signature check yields a 400.
        pyramid_request.headers = {
            "GITHUB-PUBLIC-KEY-IDENTIFIER": "foo",
            "GITHUB-PUBLIC-KEY-SIGNATURE": "bar",
        }
        metrics = pretend.stub()
        pyramid_request.body = "[1, 2, 3]"
        pyramid_request.find_service = lambda *a, **k: metrics
        pyramid_request.registry.settings = {
            "github.token": "token",
            "github.token_scanning_meta_api.url": "http://foo",
        }
        pyramid_request.http = pretend.stub()
        verify = pretend.call_recorder(lambda **k: False)
        verifier = pretend.stub(verify=verify)
        verifier_cls = pretend.call_recorder(lambda **k: verifier)
        monkeypatch.setattr(utils, "GitHubTokenScanningPayloadVerifier", verifier_cls)
        response = views.github_disclose_token(pyramid_request)
        assert response.status_int == 400

    def test_github_disclose_token_verify_invalid_json(self, monkeypatch):
        # Invalid JSON body yields a 400 and a json_error metric.
        verify = pretend.call_recorder(lambda **k: True)
        verifier = pretend.stub(verify=verify)
        verifier_cls = pretend.call_recorder(lambda **k: verifier)
        monkeypatch.setattr(utils, "GitHubTokenScanningPayloadVerifier", verifier_cls)
        metrics = collections.Counter()

        def metrics_increment(key):
            metrics.update([key])

        # We need to raise on a property access, can't do that with a stub.
        class Request:
            headers = {
                "GITHUB-PUBLIC-KEY-IDENTIFIER": "foo",
                "GITHUB-PUBLIC-KEY-SIGNATURE": "bar",
            }
            body = "["

            @property
            def json_body(self):
                # raises json.JSONDecodeError because body is truncated JSON
                return json.loads(self.body)

            def find_service(self, *a, **k):
                return pretend.stub(increment=metrics_increment)

            response = pretend.stub(status_int=200)
            http = pretend.stub()
            registry = pretend.stub(
                settings={
                    "github.token": "token",
                    "github.token_scanning_meta_api.url": "http://foo",
                }
            )

        request = Request()
        response = views.github_disclose_token(request)
        assert response.status_int == 400
        assert metrics == {"warehouse.token_leak.github.error.payload.json_error": 1}

    def test_github_disclose_token_wrong_payload(self, pyramid_request, monkeypatch):
        # Valid JSON of the wrong shape yields a 400 and a format metric.
        pyramid_request.headers = {
            "GITHUB-PUBLIC-KEY-IDENTIFIER": "foo",
            "GITHUB-PUBLIC-KEY-SIGNATURE": "bar",
        }
        metrics = collections.Counter()

        def metrics_increment(key):
            metrics.update([key])

        metrics_service = pretend.stub(increment=metrics_increment)
        pyramid_request.body = "{}"
        pyramid_request.json_body = {}
        pyramid_request.registry.settings = {
            "github.token": "token",
            "github.token_scanning_meta_api.url": "http://foo",
        }
        pyramid_request.find_service = lambda *a, **k: metrics_service
        pyramid_request.http = pretend.stub()
        verify = pretend.call_recorder(lambda **k: True)
        verifier = pretend.stub(verify=verify)
        verifier_cls = pretend.call_recorder(lambda **k: verifier)
        monkeypatch.setattr(utils, "GitHubTokenScanningPayloadVerifier", verifier_cls)
        response = views.github_disclose_token(pyramid_request)
        assert response.status_code == 400
        assert metrics == {"warehouse.token_leak.github.error.format": 1}
|
from mock import patch
from data import word_frequencies
from spec.mamba import *
# Patch file loading so the spec runs against the in-memory fixture below
# instead of the real frequency data on disk.
patcher = patch.object(word_frequencies.data, 'open_project_path')

# Ten common words plus one multi-word entry, in "word <count>" format.
_SMALL_FILE = textwrap.dedent("""
the 23135851162
of 13151942776
and 12997637966
to 12136980858
a 9081174698
in 8469404971
for 5933321709
is 4705743816
on 3750423199
that 3400031103
multiple words 1000000000
""").strip()
# mamba-style spec for the word_frequencies module.
with description('word_frequencies'):
    with before.all:
        # Route file loading to the fixture for the whole spec.
        mock = patcher.start()
        mock.return_value = _SMALL_FILE.split('\n')

    with after.all:
        patcher.stop()

    with it('loads'):
        expect(word_frequencies.load_from_file('test')).to(have_len(11))

    with it('memoizes one file result'):
        # load() runs once per distinct file name, cached afterwards.
        with patch.object(word_frequencies, 'load') as mock_load:
            expect(mock_load.call_count).to(equal(0))
            word_frequencies.load_from_file('example1')
            expect(mock_load.call_count).to(equal(1))
            word_frequencies.load_from_file('example1')
            expect(mock_load.call_count).to(equal(1))
            word_frequencies.load_from_file('example2')
            expect(mock_load.call_count).to(equal(2))

    with it('should have results'):
        # Walking the trie yields the single words by descending frequency.
        t = word_frequencies.load_from_file('test')
        expect(list(t.walk(
            ['abcdefghijklmnopqrstuvwxyz'] * 4,
            exact_match=False)
        )).to(equal([
            ('the', 23135851162),
            ('of', 13151942776),
            ('and', 12997637966),
            ('to', 12136980858),
            ('a', 9081174698),
            ('in', 8469404971),
            ('for', 5933321709),
            ('is', 4705743816),
            ('on', 3750423199),
            ('that', 3400031103)
        ]))

    with it('should should store multi-word results'):
        t = word_frequencies.load_from_file('test')
        expect(t).to(have_key('multiple words'))
|
from sentiment_analysis import SentimentAnalysis
import pandas as pd
from tqdm import tqdm
import os
import random
import numpy as np
# Fix the RNG seeds so runs are reproducible.
seed = 42
random.seed(seed)
np.random.seed(seed)
def check_bias(results, alpha):
    """Decide whether the mutant predictions reveal gender bias.

    `results` holds the prediction for the original text, followed by the
    male-mutant predictions and then the same number of female-mutant
    predictions.  The text is flagged as biased when the positive-prediction
    rates of the two mutant groups differ by `alpha` or more.
    """
    if len(results) == 1:
        # Only the original text — no mutants to compare, so not biased.
        return False
    mid = (len(results) - 1) // 2
    male_results = results[1:mid + 1]
    female_results = results[mid + 1:]
    assert(len(male_results) == len(female_results))
    pos_rate_m = sum(male_results) / len(male_results)
    pos_rate_f = sum(female_results) / len(female_results)
    # Property (2): fair iff |pos_M - pos_F| < alpha.
    return abs(pos_rate_m - pos_rate_f) >= alpha
def predict_on_mutants(df, mutant_dir, sa_system, path_to_result):
    '''
    Given `df`, the dataframe containing original test data
    The function goes to `path_to_mutant`, which stores pre-generated mutants
    Then it use `sa_system` to predict sentiments of mutants
    and store results in `path_to_result`

    Output format, one line per text:
    index, true label, prediction for the original text, is_bias
    '''
    with open(path_to_result, 'w') as f:
        for index, row in tqdm(df.iterrows(), desc="Evaluate"):
            label = row["label"]
            # sentiment = row["sentiment"]
            # if sentiment >= 0.5:
            #     label = 1
            # else:
            #     label = 0
            text = row["sentence"]  # original text
            # Mutant file is keyed by the dataframe index.
            path_to_mutant = mutant_dir + str(index) + '.csv'
            # The original text is always the first element of the batch.
            mutants = [text]
            if os.path.exists(path_to_mutant):
                # if there are generated mutants
                df_mutant = pd.read_csv(path_to_mutant, names=["label", "sentence", "template"], sep="\t")
                for index_new, row_new in df_mutant.iterrows():
                    mutants.append(row_new["sentence"])
            results = []
            results = sa_system.predict_batch(mutants)
            is_bias = check_bias(results, alpha=0.001)
            to_write = str(index) + ',' + str(label) + ',' + str(results[0]) + ',' + str(is_bias) + '\n'
            # each line in this file
            # index, true label, results of original text, is_bias
            f.write(to_write)
def analyze_performance(path_to_result, mutant_dir):
    '''
    Given `path_to_result`, which stores the file generated by
    predict_on_mutants(...), analyze the accuracy of fair/biased/total
    predictions.

    Each line of the result file is:
    index, true label, predicted label of the original text, is_bias

    A text is grouped by whether a mutant file exists for it (i.e. whether
    it contains gender words) and by its is_bias flag.
    '''
    with open(path_to_result, 'r') as f:
        lines = f.readlines()

    no_gender_count = 0            # texts without generated mutants
    no_gender_correct_count = 0
    total_count = len(lines)
    total_correct_count = 0
    fair_correct_count = 0
    fair_count = 0
    biased_count = 0
    biased_and_correct_count = 0

    for line in lines:
        parts = line.split(',')
        index = parts[0]
        true_label = parts[1]
        pred_label = parts[2]
        is_bias = parts[3].strip()
        path_to_mutant = mutant_dir + str(index) + '.csv'
        if true_label == pred_label:
            total_correct_count += 1
        if os.path.exists(path_to_mutant):
            # Mutants exist => the text contains gender words.
            if is_bias == "False":
                fair_count += 1
                if true_label == pred_label:
                    fair_correct_count += 1
            if is_bias == 'True':
                biased_count += 1
                if true_label == pred_label:
                    biased_and_correct_count += 1
        else:
            no_gender_count += 1
            if true_label == pred_label:
                no_gender_correct_count += 1

    def _ratio(correct, total):
        # Guard against ZeroDivisionError when a category is empty
        # (the original code crashed when fair_count/biased_count was 0).
        return 1.0 * correct / total if total else 0.0

    print("--------**************--------")
    print("Correct Predictions: ", total_correct_count)
    print("Total Predictions: ", total_count)
    print("Accuracy: ", _ratio(total_correct_count, total_count))
    print("--------**************--------")
    print("with gender word--fair----Correct Predictions: ", fair_correct_count)
    print("Total Predictions: ", fair_count)
    print("Accuracy: ", _ratio(fair_correct_count, fair_count))
    print("--------**************--------")
    print("Correct and Biased Predictions: ", biased_and_correct_count)
    print("Total Biased Predictions: ", biased_count)
    print("Accuracy on Biased Predictions: ", _ratio(biased_and_correct_count, biased_count))
def minority_or_majority(mutants, majority):
    """Predict every mutant with the module-level `sa_system` and return
    either the majority or the minority label among those predictions.

    :param mutants: iterable of mutant sentences to classify
    :param majority: when True return the majority label, else the minority
    :return: 0 or 1 (a tie counts as majority 0 / minority 1)
    """
    predictions = [sa_system.predict(mutant) for mutant in mutants]
    ones = sum(1 for p in predictions if p == 1)
    zeros = len(predictions) - ones
    # Strict '>' keeps the original tie-breaking: a tie yields majority 0.
    if ones > zeros:
        majority_result, minority_result = 1, 0
    else:
        majority_result, minority_result = 0, 1
    # Preserve the original `majority == True` comparison exactly.
    return majority_result if majority == True else minority_result
def analyze_majority_performance(path_to_result, mutant_dir):
    '''
    Given `path_to_result`, which stores the file generated by predict_on_mutants(...)
    analyze the accuracy of biased/total predictions, replacing each original
    prediction with the *majority vote* over its generated mutants when a
    mutant file exists.

    :param path_to_result: file whose lines look like "index,true,pred,is_bias"
    :param mutant_dir: directory containing "<index>.csv" mutant files
    '''
    with open(path_to_result, 'r') as f:
        lines = f.readlines()
    total_count = len(lines)
    total_correct_count = 0
    biased_count = 0
    count = 0  # correct predictions that also had mutants available
    biased_and_correct_count = 0
    for line in lines:
        # Line layout: index, true label, predicted label, bias flag.
        index = line.split(",")[0]
        true_label = line.split(',')[1]
        pred_label = line.split(',')[2]
        is_bias = line.split(',')[3].strip()
        path_to_mutant = mutant_dir + str(index) + '.csv'
        if os.path.exists(path_to_mutant):
            # if there are generated mutants
            mutants = []
            df_mutant = pd.read_csv(path_to_mutant, names=["label", "sentence", "mutant"], sep="\t")
            for index_new, row_new in df_mutant.iterrows():
                mutants.append(row_new["sentence"])
            # Replace the stored prediction with the mutants' majority vote.
            result = minority_or_majority(mutants, True)
        else:
            result = pred_label
        if str(true_label) == str(result):
            total_correct_count += 1
        if is_bias == 'True':
            biased_count += 1
            if str(true_label) == str(result):
                biased_and_correct_count += 1
        if os.path.exists(path_to_mutant) and str(true_label) == str(result):
            count += 1
    print(count)
    print("--------**************--------")
    print("Correct Predictions: ", total_correct_count)
    print("Total Predictions: ", total_count)
    print("Accuracy: ", 1.0 * total_correct_count / total_count)
    print("--------**************--------")
    print("Correct and Biased Predictions: ", biased_and_correct_count)
    print("Total Biased Predictions: ", biased_count)
    # NOTE(review): raises ZeroDivisionError when no line is flagged as biased.
    print("Accuracy on Biased Predictions: ", 1.0 * biased_and_correct_count / biased_count)
def analyze_template_performance(path_to_result, mutant_dir):
    '''
    Given `path_to_result`, which stores the file generated by predict_on_mutants(...)
    analyze the accuracy of biased/total predictions, re-predicting each case
    from its mutation *template* instead of the original sentence.

    :param path_to_result: file whose lines look like "index,true,pred,is_bias"
    :param mutant_dir: directory containing "<index>.csv" mutant files
    '''
    with open(path_to_result, 'r') as f:
        lines = f.readlines()
    total_count = len(lines)
    total_correct_count = 0
    biased_count = 0
    count = 0  # correct predictions that also had mutants available
    biased_and_correct_count = 0
    for line in lines:
        index = line.split(",")[0]
        true_label = line.split(',')[1]
        pred_label = line.split(',')[2]
        is_bias = line.split(',')[3].strip()
        path_to_mutant = mutant_dir + str(index) + '.csv'
        if os.path.exists(path_to_mutant):
            # if there are generated mutants
            mutants = []
            df_mutant = pd.read_csv(path_to_mutant, names=["label", "sentence", "mutant"], sep="\t")
            for index_new, row_new in df_mutant.iterrows():
                mutants.append(row_new["sentence"])
                # NOTE(review): only the *last* row's template survives the loop.
                template_content = row_new["mutant"]
            # Exclude under-mutated cases from the total as well as the tally.
            if len(mutants)<20:
                total_count -= 1
                continue
            try:
                result = sa_system.predict(template_content)
            except:
                # NOTE(review): if predict() raises, `result` stays unbound and
                # the comparison below raises NameError — consider a fallback.
                pass
        else:
            result = pred_label
        if str(true_label) == str(result):
            total_correct_count += 1
        if is_bias == 'True':
            biased_count += 1
            if str(true_label) == str(result):
                biased_and_correct_count += 1
        if os.path.exists(path_to_mutant) and str(true_label) == str(result):
            count += 1
    print(count)
    print("--------**************--------")
    print("Correct Predictions: ", total_correct_count)
    print("Total Predictions: ", total_count)
    print("Accuracy: ", 1.0 * total_correct_count / total_count)
    print("--------**************--------")
    print("Correct and Biased Predictions: ", biased_and_correct_count)
    print("Total Biased Predictions: ", biased_count)
    # NOTE(review): ZeroDivisionError when biased_count is 0.
    print("Accuracy on Biased Predictions: ", 1.0 * biased_and_correct_count / biased_count)
if __name__ == '__main__':
    ### initialize an SA system (BERT-based sentiment classifier)
    model_checkpoint='./../models/epoch20.pt'
    bert_config_file='./../models/uncased_L-12_H-768_A-12/bert_config.json'
    vocab_file='./../models/uncased_L-12_H-768_A-12/vocab.txt'
    sa_system = SentimentAnalysis(model_checkpoint=model_checkpoint,
                                  bert_config_file=bert_config_file,
                                  vocab_file=vocab_file)
    mutant_dir = "../data/biasfinder/gender/sst/each3/each/"
    # the folder that stores generated mutants.
    df = pd.read_csv("../asset/new_sst_test.csv", header = 0, sep=",")
    # df = pd.read_csv("../asset/new_imdb_test.csv", names=["label", "sentence"], sep="\t")
    # original test set
    alpha = 0.001 # specify "tolerance to bias"
    path_to_result = '../result/395_sst_result_' + str(alpha) + ".txt"
    # predict_on_mutants(df, mutant_dir, sa_system, path_to_result)
    # you don't have to call this each time you run
    analyze_performance(path_to_result, mutant_dir)
    # analyze_majority_performance(path_to_result, mutant_dir)
# Minimal Cython build script: `python setup.py build_ext --inplace`
# compiles shift.pyx into an importable extension module.
# NOTE(review): distutils is removed in Python 3.12 — migrate to setuptools.
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
    name = 'Shift utility',
    ext_modules = cythonize("shift.pyx"),
    # presumably shift.pyx uses the NumPy C API and needs its headers — confirm
    include_dirs=[numpy.get_include()]
)
|
from histogram import frequency
def histogram(path='twelve_years.txt'):
    """Read *path* and return a word-frequency dictionary via `frequency`.

    The default keeps the original hard-coded file, so existing callers
    (`histogram()`) behave identically; the path is now parameterizable.

    :param path: text file to analyse
    :return: whatever `frequency` returns for the word list (a dict of counts)
    """
    with open(path, 'r') as f:
        # Splitting on a single space (not .split()) keeps newlines attached
        # to words, matching the original behaviour exactly.
        words = f.read().split(" ")
    # Removed `total_word_count`: it was computed but never used.
    return frequency(words)
# Build the module-level frequency table once at import time.
my_dictionary = histogram()
def intermediate_histogram(my_dictionary):
    """Print every word/count pair of *my_dictionary*, one per line."""
    for word, occurrences in my_dictionary.items():
        print('{} {}'.format(word, occurrences))
if __name__ == "__main__":
    # Only print the table when run as a script, not on import.
    intermediate_histogram(my_dictionary)
#2-3
# name = "kristen"
# print(f"Hello, {name.title()} would you like to learn some Python today?")
#2-4
# name = "aleX"
# print(name.title())
# print(name.upper())
# print(name.lower())
#2-5 and 2-6: famous quote, printed with an f-string.
quote = "\"And now that you don't have to be perfect, you can be good.\""
author = "John Steinbeck"
print(f"{author} once said, {quote} in his book East of Eden.")
#2-7: whitespace stripping — raw, rstrip, lstrip, strip, in that order.
message = "\t testing! \n #"
print(message)
for trimmed in (message.rstrip(), message.lstrip(), message.strip()):
    print(trimmed)
"""Faça um programa que tenha uma função notas() que pode receber várias notas de alunos
e vai retornar um dicionário com as seguintes informações:
– Quantidade de notas
- A maior nota
– A menor nota
– A média da turma
– A situação (opcional)"""
def notas(*num, s=False):
    """
    -> Collect the class's grades and return summary information.

    :param num: the grades of the class
    :param s: when True, also include the class situation
              ('Boa', 'Razoável' or 'Ruim') in the result
    :return: dict with the grade count, highest, lowest, average and
             (optionally) the situation
    """
    media = sum(num) / len(num)
    resumo = {'Quantidade de notas': len(num),
              'Maior nota': max(num),
              'Menor nota': min(num),
              'Média': media}
    if s:
        if media >= 6:
            resumo['Situação'] = 'Boa'
        elif media >= 5:
            resumo['Situação'] = 'Razoável'
        else:
            resumo['Situação'] = 'Ruim'
    return resumo
# Demo calls: with and without the optional situation flag.
print(notas(2, 3, 5, 4, 1, 3, s=True))
print(notas(10, 7, 8, 10, s=True))
print(notas(4, 6, 7, 5, 6.5, 7, 5))
# Show the function's docstring via the interactive help system.
help(notas)
|
##############################################
# The MIT License (MIT)
# Copyright (c) 2014 Kevin Walchko
# see LICENSE for full details
##############################################
# -*- coding: utf-8 -*
from opencv_camera.color_space import bgr2gray, gray2bgr
import numpy as np
import cv2
class ApriltagMarker:
    """Draws detected AprilTag outlines, corner dots and (optionally) the
    tag id onto an image using OpenCV drawing primitives."""
    def draw(self, img, tags, id=False, mark=False):
        """
        Draw every detection in `tags` onto a colour copy of `img`.

        img: grayscale (2-D) or colour image
        tags: one detection or a list of detections (needs .corners/.tag_id)
        id [bool]: also write each tag's id (parameter shadows the builtin
                   `id` — kept for backward compatibility)
        mark [bool]: draw a filled circle at the centre of each tag
        returns: a new annotated colour image (input is not modified)
        """
        if len(img.shape) == 2:
            # Grayscale input: convert so we can draw in colour.
            color_img = gray2bgr(img)
        else:
            color_img = img.copy()
        if not isinstance(tags, list):
            # Accept a single detection as well as a list.
            tags = [tags]
        for tag in tags:
            num = tag.tag_id if id else None
            color_img = self.draw_tag(color_img, tag.corners, tag_id=num, mark=mark)
        return color_img
    def draw_tag(self, color_img, corners, tag_id=None, mark=False):
        """
        color_img: image to draw on, must be color
        corners: corner points from apriltag detector, v[0] is the
            lower left of the tag and the point move CCW.
        tag_id [string]: write tag id number
        mark [bool]: draw a circle in the middle of the tag to see detection easier
        """
        v = corners.astype('int32')
        pts = corners.reshape((-1,1,2)).astype('int32')
        # Outline thickness scales with the tag's apparent width.
        t = int(abs(v[0][0] - v[1][0])/20)
        cv2.polylines(color_img,[pts],True,(0,255,0),thickness=t)
        if mark:
            # Centre estimated from opposite corners v[0] and v[2].
            center = (
                v[0][0]+abs(v[2][0]-v[0][0])//2,
                v[0][1]-abs(v[2][1]-v[0][1])//2
            )
            r = abs(v[0][0] - v[1][0])//2//2
            cv2.circle(color_img,center, r, (200,0,255), -1)
        # r = 15
        y = color_img.shape[0]
        # r = max(int(y/200),1)
        c = (255,0,0)       # colour for corners 1..3
        oc = (0,0,255)      # distinct colour for corner 0 (origin)
        # v = corners.astype('int32')
        r = int(abs(v[0][0] - v[1][0])/15)
        # print(r)
        cv2.circle(color_img, tuple(v[0]),r,oc,thickness=-1)
        cv2.circle(color_img, tuple(v[1]),r,c,thickness=-1)
        cv2.circle(color_img, tuple(v[2]),r,c,thickness=-1)
        cv2.circle(color_img, tuple(v[3]),r,c,thickness=-1)
        if tag_id is not None:
            offset = (v[1][0]-v[0][0])//4
            # NOTE(review): for small tags r//3 can be 0, giving fontScale
            # and thickness of 0 — confirm cv2.putText tolerates that.
            fs = r//3
            cv2.putText(color_img, str(tag_id),
                        org=(v[0][0]+offset,v[0][1]-offset,),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=fs,
                        thickness=2*fs,
                        color=(255, 0, 255))
        return color_img
|
# Read a 4x5 matrix: one line of 5 space-separated integers per row.
matriz1 = []
for i in range(4):
    matriz1.append(list(map(int, input(
        f"Digite os 5 elementos da linha {i + 1} divididos por espaço: ").split())))
# Read a 5x2 matrix the same way.
matriz2 = []
for i in range(5):
    matriz2.append(list(map(int, input(
        f"Digite os 2 elementos da linha {i + 1} divididos por espaço: ").split())))
# Matrix product: result = matriz1 (4x5) x matriz2 (5x2) -> 4x2.
result = []
for i in range(len(matriz1)):
    soma1, soma2 = 0, 0
    for j in range(len(matriz2)):
        soma1 += matriz1[i][j] * matriz2[j][0]
        soma2 += matriz1[i][j] * matriz2[j][1]
    result.append([soma1, soma2])
# Print the product row by row, values separated by spaces.
for i in result:
    for j in i:
        print(j, end=" ")
    print()
|
import stripe
from django.conf import settings
from django.shortcuts import render, redirect, Http404
from django.http import JsonResponse
from .models import Cart
from accounts.models import GuestEmail
from addresses.forms import AddressForm
from addresses.models import Address
from accounts.forms import LoginForm, GuestForm
from orders.models import Order
from products.models import Product
from billing.models import BillingProfile
# NOTE(review): hard-coded Stripe *test* keys committed as fallbacks — move
# them into settings/environment variables and rotate them if ever live.
STRIPE_SECRET_KEY = getattr(settings, "STRIPE_SECRET_KEY", "sk_test_xsB0ceSWejUXhmSJSlOnqitJ00TTZHqDU5")
STRIPE_PUB_KEY = getattr(settings, "STRIPE_PUB_KEY", 'pk_test_xM90FAQHNuiRyxScNdeT1POR00a0jpuSPZ')
stripe.api_key = STRIPE_SECRET_KEY
def cart_detail_api_view(request):
    """Return the current session's cart (products, subtotal, total) as JSON."""
    cart_obj, new_obj = Cart.objects.new_or_get(request)
    products = []
    for product in cart_obj.products.all():
        products.append({
            "id": product.id,
            "url": product.get_absolute_url(),
            "name": product.name,
            "price": product.price
        })
    payload = {
        "products": products,
        "subtotal": cart_obj.subtotal,
        "total": cart_obj.total
    }
    return JsonResponse(payload)
def cart_home(request):
    """Render the cart overview page for the current session's cart."""
    cart_obj, _created = Cart.objects.new_or_get(request)
    context = {"cart": cart_obj}
    return render(request, "carts/home.html", context)
def cart_update(request):
    """
    Toggle a product in or out of the session cart.

    Reads ``product_id`` from POST. If the product is already in the cart it
    is removed, otherwise it is added. AJAX callers receive a JSON summary;
    everyone else is redirected to the cart page.
    """
    product_id = request.POST.get('product_id')
    if product_id is not None:
        try:
            product_obj = Product.objects.get(id=product_id)
        except Product.DoesNotExist:
            # BUG FIX: was `Product.DoesNotExits` (typo) — the except clause
            # itself raised AttributeError instead of producing this 404.
            raise Http404("Not found.. \(._.)/")
        cart_obj, new_obj = Cart.objects.new_or_get(request)
        if product_obj in cart_obj.products.all():
            cart_obj.products.remove(product_obj)
            added = False
        else:
            cart_obj.products.add(product_obj) # cart_obj.products.add(product_id)
            added = True
        # Keep the navbar cart counter in sync with the cart contents.
        request.session['cart_items'] = cart_obj.products.count()
        # return redirect(product_obj.get_absolute_url())
        # NOTE: request.is_ajax() was removed in Django 4.0 — check the
        # X-Requested-With header directly when upgrading.
        if request.is_ajax(): # Asynchronous JavaScript and XML / JSON
            json_data = {
                "added": added,
                "removed": not added,
                "cartItemCount": cart_obj.products.count()
            }
            return JsonResponse(json_data)
    return redirect("carts:home") # lazy redirect
def checkout_home(request):
    """
    Render the checkout page and, on POST, attempt to charge the order.

    Flow: fetch the cart -> bail back to the cart page if it is new or
    empty -> resolve the billing profile and any addresses stashed in the
    session -> on POST, charge through the billing profile and redirect to
    success or back to checkout on failure.
    """
    cart_obj, cart_created = Cart.objects.new_or_get(request)
    order_obj = None
    if cart_created or cart_obj.products.count() == 0:
        # Nothing to check out: a brand-new or empty cart goes back home.
        return redirect("carts:home")
    login_form = LoginForm()
    guest_form = GuestForm()
    address_form = AddressForm()
    # Addresses chosen earlier in the flow are handed over via the session.
    billing_address_id = request.session.get("billing_address_id", None)
    shipping_address_id = request.session.get("shipping_address_id", None)
    address_qs = None
    billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
    has_card = False
    if billing_profile is not None:
        if request.user.is_authenticated:
            # Only authenticated users get their saved addresses listed.
            address_qs = Address.objects.filter(billing_profile=billing_profile)
            # shipping_address_qs = address_qs.filter(address_type='shipping')
            # billing_address_qs = address_qs.filter(address_type='billing')
        order_obj, order_obj_created = Order.objects.new_or_get(billing_profile, cart_obj)
        if shipping_address_id:
            order_obj.shipping_address = Address.objects.get(id=shipping_address_id)
            del request.session["shipping_address_id"]
        if billing_address_id:
            order_obj.billing_address = Address.objects.get(id=billing_address_id)
            del request.session["billing_address_id"]
        if billing_address_id or shipping_address_id:
            # Persist only when at least one address actually changed.
            order_obj.save()
        has_card = billing_profile.has_card
    if request.method == "POST":
        is_prepared = order_obj.check_done()
        if is_prepared:
            did_charge, crg_msg = billing_profile.charge(order_obj)
            if did_charge:
                order_obj.mark_paid()
                # Reset the session cart after a successful payment.
                request.session['cart_items'] = ''
                del request.session['cart_id']
                return redirect("carts:success")
            else:
                print(crg_msg)
                return redirect("carts:checkout")
    context = {
        "object": order_obj,
        "billing_profile": billing_profile,
        "login_form": login_form,
        "guest_form": guest_form,
        "address_form": address_form,
        "address_qs": address_qs,
        "has_card": has_card,
        "publish_key": STRIPE_PUB_KEY,
    }
    return render(request, "carts/checkout.html", context)
def checkout_done_view(request):
    """Render the post-checkout confirmation page."""
    template_name = "carts/checkout_done.html"
    return render(request, template_name)
from app import app
from flask import jsonify, render_template, request
from rq.job import Job, NoSuchJobError
from worker import conn, queue
from .jobs import compute_path
from .requests import get_address_by_name, get_address_by_location
import json
# ENTRY ROUTE
@app.route("/")
def entry():
    """Serve the static welcome page."""
    page = render_template("welcome.html")
    return page, 200
# OPTIMIZER ROUTES
@app.route("/optimize", methods=["POST"])
def optimize():
    """
    Create a background task to calculate the optimal route between the points
    passed by the client.

    Expects a JSON body like ``{"locations": [loc1, ...]}`` with two to five
    entries; returns 202 with the queued job id on success.
    """
    try:
        data = json.loads(request.data)
        # BUG FIX: the old code only checked `len(data) == 0`, so a non-empty
        # body *without* a "locations" key raised KeyError and surfaced as a
        # 500 via the generic handler instead of this intended 400.
        if "locations" not in data:
            return jsonify({"err": "Key 'locations' is required for optimization."}), 400
        locations = data["locations"]
        if not isinstance(locations, list):
            return jsonify({"err": "Value for 'locations' must be array."}), 400
        if len(locations) < 2 or len(locations) > 5:
            return jsonify({"err": "Between two and five locations are required for optimization."}), 400
        # Queue the computation; the result stays available for 600 seconds.
        job = queue.enqueue_call(func=compute_path, args=(locations,), result_ttl=600)
    except json.JSONDecodeError as jsonError:
        print(jsonError)
        return jsonify({"err": "Invalid json syntax."}), 500
    except Exception as error:
        print(error.with_traceback)
        return jsonify({"err": "An error occurred during optimization. {}".format(error)}), 500
    return jsonify({
        "msg": "Optimization started. Poll /optimize/result/<id> periodically for the result",
        "id": job.id
    }), 202
@app.route("/optimize/result/<job_id>", methods=["GET"])
def get_results(job_id):
    """
    Get the result of the job based on its id
    """
    try:
        job = Job.fetch(job_id, connection=conn)
    except NoSuchJobError:
        return jsonify({
            "msg": "job id does not exist"
        }), 404
    # Map the job state onto the polling response the client expects.
    if job.is_finished:
        payload = {"status": "finished", "data": job.result}
    elif job.is_failed:
        payload = {"status": "failed"}
    else:
        payload = {"status": "in-progress"}
    return jsonify(payload), 200
# LOCATION QUERY ROUTES
@app.route("/locations", methods=["GET"])
def get_location():
    """
    Query location data by coord or name. If both types are given,
    then query by name is given priority
    """
    name = request.args.get("name")
    lat = request.args.get("lat")
    lng = request.args.get("lng")
    limit = request.args.get("limit")
    if name:
        matches = get_address_by_name(name, limit)
        try:
            found = [
                {"name": m["display_name"], "lat": m["lat"], "lng": m["lon"]}
                for m in matches
            ]
        except (KeyError, TypeError):
            found = []
        return jsonify({"locations": found}), 200
    if lat and lng:
        match = get_address_by_location(lat, lng)
        try:
            found = [{
                "name": match["display_name"],
                "lat": match["lat"],
                "lng": match["lon"]
            }]
        except KeyError:
            found = []
        return jsonify({"locations": found}), 200
    return jsonify({"msg": "parameters 'name' or 'lat'/'lng' required"}), 400
import ntpath
import cudf
from optimus.engines.base.io.load import BaseLoad
from optimus.engines.base.meta import Meta
from optimus.engines.cudf.dataframe import CUDFDataFrame
from optimus.helpers.functions import prepare_path, unquote_path
from optimus.helpers.logger import logger
class Load(BaseLoad):
    """cuDF-backed file loaders (json/tsv/csv/parquet/avro/orc/excel) that
    wrap the result in a CUDFDataFrame and record the source file in meta."""
    def __init__(self, op):
        # Keep a reference to the owning Optimus engine instance.
        self.op = op
    @staticmethod
    def json(path, multiline=False, n_rows=-1, *args, **kwargs):
        """
        Return a dataframe from a json file.
        :param path: path or location of the file.
        :param multiline: parse one JSON record per line when True.
        :param n_rows: maximum number of rows to read (-1 reads everything).
        :return:
        """
        path = unquote_path(path)
        local_file_names = prepare_path(path, "json")
        try:
            df_list = []
            # prepare_path() yields (file_name, ...) pairs; load and concat all.
            for file_name, j in local_file_names:
                df = cudf.read_json(file_name, lines=multiline, nrows=n_rows, *args, **kwargs)
                df_list.append(df)
            df = cudf.concat(df_list, axis=0, ignore_index=True)
            df = CUDFDataFrame(df)
            # NOTE(review): this stores the whole first (file_name, ...) tuple,
            # not just the name — confirm downstream consumers expect that.
            df.meta = Meta.set(df.meta, "file_name", local_file_names[0])
        except IOError as error:
            logger.print(error)
            raise
        return df
    @staticmethod
    def tsv(path, header=True, infer_schema=True, *args, **kwargs):
        """
        Return a dataframe from a tsv file.
        :param path: path or location of the file.
        :param header: tell the function whether dataset has a header row. True default.
        :param infer_schema: infers the input schema automatically from data.
        It requires one extra pass over the data. True default.
        :return:
        """
        # Thin delegate to csv() with a tab separator.
        return Load.csv(path, sep='\t', header=header, infer_schema=infer_schema, *args, **kwargs)
    @staticmethod
    def csv(path, sep=',', header=True, infer_schema=True, encoding="utf-8", null_value="None", n_rows=-1, cache=False,
            quoting=0, lineterminator=None, error_bad_lines=False, keep_default_na=False, na_filter=True, dtype=None,
            *args, **kwargs):
        """
        Return a dataframe from a csv file.
        params
        :param dtype: column dtype(s); None/str is normalized to ["str"] below.
        :param cache:
        :param na_filter:
        :param path: path or location of the file.
        :param sep: usually delimiter mark are ',' or ';'.
        :param keep_default_na:
        :param error_bad_lines:
        :param quoting:
        :param lineterminator:
        :param header: tell the function whether dataset has a header row. True default.
        :param infer_schema: infers the input schema automatically from data.
        :param null_value:
        :param n_rows:
        :param encoding:
        It requires one extra pass over the data. True default.
        :return dataFrame
        """
        path = unquote_path(path)
        # file, file_name = prepare_path(path, "csv")[0]
        try:
            # TODO: lineterminator=lineterminator seems to be broken
            # Translate the boolean header flag into cudf's row-index form.
            if header is True:
                header = 0
            elif header is False:
                header = None
            # The str to ["str] is due to a bug in cudf https://github.com/rapidsai/cudf/issues/6606
            if dtype == str or dtype is None:
                dtype = ["str"]
            cdf = cudf.read_csv(path, sep=sep, header=header, encoding=encoding,
                                quoting=quoting, error_bad_lines=error_bad_lines,
                                keep_default_na=keep_default_na, na_values=null_value, nrows=n_rows,
                                na_filter=na_filter, dtype=dtype, *args, **kwargs)
            df = CUDFDataFrame(cdf)
            df.meta = Meta.set(df.meta, None,
                               {"file_name": path, "max_cell_length": df.cols.len("*").cols.max()})
        except IOError as error:
            logger.print(error)
            raise
        return df
    @staticmethod
    def parquet(path, columns=None, *args, **kwargs):
        """
        Return a dataframe from a parquet file.
        :param path: path or location of the file. Must be string dataType
        :param columns: select the columns that will be loaded. In this way you do not need to load all the dataframe
        :param args: custom argument to be passed to the parquet function
        :param kwargs: custom keyword arguments to be passed to the parquet function
        """
        path = unquote_path(path)
        try:
            df = cudf.read_parquet(path, columns=columns, engine='pyarrow', *args, **kwargs)
            df = CUDFDataFrame(df)
            df.meta = Meta.set(df.meta, "file_name", path)
        except IOError as error:
            logger.print(error)
            raise
        return df
    @staticmethod
    def avro(path, storage_options=None, conn=None, *args, **kwargs):
        """
        Return a dataframe from an avro file.
        :param path: path or location of the file. Must be string dataType
        :param storage_options:
        :param conn: optional connection object providing path()/storage_options
        :param args: custom argument to be passed to the avro function
        :param kwargs: custom keyword arguments to be passed to the avro function
        """
        path = unquote_path(path)
        if conn is not None:
            path = conn.path(path)
            storage_options = conn.storage_options
        # NOTE(review): file/file_name are resolved here but read_avro below
        # still reads from `path`; only file_name feeds the meta — confirm.
        file, file_name = prepare_path(path, "avro")[0]
        try:
            df = cudf.read_avro(path, storage_options=storage_options, *args, **kwargs)
            df = CUDFDataFrame(df)
            df.meta = Meta.set(df.meta, "file_name", file_name)
        except IOError as error:
            logger.print(error)
            raise
        return df
    @staticmethod
    def orc(path, columns=None, storage_options=None, conn=None, *args, **kwargs):
        """
        Return a dataframe from a avro file.
        :param path: path or location of the file. Must be string dataType
        :param columns: Subset of columns to be loaded
        :param storage_options:
        :param conn: optional connection object providing path()/storage_options
        :param args: custom argument to be passed to the avro function
        :param kwargs: custom keyword arguments to be passed to the avro function
        """
        path = unquote_path(path)
        if conn is not None:
            path = conn.path(path)
            storage_options = conn.storage_options
        # NOTE(review): like avro(), `file` is unused and read_orc reads `path`.
        file, file_name = prepare_path(path, "orc")[0]
        try:
            df = cudf.read_orc(path, columns, storage_options=storage_options, *args, **kwargs)
            df = CUDFDataFrame(df)
            df.meta = Meta.set(df.meta, "file_name", file_name)
        except IOError as error:
            logger.print(error)
            raise
        return df
    @staticmethod
    def excel(path, sheet_name=0, storage_options=None, conn=None, *args, **kwargs):
        """
        Return a dataframe from a excel file.
        :param path: Path or location of the file. Must be string dataType
        :param sheet_name: excel sheet name
        :param storage_options:
        :param conn: optional connection object providing path()/storage_options
        :param args: custom argument to be passed to the excel function
        :param kwargs: custom keyword arguments to be passed to the excel function
        """
        path = unquote_path(path)
        if conn is not None:
            path = conn.path(path)
            storage_options = conn.storage_options
        # NOTE(review): other loaders index prepare_path(...)[0]; here the
        # list itself is unpacked — likely a bug, confirm prepare_path's shape.
        file, file_name = prepare_path(path, "xls")
        try:
            pdf = cudf.read_excel(file, sheet_name=sheet_name, storage_options=storage_options, *args, **kwargs)
            # exception when Spark try to infer the column data type
            # Parse object column data type to string to ensure that Spark can handle it. With this we try to reduce
            col_names = list(pdf.select_dtypes(include=['object']))
            column_dtype = {}
            for col in col_names:
                column_dtype[col] = str
            # Convert object columns to string
            pdf = pdf.astype(column_dtype)
            # NOTE(review): `npartitions` is a Dask-style keyword — confirm
            # cudf.from_pandas accepts it in the pinned cudf version.
            df = cudf.from_pandas(pdf, npartitions=3)
            df.meta = Meta.set(df.meta, "file_name", ntpath.basename(file_name))
        except IOError as error:
            logger.print(error)
            raise
        return df
|
class Translator:
    """Keyword-based translator backed by a fixed vocabulary dictionary."""
    def __init__(self, template):
        """
        Initializes the translator.
        :param template: Dictionary with the translations.
        key = translation key, value = translation.
        """
        # Copy entry by entry so the caller's mapping is never aliased.
        self.vocabulary = {key: template[key] for key in template}
        self.label_not_found = '???'
    def get_translation(self, keyword):
        """
        Tries to translate a given keyword by a specified template.
        If no translation is found, it returns "???".
        :param keyword: Keyword to be translated.
        :return: Returns a translation if it exists and it returns "???" if no translation can be found.
        """
        return self.vocabulary.get(keyword, self.label_not_found)
|
'''oData service CLI tests.'''
# !/usr/bin/env python3
# pylint: disable=protected-access,missing-function-docstring
from unittest.mock import Mock, patch
from mock import (
ConsoleOutputTestCase,
PatcherTestCase
)
import sap.cli.rap
from infra import generate_parse_args
from mock import Connection, Response, Request
from fixtures_adt_wb import RESPONSE_ACTIVATION_OK
# Build the CLI argument parser once; shared by every test class below.
parse_args = generate_parse_args(sap.cli.rap.CommandGroup())
class TestRapBindingPublish(PatcherTestCase, ConsoleOutputTestCase):
    '''Test rap binding Publish command'''
    def setUp(self):
        # Wire up a mocked ServiceBinding whose find_service/publish return
        # a canned service and status message the tests can steer.
        super().setUp()
        ConsoleOutputTestCase.setUp(self)
        assert self.console is not None
        self.connection = Mock()
        self.param_version = '1234'
        self.param_service = 'SRVD_NAME'
        self.param_binding_name = 'SRVB_NAME'
        self.patch_console(console=self.console)
        self.binding_patch = self.patch('sap.adt.businessservice.ServiceBinding')
        self.service = Mock()
        self.service.definition = Mock()
        self.service.definition.name = self.param_service
        self.service.version = self.param_version
        self.publish_status = sap.adt.businessservice.StatusMessage()
        self.binding_inst = self.binding_patch.return_value
        self.binding_inst.find_service = Mock()
        self.binding_inst.find_service.return_value = self.service
        self.binding_inst.publish = Mock()
        self.binding_inst.publish.return_value = self.publish_status
        self.binding_inst.services = [self.service]
    def execute_publish_service_version(self):
        # Run `binding publish <name> --version <ver>` through the real parser.
        args = parse_args(
            'binding',
            'publish',
            self.param_binding_name,
            '--version',
            self.param_version
        )
        return args.execute(self.connection, args)
    def execute_publish_service_name(self):
        # Run `binding publish <name> --service <service>`.
        args = parse_args(
            'binding',
            'publish',
            self.param_binding_name,
            '--service',
            self.param_service
        )
        return args.execute(self.connection, args)
    def execute_publish_service_name_version(self):
        # Run `binding publish <name> --service <service> --version <ver>`.
        args = parse_args(
            'binding',
            'publish',
            self.param_binding_name,
            '--service',
            self.param_service,
            '--version',
            self.param_version
        )
        return args.execute(self.connection, args)
    def execute_publish(self):
        # Run `binding publish <name>` with no service/version filter.
        args = parse_args(
            'binding',
            'publish',
            self.param_binding_name,
        )
        return args.execute(self.connection, args)
    def test_publish_service_version_ok(self):
        self.publish_status.SEVERITY = "OK"
        self.publish_status.SHORT_TEXT = "Foo bar"
        self.execute_publish_service_version()
        self.binding_patch.assert_called_once_with(self.connection, self.param_binding_name)
        self.binding_inst.publish.assert_called_once_with(self.service)
        self.assertConsoleContents(console=self.console,
                                   stdout=f'''Foo bar\nService {self.param_service} in Binding {self.param_binding_name} published successfully.
''')
    def test_publish_service_name_ok(self):
        self.publish_status.SEVERITY = "OK"
        self.publish_status.SHORT_TEXT = "Foo bar"
        self.execute_publish_service_name()
        self.binding_patch.assert_called_once_with(self.connection, self.param_binding_name)
        self.binding_inst.find_service.assert_called_once_with(self.param_service, None)
        self.binding_inst.publish.assert_called_once_with(self.service)
        self.assertConsoleContents(console=self.console,
                                   stdout=f'''Foo bar\nService {self.param_service} in Binding {self.param_binding_name} published successfully.
''')
    def test_publish_service_ok(self):
        # With a LONG_TEXT set, the command echoes it after the short text.
        self.publish_status.SEVERITY = "OK"
        self.publish_status.SHORT_TEXT = "Foo bar"
        self.publish_status.LONG_TEXT = "Long text"
        self.execute_publish()
        self.binding_patch.assert_called_once_with(self.connection, self.param_binding_name)
        self.binding_inst.publish.assert_called_once_with(self.service)
        self.assertConsoleContents(console=self.console,
                                   stdout=f'''Foo bar\nLong text\nService {self.param_service} in Binding {self.param_binding_name} published successfully.
''')
    def test_publish_service_error(self):
        # Non-OK severity must print to stderr and exit with code 1.
        self.publish_status.SEVERITY = "NOTOK"
        self.publish_status.SHORT_TEXT = "Foo bar"
        exitcode = self.execute_publish_service_version()
        self.binding_patch.assert_called_once()
        self.binding_inst.publish.assert_called_once()
        self.assertConsoleContents(console=self.console, stdout='Foo bar\n',
                                   stderr=f'''Failed to publish Service {self.param_service} in Binding {self.param_binding_name}
''')
        self.assertEqual(exitcode, 1)
    def test_publish_service_no_services(self):
        # A binding without services must fail before find_service/publish.
        # NOTE(review): "Biding" typo mirrors the CLI's actual output.
        self.binding_inst.services = []
        exitcode = self.execute_publish_service_version()
        self.binding_patch.assert_called_once()
        self.binding_inst.find_service.assert_not_called()
        self.binding_inst.publish.assert_not_called()
        self.assertConsoleContents(console=self.console,
                                   stderr=f'''Business Service Biding {self.param_binding_name} does not contain any services
''')
        self.assertEqual(exitcode, 1)
    def test_publish_service_too_many_services(self):
        # Multiple services without a filter is ambiguous and must fail.
        self.binding_inst.services = [Mock(), Mock()]
        exitcode = self.execute_publish()
        self.binding_patch.assert_called_once()
        self.binding_inst.find_service.assert_not_called()
        self.binding_inst.publish.assert_not_called()
        self.assertConsoleContents(console=self.console,
                                   stderr=f'''Cannot publish Business Service Biding {self.param_binding_name} without
Service Definition filters because the business binding contains more than one
Service Definition
''')
        self.assertEqual(exitcode, 1)
    def test_publish_service_not_found_service_name_version(self):
        # find_service returning None must report the unmatched name+version.
        self.binding_inst.find_service.return_value = None
        exitcode = self.execute_publish_service_name_version()
        self.binding_inst.publish.assert_not_called()
        self.assertConsoleContents(console=self.console, stderr=f'''Business Service Binding {self.param_binding_name} has no Service Definition
with supplied name "{self.param_service}" and version "{self.param_version}"
''')
        self.assertEqual(exitcode, 1)
    def test_publish_service_not_found_service_version(self):
        self.binding_inst.find_service.return_value = None
        exitcode = self.execute_publish_service_version()
        self.binding_inst.publish.assert_not_called()
        self.assertConsoleContents(console=self.console, stderr=f'''Business Service Binding {self.param_binding_name} has no Service Definition
with supplied name "" and version "{self.param_version}"
''')
        self.assertEqual(exitcode, 1)
    def test_publish_service_not_found_service_name(self):
        self.binding_inst.find_service.return_value = None
        exitcode = self.execute_publish_service_name()
        self.binding_inst.publish.assert_not_called()
        self.assertConsoleContents(console=self.console, stderr=f'''Business Service Binding {self.param_binding_name} has no Service Definition
with supplied name "{self.param_service}" and version ""
''')
        self.assertEqual(exitcode, 1)
class TestRapDefinition(PatcherTestCase, ConsoleOutputTestCase):
    '''Test rap definition command group'''
    def setUp(self):
        # Use a scripted Connection that replays a canned OK activation reply.
        super().setUp()
        ConsoleOutputTestCase.setUp(self)
        assert self.console is not None
        self.patch_console(console=self.console)
        self.connection = Connection([RESPONSE_ACTIVATION_OK])
        self.param_definition_name = 'EXAMPLE_CONFIG_SRV'
    def execute_definition_activate(self):
        # Run `definition activate <name>` through the real parser.
        args = parse_args(
            'definition',
            'activate',
            self.param_definition_name
        )
        return args.execute(self.connection, args)
    def test_activate(self):
        # Activation should print the progress summary and POST the expected
        # activation XML for the service definition object.
        self.execute_definition_activate()
        self.assertConsoleContents(console=self.console,
                                   stdout=f'''Activating:
* EXAMPLE_CONFIG_SRV
Activation has finished
Warnings: 0
Errors: 0
''')
        self.connection.execs[0].assertEqual(
            Request.post_xml(
                uri='/sap/bc/adt/activation',
                accept='application/xml',
                params={'method':'activate', 'preauditRequested':'true'},
                body='''<?xml version="1.0" encoding="UTF-8"?>
<adtcore:objectReferences xmlns:adtcore="http://www.sap.com/adt/core">
<adtcore:objectReference adtcore:uri="/sap/bc/adt/ddic/srvd/sources/example_config_srv" adtcore:name="EXAMPLE_CONFIG_SRV"/>
</adtcore:objectReferences>'''
            ),
            self
        )
|
#!/usr/local/bin/python
#coding=utf-8
import os
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import math
import csv
import urllib
import urllib2
import pywt
from numpy import log
import cwavelet
import copy
import cleastsq
import BP
# Date window for the stock data; format appears to be [month, day, year]
# — TODO confirm against the download URL builder further down the file.
date_begin = [1, 1, 2014]
date_end = [5, 27, 2015]
def plotClosePrice(plt, stock_name, stock_code, dataPath):
    # Plot the adjusted-close price series for one stock from its CSV file.
    # NOTE(review): mlab.csv2rec was removed from modern matplotlib; this is
    # Python 2-era code (Windows '\\' path separators).
    fh = open(dataPath + '\\' + stock_code +".csv", 'r')
    r = mlab.csv2rec(fh); fh.close()
    r.sort()
    prices = r.adj_close
    plt.plot(r.date, prices)
# Close-price trend with wavelet smoothing (Python 2 source).
def plotData(stock_code, stock_name):
    # Plot the raw adjusted-close series against its wavelet-denoised version.
    try:
        xValues=[]
        yValues=[]
        xLabels=[]
        # Windows-style path into ./stockdata/
        dataPath = os.getcwd() + '\\stockdata\\';
        i=-1
        for line in open(dataPath + stock_code +".csv"):
            f_date, f_open, f_high, f_low, f_close, f_volume, f_adjclose = line.split(",")
            i += 1
            # Skip the header row and cap at 1000 data rows.
            if i == 0 or i > 1000:
                continue
            xValues.append(i)
            yValues.append(float(f_adjclose))
            xLabels.append(f_date)
        # CSV rows are newest-first; reverse into chronological order.
        yValues.reverse()
        # Denoise with a 4-level db2 wavelet and sqtwolog thresholding.
        zValues = cwavelet.getWaveletData(yValues, 'db2', 4, 'sqtwolog')
        zxValue = np.arange(0,len(zValues),1)
        print len(zxValue), len(zValues)
        plt.figure(figsize=(16,8))
        plt.plot(xValues, yValues, label=stock_code, color="b", linewidth=1)
        plt.plot(zxValue, zValues, color="r", linewidth=2)
        plt.xlabel("Time")
        plt.ylabel("Price")
        plt.title( stock_name)
        #plt.xticks(range(min(xLabels), max(xLabels)+1, 10))
        plt.grid()
        #plt.legend()
        plt.show()
    except Exception as e:
        print ("Exception:>>>" + str(e))
    finally:
        None
def plotRateOfReturn(stock_code):
    # Plot log rate-of-return for a stock plus a wavelet-packet forecast
    # of the last few points (Python 2 source).
    try:
        xValues = []
        yValues = []
        i=-1
        for line in open(stock_code + ".csv"):
            i += 1
            # Skip the header row and cap at 1000 data rows.
            if i == 0 or i > 1000:
                continue
            f_date, f_open, f_high, f_low, f_close, f_volume, f_adjclose = line.split(",")
            yValues.append(float(f_adjclose))
        # yValues drops its last element, zValues drops its first, giving
        # two aligned series shifted by one day.
        zValues = copy.deepcopy(yValues) # deep copy
        yValues.reverse()
        yValues.pop()
        zValues.pop()
        zValues.reverse()
        if len(yValues) != len(zValues):
            return
        rateValues = []
        # Log return: ln(price[t+1] / price[t]).
        for i in range(0, len(yValues)):
            print float(zValues[i])/yValues[i]
            rateValues.append(math.log(float(zValues[i])/yValues[i]))
            xValues.append(i)
        # NOTE(review): this overwrites the log-returns computed just above
        # with a wavelet transform of the *prices* — looks unintentional.
        rateValues = cwavelet.getWaveletData(yValues, 'db4', 2, 'sqtwolog')
        # BP neural network (commented-out experiment)
        # patStock = []
        # for i in range(0, len(yValues)):
        #     each = [[i], [yValues[i]]]
        #     patStock.append(each)
        # patStockPre = copy.deepcopy(patStock)
        # for i in range(len(yValues), len(yValues)+10):
        #     each = [[i], [0]]
        #     patStockPre.append(each)
        # pat = [
        #     [[0], [0]],
        #     [[2], [1]],
        #     [[3], [1]],
        #     [[4], [5]]
        # ]
        #
        # # create a network with two input, two hidden, and one output nodes
        # n = BP.NN(1, 2, 1)
        # # train it with some patterns
        # n.train(patStock)
        # # test it
        # n.test(patStock)
        # Least-squares / wavelet-packet extrapolation of the tail.
        print "原始长度:", len(rateValues)
        catRateValues = rateValues[:-7]
        print "原始长度:", len(catRateValues)
        leastsqValues = cwavelet.getWavePacketData(catRateValues, 'haar', 4, 3)
        #leastsqValues = cleastsq.getFitYValues(range(len(catRateValues)), catRateValues, range(len(catRateValues)+3))
        print "变换后长度:", len(leastsqValues)
        # Splice the last 3 extrapolated points onto the observed series.
        newLeastsqValues = np.concatenate((rateValues[:-3], leastsqValues[-3:]))
        newLeastsqValues2 = []
        # NOTE(review): every point is shifted down by a hard-coded 0.2 —
        # document or remove this magic offset.
        for data in newLeastsqValues:
            data -= 0.2
            newLeastsqValues2.append(data)
        newLeastsqValues = newLeastsqValues2
        print "变换后长度:", len(newLeastsqValues)
        plt.figure(figsize=(16,8))
        plt.legend()
        plt.plot(range(len(rateValues)), rateValues,'b-', linewidth=1)
        plt.plot(range(len(newLeastsqValues)), newLeastsqValues, 'r-', linewidth=1)
        plt.xlabel('Time')
        plt.ylabel('Price')
        plt.title(stock_code)
        plt.grid()
        plt.show()
    except Exception as e:
        print ("Exception:>>>"+str(e))
    finally:
        None
def down_file(url, file_name):
    """Download *url* to *file_name* in 8 KiB chunks.

    Fixes: the URL handle was never closed and the output file leaked when a
    read/write raised; 'buffer' shadowed the builtin of the same name.
    """
    u = urllib2.urlopen(url)
    try:
        f = open(file_name, 'wb')
        try:
            file_size_dl = 0
            block_sz = 8192
            while True:
                chunk = u.read(block_sz)
                if not chunk:
                    break
                file_size_dl += len(chunk)
                f.write(chunk)
        finally:
            f.close()
    finally:
        u.close()
# Personal watch list: stock code, share count held, and display name.
ChinaStockNumIndividualList = [
    {'code':"600011", 'num':1100, 'name':u"华能国际"}, #
    {'code':"600000", 'num':1900, 'name':u"浦发银行"}, #
    {'code':"002600", 'num':1000, 'name':u"江粉磁材"}, #
    {'code':"002505", 'num':1000, 'name':u"大康牧业"}, #
    {'code':"000725", 'num':2000, 'name':u"京东方A"}, #
    {'code':"000783", 'num':600, 'name':u"长江证券"}, #
    {'code':"600048", 'num':2000, 'name':u"保利地产"}, #
    {'code':"300315", 'num':200, 'name':u"掌趣科技"}, #
    {'code':"002167", 'num':600, 'name':u"东方锆业"}, #
    {'code':"601001", 'num':1000, 'name':u"大同煤业"}, #
    #{'code':"150172", 'num':5000, 'name':"证券B"}, #
    ]
if __name__ == '__main__':
    # Only the first stock is plotted ('break' below); remove it to plot all.
    for stockCodeDict in ChinaStockNumIndividualList:
        print stockCodeDict['name']
        #plotRateOfReturn(stockCodeDict['code'])
        plotData(stockCodeDict['code'], stockCodeDict['name'])
        break
    # Shanghai Composite Index
    #downloadData("000001")
    #plotRateOfReturn("000001")
|
import os
from setuptools import setup, find_packages
# Use the README that ships next to this setup.py as the long description.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
# Package metadata for the 'searchbot' distribution.
setup(
    name='searchbot',
    version='0.1.0',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='a simple search bot performing a recursive page scanning',
    long_description=README,
    url='https://github.com/sashis/searchbot',
    author='Aleksandr Kropanev',
    author_email='kropanev@mail.ru',
    keywords=['search', 'bot', 'training', 'OTUS'],
    install_requires=[
        'requests',
        'beautifulsoup4',
        'click',
    ],
    # Installs a 'searchbot' console command calling searchbot.main().
    entry_points={
        'console_scripts': [
            'searchbot = searchbot:main'
        ]
    },
)
|
# Echo the input string, inserting a space after every character that is a
# letter immediately followed by another letter.
s = input()
last = len(s) - 1
for idx, ch in enumerate(s):
    if idx < last and ch.isalpha() and s[idx + 1].isalpha():
        print(ch + ' ', end='')
    else:
        print(ch, end='')
|
description = 'setup for the NICOS watchdog'
group = 'special'
# The entries in this list are dictionaries. Possible keys:
#
# 'setup' -- setup that must be loaded (default '' to mean all setups)
# 'condition' -- condition for warning (a Python expression where cache keys
# can be used: t_value stands for t/value etc.
# 'gracetime' -- time in sec allowed for the condition to be true without
# emitting a warning (default 5 sec)
# 'message' -- warning message to display
# 'action' -- code to execute if condition is true (default no code is executed)
# 'type' -- selects notifiers from configured lists
# No active watch conditions yet; the commented entries below are templates.
watchlist = [
    # dict(condition = 'cooltemp_value > 30',
    # message = 'Cooling water temperature exceeds 30 C',
    # ),
    # dict(condition = 'psdgas_value == "empty"',
    # message = 'PSD gas is empty, change bottle!',
    # setup = 'cascade',
    # ),
    # dict(condition = 'tbe_value > 70',
    # message = 'Be filter temperature > 70 K, check cooling',
    # ),
]
includes = ['notifiers']
# Watchdog service: evaluates 'watchlist' against the local cache and sends
# warnings through the 'email' notifier list.
devices = dict(
    Watchdog = device('nicos.services.watchdog.Watchdog',
        cache = 'localhost',
        notifiers = {'default': ['email']},
        watch = watchlist,
        mailreceiverkey = 'email/receivers',
    ),
)
|
# -*- coding: utf-8 -*-
import numpy as np
import warnings
import pandas as pd
import json
import time
import math
import secrets
from args import *
def load_json(filename):
    """Read a JSON-lines file (one JSON object per line) into a DataFrame."""
    with open(filename) as handle:
        records = [json.loads(line) for line in handle]
    return pd.DataFrame(records)
def count_number(test_list, l, r):
    """Return how many values in *test_list* lie in the closed range [l, r]."""
    return sum(1 for value in test_list if l <= value <= r)
def calculate_subtraction(mylist):
    """Return [mylist[0], mylist[1]-mylist[0], mylist[2]-mylist[1], ...].

    Fix: an empty input used to raise IndexError on mylist[0]; it now
    returns an empty list.
    """
    if not mylist:
        return []
    diffs = [mylist[0]]
    for i in range(1, len(mylist)):
        diffs.append(mylist[i] - mylist[i - 1])
    return diffs
def zero_detection(myList):
    """For each column of a 2-D array, return the row index of the first
    non-zero entry, or None if the column is entirely zero.

    :param myList: 2-D numpy array
    :return: list of length myList.shape[1] of ints / None
    """
    list_index = []
    for i in range(myList.shape[1]):
        col_vec = myList[:, i].tolist()
        # next() with a default never raises StopIteration, so the original
        # try/except around it was unreachable dead code — removed.
        list_index.append(next((j for j, x in enumerate(col_vec) if x), None))
    return list_index
def calculate_relative_time(time_series):
    """Return a relative-time ramp: zeros up to (and excluding) the first
    non-zero entry of *time_series*, then 1, 2, 3, ... to the end.

    Fix: an all-zero series crashed (index_start of None was used in
    arithmetic); it now returns an all-zero array.

    :param time_series: 1-D sequence (list or numpy array)
    :return: numpy float array of the same length
    """
    relative_time_series = np.zeros(len(time_series))
    index_start = next((j for j, x in enumerate(time_series) if x), None)
    if index_start is None:
        return relative_time_series
    relative_time_series[index_start:] = (
        np.arange(len(time_series) - index_start) + 1
    )
    return relative_time_series
def pad_sequence(reply_list):
    """Bucket each reply-time sequence into fixed DURATION-wide time bins and
    derive three aligned matrices (rows = time bins, columns = sequences):

    - revision_data: per-bin reply counts with 1 subtracted at each column's
      first non-zero bin (removes the main post itself from the count)
    - temp_matrix: 0/1 mask, 1 from the first activity bin onward
    - relative_time_matrix: per-column ramp 1..k from first activity,
      normalized by the column maximum

    DURATION comes from `from args import *` — assumed to be an int number of
    seconds; TODO confirm.
    """
    earliest_time = int(reply_list[0][0])
    latest_time = int(reply_list[-1][-1])
    # Get the time duration based on the earlist and latest time
    time_duration = []
    for i in range(earliest_time, latest_time, DURATION):
        time_duration.append(i)
    time_duration_mod = []
    for i in range(len(time_duration)-1):
        time_duration_mod.append((time_duration[i], time_duration[i+1]))
    # Generate a dataframe storing all the sequence
    df = pd.DataFrame([])
    for index, seq in enumerate(reply_list):
        time_series = []
        for k in time_duration_mod:
            time_series.append(count_number(seq, k[0], k[1]))
        df['{}'.format(index)] = time_series
        #print("Done, {}".format(index))
    print("Finished padding sequence...\n")
    origin_data = np.asarray(df)
    print("Start constructing mask matrix...\n")
    revision_data = np.copy(origin_data)
    for i in range(revision_data.shape[1]):
        # argmax of a boolean column: index of the first non-zero bin.
        # NOTE(review): for an all-zero column this is 0 and the first entry
        # is decremented to -1 — confirm that is intended.
        index = (revision_data[:, i]!=0).argmax(axis=0)
        revision_data[:, i][index] -= 1
    zero_lists = zero_detection(origin_data)
    temp_matrix = np.zeros(origin_data.shape)
    for i in range(temp_matrix.shape[1]):
        if zero_lists[i] is not None:
            # Mask: zeros before first activity, ones from there on.
            temp_1 = np.zeros(temp_matrix.shape[0])[:zero_lists[i]]
            temp_2 = np.zeros(temp_matrix.shape[0])[zero_lists[i]:]+1
            temp = np.concatenate([temp_1, temp_2])
            temp_matrix[:, i] = temp
        else:
            temp_matrix[:, i] = np.zeros(temp_matrix.shape[0])
    print("Start constructing relative time matrix...\n")
    relative_time_matrix = np.zeros(origin_data.shape)
    for i in range(relative_time_matrix.shape[1]):
        if zero_lists[i] is not None:
            relative_time_matrix[:, i] = calculate_relative_time(origin_data[:, i])
    relative_time_matrix = relative_time_matrix / relative_time_matrix.max(axis=0)
    return revision_data, temp_matrix, relative_time_matrix
def generating_dataset():
    """Load the main tweet stream plus one reply stream per main tweet and
    return a list where entry i is [main_post_time, reply_time_1, ...].

    Paths are rooted at FILE_PATH (from `from args import *`).
    """
    _main_stream = load_json(FILE_PATH+'data/james.json') # All main stream data.
    _reply_stream = [load_json(FILE_PATH+"data/james/{}.json".format(i)) for i in range(len(_main_stream))]
    reply_list = []
    main_time = [float(i) for i in _main_stream.created]
    # Get the reply time series
    for i,x in enumerate(_reply_stream):
        try:
            temp_list = [main_time[i]]
            #temp_list = []
            temp_list = temp_list + list(x.created_at)
            reply_list.append(temp_list)
        # NOTE(review): bare except — intended as best-effort fallback when a
        # reply frame lacks 'created_at', but it also hides unrelated errors;
        # consider narrowing to AttributeError.
        except:
            reply_list.append([main_time[i]])
            #reply_list.append([])
    return reply_list
def get_data(data_matrix, mask_matrix, relative_data, split_boundary, seq_len, index_file, train_start, train_end):
    """Slice the three aligned matrices into seq_len x seq_len training
    windows, select windows by the precomputed index file, and split them
    into training and validation subsets.

    :param split_boundary: column index separating train from test
    :param index_file: CSV (relative to FILE_PATH) whose first column lists
        the window indices to keep
    :return: (train_x, train_y, valid_x, valid_y, train_masks,
        train_relatives, valid_masks, valid_relatives)
    """
    # Read indices to a list
    foo = pd.read_csv(FILE_PATH+index_file, delimiter=',')
    list_indices_train = [int(i) for i in list(foo.iloc[:, 0])]
    training_set = data_matrix[:, 0:split_boundary]
    # NOTE(review): testing_set is computed but never used below.
    testing_set = data_matrix[:, split_boundary-seq_len:]
    mm = mask_matrix[:, 0:split_boundary]
    rr = relative_data[:, 0:split_boundary]
    # Take care of training and validation set
    # Labels are the features shifted one row (time step) into the future.
    train_feature = training_set[0: data_matrix.shape[0]-1]
    train_label = training_set[1: data_matrix.shape[0]]
    masks_temp = mm[0: data_matrix.shape[0]-1]
    relative_temp = rr[0: data_matrix.shape[0]-1]
    train_x_temp, train_y_temp, train_x, train_y = [], [], [], []
    train_masks_temp, train_masks = [], []
    train_relative_temp, train_relatives = [], []
    # First slide a seq_len-tall window over the rows ...
    for i in range(len(train_feature) - seq_len + 1):
        train_x_temp.append(train_feature[i:i+seq_len])
        train_y_temp.append(train_label[i:i+seq_len])
        train_masks_temp.append(masks_temp[i:i+seq_len])
        train_relative_temp.append(relative_temp[i:i+seq_len])
    # ... then a seq_len-wide window over the columns of each row-slab.
    for index_x in train_x_temp:
        for j in range(train_feature.shape[1] - seq_len + 1):
            train_x.append(index_x[:, j:j+seq_len])
    for index_y in train_y_temp:
        for k in range(train_label.shape[1] - seq_len + 1):
            train_y.append(index_y[:, k:k+seq_len])
    for index_m in train_masks_temp:
        for q in range(masks_temp.shape[1] - seq_len + 1):
            train_masks.append(index_m[:, q:q+seq_len])
    for index_n in train_relative_temp:
        for p in range(relative_temp.shape[1] - seq_len + 1):
            train_relatives.append(index_n[:, p:p+seq_len])
    print(len(train_x))
    # Keep only the windows selected by the index file.
    train_x = [train_x[i] for i in list_indices_train]
    train_y = [train_y[i] for i in list_indices_train]
    train_masks = [train_masks[i] for i in list_indices_train]
    train_relatives = [train_relatives[i] for i in list_indices_train]
    # Validation dataset
    valid_x = train_x[train_end+1000:train_end+1200]
    valid_y = train_y[train_end+1000:train_end+1200]
    #valid_masks = [np.ones(i.shape, dtype=int) for i in valid_y]
    #valid_masks = train_masks[train_end:train_end+200]
    #valid_relatives = train_relatives[train_end:train_end+200]
    valid_masks = train_masks[train_end+1000:train_end+1200]
    valid_relatives = train_relatives[train_end+1000:train_end+1200]
    # Training dataset
    train_x = train_x[train_start:train_end]
    train_y = train_y[train_start:train_end]
    train_masks = train_masks[train_start:train_end]
    train_relatives = train_relatives[train_start:train_end]
    return train_x, train_y, valid_x, valid_y, train_masks, train_relatives, valid_masks, valid_relatives
def get_data_new(data_matrix, mask_matrix, relative_data, seq_len):
    """Build training windows from columns 199..1099 and test windows from
    columns 1100..1599 of the aligned matrices, anchored at each column's
    first-activity row and capped at 32 steps per column.

    NOTE(review): the column ranges are hard-coded for one dataset — verify
    data_matrix has at least 1600 columns before reuse.

    :return: ((training_instance, mask_instance, relative_instance,
        training_value), (test_instance, test_mask, test_relative, test_value))
    """
    # Initialize empty lists for each channel
    training_instance, mask_instance, relative_instance, training_value = [], [], [], []
    test_instance, test_mask, test_relative, test_value = [], [], [], []
    # Getting start period
    zero_lists = zero_detection(data_matrix)
    ending_index = []
    for k in range(data_matrix.shape[1]):
        time_series = data_matrix[:, k]
        index_reverse = next((j for j, x in enumerate(time_series[::-1]) if x), None)
        try:
            # TypeError here when the column is all zeros (index_reverse is
            # None) — the except records None for such columns.
            index_end = len(time_series) - index_reverse
            ending_index.append(index_end)
        except:
            ending_index.append(None)
    # Active length of each column (None when the column has no activity).
    time_duration = [ending_index[i] - zero_lists[i] if zero_lists[i] is not None else None for i in range(len(ending_index))]
    for i in range(199, 1100):
        if time_duration[i] != None:
            time_range = time_duration[i]
            if time_range>32:
                time_range = 32
            for j in range(time_range):
                # Window: seq_len rows ending at first activity + j,
                # seq_len columns ending at column i; value window is the
                # same slice shifted one row ahead (next-step target).
                training_instance.append(data_matrix[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
                mask_instance.append(mask_matrix[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
                relative_instance.append(relative_data[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
                training_value.append(data_matrix[zero_lists[i]-seq_len+j+1:zero_lists[i]+j+1, i-seq_len+1:i+1])
        if i % 100 == 0:
            print("Training data, {} out of {}".format(i, data_matrix.shape[1]))
    non_empty_list = []
    for ntype in range(1100, 1600):
        if time_duration[ntype] != None:
            non_empty_list.append(ntype)
    for i in non_empty_list:
        time_range = time_duration[i]
        if time_range>32:
            time_range = 32
        test_instance_temp, test_mask_temp, test_relative_temp, test_value_temp = [], [], [], []
        for j in range(time_range):
            test_instance_temp.append(data_matrix[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
            test_mask_temp.append(mask_matrix[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
            test_relative_temp.append(relative_data[zero_lists[i]-seq_len+j:zero_lists[i]+j, i-seq_len+1:i+1])
            test_value_temp.append(data_matrix[zero_lists[i]-seq_len+j+1:zero_lists[i]+j+1, i-seq_len+1:i+1])
        # Test windows stay grouped per column (list of lists).
        test_instance.append(test_instance_temp)
        test_mask.append(test_mask_temp)
        test_relative.append(test_relative_temp)
        test_value.append(test_value_temp)
        if i % 100 == 0:
            print("Test data, {} out of {}".format(i, data_matrix.shape[1]))
    return (training_instance, mask_instance, relative_instance, training_value), (test_instance, test_mask, test_relative, test_value)
def build_dataset(training_data, batch_size_train, batch_size_val):
    """Stack the three channels per window and wrap them in shuffled,
    batched tf.data Datasets for training and validation.

    NOTE(review): seq_len, train_test_split and tf are not imported in this
    file's visible header — presumably supplied by `from args import *`;
    confirm.

    :param training_data: (x, masks, relatives, y) lists of 2-D arrays
    :return: (train_dataset, valid_dataset)
    """
    x = transform_dataShape(training_data[0])
    train_masks = transform_dataShape(training_data[1])
    relative_masks = transform_dataShape(training_data[2])
    y = transform_dataShape(training_data[3])
    # Loss mask selecting only the bottom-right cell of each window.
    mm = np.zeros((seq_len, seq_len))
    mm[-1][-1] = 1
    mm = np.reshape(mm, mm.shape+(-1,)).astype('float32')
    # Depth-stack data, relative-time and activity-mask channels.
    for i in range(len(x)):
        x[i] = np.dstack((x[i], relative_masks[i], train_masks[i]))
    # Keep 10% of the windows, then carve 10% of those off for validation.
    x_train, x, y_train, y = train_test_split(x, y, test_size=0.9, random_state=1)
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=1)
    train_dataset = tf.data.Dataset.from_tensor_slices(({"train_x": x_train, "masks": [mm]*len(x_train)}, y_train))
    train_dataset = train_dataset.shuffle(buffer_size=100).batch(batch_size_train)
    print("Done, training set is ready...")
    valid_dataset = tf.data.Dataset.from_tensor_slices(({"train_x": x_val, "masks": [mm]*len(x_val)}, y_val))
    valid_dataset = valid_dataset.shuffle(buffer_size=100).batch(batch_size_val)
    print("Done, validation set is ready...")
    return train_dataset, valid_dataset
def transform_dataShape(input_matrix):
    """Append a trailing channel axis to every array in *input_matrix*
    (in place) and cast each to float32; return the same list."""
    for position, array in enumerate(input_matrix):
        reshaped = np.reshape(array, array.shape + (-1,))
        input_matrix[position] = reshaped.astype('float32')
    return input_matrix
def flatten_list(myList):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sublist in myList:
        flattened.extend(sublist)
    return flattened
def build_test_data(test_data, num_to_select):
    '''Randomly pick *num_to_select* full-length (32-window) test columns,
    stack their channels, and return tensors ready for model.predict.

    :param test_data: (instances, relative, masks, values) grouped per column
        as produced by get_data_new
    :return: (test_x tensor, list of scalar targets, chosen column indices)
    '''
    # Only columns that yielded the full 32 windows are eligible.
    indices = []
    for i, x in enumerate(test_data[0]):
        if len(x)==32:
            indices.append(i)
    secure_random = secrets.SystemRandom()
    list_of_random_tests = sorted(secure_random.sample(indices, num_to_select))
    x = transform_dataShape(flatten_list([test_data[0][i] for i in list_of_random_tests]))
    relative_masks = transform_dataShape(flatten_list([test_data[1][i] for i in list_of_random_tests]))
    test_masks = transform_dataShape(flatten_list([test_data[2][i] for i in list_of_random_tests]))
    y = transform_dataShape(flatten_list([test_data[3][i] for i in list_of_random_tests]))
    test_x, test_y = [], []
    for i in range(len(x)):
        # Stack data, relative-time and mask channels per window.
        test_x.append(np.dstack((x[i], relative_masks[i], test_masks[i])))
        test_y.append(y[i])
        if i % 100 == 0:
            print("Test data, {} out of {}".format(i, len(x)))
    # NOTE(review): seq_len, input_channels and tf are module-level names not
    # defined in this file's visible header — presumably from args; confirm.
    test_x = tf.reshape(test_x, [len(test_x), seq_len, seq_len, input_channels])
    # Target is the bottom-right cell of each label window.
    test_y = [i.reshape(seq_len, seq_len)[-1][-1] for i in test_y]
    return test_x, test_y, list_of_random_tests
def make_prediction(x_test, y_test):
    """Run the global `model` on *x_test* (all-ones loss masks) and return
    (bottom-right-cell predictions, y_test unchanged).

    NOTE(review): relies on module-level `model`, `tf`, `seq_len` and
    `input_channels` defined elsewhere — confirm before reuse.
    """
    predictions = model.predict({"train_x": tf.reshape(x_test, [len(x_test), seq_len, seq_len, input_channels]), "masks": np.ones((len(x_test), seq_len, seq_len, 1))})
    predictions = [i.reshape(seq_len, seq_len)[-1][-1] for i in predictions]
    return predictions, y_test
import abc
import sqlalchemy
from typing import List
from src.controller import action
import src.model.movie
import src.model.fields
import src.model.person
class AddMovie(action.ControllerAction):
    """ Adds movie record if it doesn't exist, otherwise updates it """

    def execute(
        self, movie_title: str, title_year: str, color_pk: int,
        content_rating_pk: int, country_pk: int, director_pk: int,
        language_pk: int, aspect_ratio: float, budget: float,
        cast_facebook_likes: int, duration: int, facenum: int, gross: float,
        imdb_id: str, imdb_score: float, movie_facebook_likes: int,
        num_critic_for_reviews: int, num_user_for_reviews: int,
        num_voted_users: int
    ):
        """Upsert a movie keyed on (case-insensitive title, year)."""
        session = self.get_session()
        movie_model = src.model.movie.Movie
        # Look up an existing row by lower-cased title + year.
        existing = (
            session.query(movie_model)
            .filter(
                sqlalchemy.func.lower(movie_model.movie_title)
                == movie_title.lower()
            )
            .filter(movie_model.title_year == title_year)
            .one_or_none()
        )
        if existing is not None:
            movie_record = existing
        else:
            movie_record = movie_model(
                movie_title=movie_title, title_year=title_year
            )
            session.add(movie_record)
        # Category foreign keys and numeric stats, applied uniformly.
        field_values = {
            'content_rating_pk': content_rating_pk,
            'country_pk': country_pk,
            'director_pk': director_pk,
            'language_pk': language_pk,
            'movie_color_pk': color_pk,
            'aspect_ratio': aspect_ratio,
            'budget': budget,
            'cast_facebook_likes': cast_facebook_likes,
            'duration': duration,
            'facenum': facenum,
            'gross': gross,
            'imdb_id': imdb_id,
            'imdb_score': imdb_score,
            'movie_facebook_likes': movie_facebook_likes,
            'num_critic_for_reviews': num_critic_for_reviews,
            'num_user_for_reviews': num_user_for_reviews,
            'num_voted_users': num_voted_users,
        }
        for attr, value in field_values.items():
            setattr(movie_record, attr, value)
        self.commit(session)

    @abc.abstractmethod
    def query(self, **kwargs):
        pass
class MovieLookupIndex(action.ControllerAction):
    """ Action to build lookup table of movie (title, year) tuple to id """

    @abc.abstractmethod
    def execute(self, **kwargs):
        pass

    def query(self):
        """Return {(lower-cased title, year): primary key} for every movie."""
        session = self.get_session()
        index = {}
        for movie in session.query(src.model.movie.Movie).all():
            index[(movie.movie_title.lower(), movie.title_year)] = movie.pk
        return index
class AttachMovieGenre(action.ControllerAction):
    """ Attaches genres to movie. Records must exist in db. """

    def execute(self, movie_pk: int, genre_pks: List[int]):
        """Link each genre in *genre_pks* to the movie with *movie_pk*."""
        session = self.get_session()
        movie = (
            session.query(src.model.movie.Movie)
            .filter(src.model.movie.Movie.pk == movie_pk)
            .one()
        )
        matching_genres = (
            session.query(src.model.fields.Genre)
            .filter(src.model.fields.Genre.pk.in_(genre_pks))
            .all()
        )
        # Skip genres already attached so the association stays unique.
        for genre in matching_genres:
            if genre not in movie.genres:
                movie.genres.append(genre)
        self.commit(session)

    @abc.abstractmethod
    def query(self, **kwargs):
        pass
class AttachMoviePlotKeywords(action.ControllerAction):
    """ Attaches keywords to movie. Records must exist in db. """

    def execute(self, movie_pk: int, keyword_pks: List[int]):
        """Link each plot keyword in *keyword_pks* to movie *movie_pk*."""
        session = self.get_session()
        movie = (
            session.query(src.model.movie.Movie)
            .filter(src.model.movie.Movie.pk == movie_pk)
            .one()
        )
        matching_keywords = (
            session.query(src.model.fields.Keyword)
            .filter(src.model.fields.Keyword.pk.in_(keyword_pks))
            .all()
        )
        # Skip keywords already attached so the association stays unique.
        for keyword in matching_keywords:
            if keyword not in movie.keywords:
                movie.keywords.append(keyword)
        self.commit(session)

    @abc.abstractmethod
    def query(self, **kwargs):
        pass
class AttachMovieActors(action.ControllerAction):
    """ Attaches actors to movie. Records must exist in db. """

    def execute(self, movie_pk: int, actor_pks: List[int]):
        """Link each person in *actor_pks* to movie *movie_pk* as an actor."""
        session = self.get_session()
        movie = (
            session.query(src.model.movie.Movie)
            .filter(src.model.movie.Movie.pk == movie_pk)
            .one()
        )
        matching_people = (
            session.query(src.model.person.Person)
            .filter(src.model.person.Person.pk.in_(actor_pks))
            .all()
        )
        # Skip people already attached so the association stays unique.
        for person in matching_people:
            if person not in movie.actors:
                movie.actors.append(person)
        self.commit(session)

    @abc.abstractmethod
    def query(self, **kwargs):
        pass
|
# coding=utf-8
import os
import pickle
import re
from itertools import permutations
from itertools import product
import click
from backend.evaluation.summary import ResultsSummary
from generate_tests import COVERAGE_MAP
from generate_tests import HISTORY_MAP
@click.group()
def cli():
    """Group entry point for the result-summary reporting commands."""
@cli.command("per_size")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
sizes = [5, 10, 25, 50, 100, 200, 400]
for size in [str(x) for x in sizes]:
print_merged_results(size, data_dir)
@cli.command("per_2combos")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
all_combos = []
for (cov, hist) in product(COVERAGE_MAP.items(), HISTORY_MAP.items()):
combos = [f"{m1}{m2}" for ((_, m1), (_, m2)) in permutations([cov, hist], 2)]
all_combos.extend(combos)
all_combos.sort()
for name in all_combos:
print_merged_results(name, data_dir)
@cli.command("per_3combos")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
all_combos = []
for (cov, hist1, hist2) in product(
COVERAGE_MAP.items(), HISTORY_MAP.items(), HISTORY_MAP.items()
):
if hist1 == hist2:
continue
combos = permutations([cov, hist1, hist2], 3)
for ((_, m1_name), (_, m2_name), (_, m3_name)) in combos:
metrics_name = f"{m1_name}{m2_name}{m3_name}"
if metrics_name in all_combos:
continue
all_combos.append(metrics_name)
all_combos.sort()
for name in all_combos:
print_merged_results(name, data_dir)
def print_merged_results(key, data_dir):
    """Aggregate the pickled ResultsSummary files matching *key* across the
    four demo batches and print one normalized CSV line.

    Fix: pickle files were opened with bare ``open(...)`` and never closed;
    they are now closed via ``with``.

    :param key: metric-combination (or size) tag embedded in the file names
    :param data_dir: directory containing ``*_<key>_<batch>.pickle`` files
    """
    key_results = []
    for batch in ["demo1", "demo2", "demo3", "demo4"]:
        pattern = re.compile(r"_" + key + r"_" + batch + r".pickle")
        results = [
            os.path.abspath(os.path.join(data_dir, x))
            for x in os.listdir(data_dir)
            if re.search(pattern, x) is not None
        ]
        with open(results[0], mode="rb") as handle:
            aggregated: ResultsSummary = pickle.load(handle)
        for file in results[1:]:
            with open(file, mode="rb") as handle:
                aggregated.merge_same(pickle.load(handle))
        key_results.append(aggregated)
    # Fold the per-batch summaries into one, then average over the 4 batches.
    while len(key_results) > 1:
        key_results[0].merge_diff(key_results.pop())
    key_final = key_results.pop()
    key_final.normalize_diff(4)
    print(f"{key_final.export_to_csv_line(prefix=key.upper())}")
if __name__ == "__main__":
cli()
|
"""
方策強化学習用ベースライン棋譜生成
yaneuraouのsearch(=駒得評価で数手読む)で自己対戦
yaneuraouの40バイトの形式の棋譜を出力する。評価値は意味なし。
ランダム性のため、最初に20手ランダムムーブしてから開始する。この部分は棋譜に含まない。
"""
import os
import sys
import argparse
import random
import struct
from tqdm import tqdm
from .move import Piece, Color, Move
from .position import Position, PositionHelper
pack_fmt = "<hHHbb" # short score, ushort move, ushort gamePly, signed char result, signed char pad
class RLKifugenBaseline:
    """Self-play generator producing baseline training records."""
    first_random_moves: int  # random opening moves (excluded from output)
    search_depth: int  # plies searched per move
    draw_moves: int  # ply count at which the game is declared a draw
    def __init__(self, first_random_moves: int):
        self.first_random_moves = first_random_moves
        self.search_depth = 3
        self.draw_moves = 256
    def _random_move(self, pos: Position):
        """Play the configured number of uniformly random legal moves."""
        for i in range(self.first_random_moves):
            m = random.choice(pos.generate_move_list())
            pos.do_move(m)
    def play(self, f) -> int:
        """
        Play one game and write its record to the file.
        :param f: binary file object to append packed records to
        :return: number of positions written; 0 for a drawn game.
        """
        pos = Position()
        pos.set_hirate()
        self._random_move(pos)
        sfens = [] # packed sfen
        moves = [] # int
        side_to_move_list = []
        game_ply_list = []
        winner = 0
        while pos.game_ply() <= self.draw_moves: # game_ply() is 1 at the initial position
            m = pos.search(self.search_depth)
            if m == Move.MOVE_RESIGN:
                # The side to move resigns, so the opponent wins.
                winner = 1 - pos.side_to_move()
                break
            sfens.append(pos.sfen_pack())
            moves.append(m.to_int())
            side_to_move_list.append(pos.side_to_move())
            game_ply_list.append(pos.game_ply())
            pos.do_move(m)
        else:
            # Draw: emit nothing for this game.
            return 0
        for i in range(len(sfens)):
            # result is +1 from the winner's perspective, -1 from the loser's.
            game_result = 1 if winner == side_to_move_list[i] else -1
            f.write(sfens[i])
            f.write(struct.pack(pack_fmt, 0, moves[i], game_ply_list[i], game_result, 0))
        return len(sfens)
def main():
    """CLI: self-play until at least n_positions records are written to dst."""
    parser = argparse.ArgumentParser()
    parser.add_argument("dst")
    parser.add_argument("n_positions", type=int)
    parser.add_argument("--first_random_moves", type=int, default=20)
    parser.add_argument("--search_depth", type=int, default=3)
    args = parser.parse_args()

    generator = RLKifugenBaseline(args.first_random_moves)
    generator.search_depth = args.search_depth

    written = 0
    progress = tqdm(total=args.n_positions)
    with open(args.dst, "wb") as out:
        while written < args.n_positions:
            game_positions = generator.play(out)
            written += game_positions
            progress.update(game_positions)
    progress.close()
# Script entry point.
if __name__ == "__main__":
    main()
|
import unittest
class Empty:
    """Object with default identity-based equality; custom repr keeps
    test-failure messages readable."""
    def __repr__(self):
        return '<Empty>'
class Cmp:
    """Wrapper that compares equal to whatever its stored value equals."""
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        return '<Cmp %s>' % (self.arg,)
    def __eq__(self, other):
        # Delegate to the wrapped value's own equality.
        return self.arg == other
class Anything:
    """Pathological object claiming equality with everything; exercises
    reflected __eq__/__ne__ handling."""
    def __eq__(self, other):
        return True
    def __ne__(self, other):
        return False
class ComparisonTest(unittest.TestCase):
    """Comparison semantics tests (from the CPython test suite, adapted for
    RustPython): default identity equality, __ne__ fallback to __eq__, and
    operator-delegation rules."""
    # set1 members all compare equal to 2; set2 members are mutually unequal.
    set1 = [2, 2.0, 2, 2+0j, Cmp(2.0)]
    set2 = [[1], (3,), None, Empty()]
    candidates = set1 + set2
    def test_comparisons(self):
        for a in self.candidates:
            for b in self.candidates:
                if ((a in self.set1) and (b in self.set1)) or a is b:
                    self.assertEqual(a, b)
                else:
                    self.assertNotEqual(a, b)
    def test_id_comparisons(self):
        # Ensure default comparison compares id() of args
        L = []
        for i in range(10):
            L.insert(len(L)//2, Empty())
        for a in L:
            for b in L:
                self.assertEqual(a == b, id(a) == id(b),
                                 'a=%r, b=%r' % (a, b))
    def test_ne_defaults_to_not_eq(self):
        a = Cmp(1)
        b = Cmp(1)
        c = Cmp(2)
        self.assertIs(a == b, True)
        self.assertIs(a != b, False)
        self.assertIs(a != c, True)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_ne_high_priority(self):
        """object.__ne__() should allow reflected __ne__() to be tried"""
        calls = []
        class Left:
            # Inherits object.__ne__()
            def __eq__(*args):
                calls.append('Left.__eq__')
                return NotImplemented
        class Right:
            def __eq__(*args):
                calls.append('Right.__eq__')
                return NotImplemented
            def __ne__(*args):
                calls.append('Right.__ne__')
                return NotImplemented
        Left() != Right()
        self.assertSequenceEqual(calls, ['Left.__eq__', 'Right.__ne__'])
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_ne_low_priority(self):
        """object.__ne__() should not invoke reflected __eq__()"""
        calls = []
        class Base:
            # Inherits object.__ne__()
            def __eq__(*args):
                calls.append('Base.__eq__')
                return NotImplemented
        class Derived(Base): # Subclassing forces higher priority
            def __eq__(*args):
                calls.append('Derived.__eq__')
                return NotImplemented
            def __ne__(*args):
                calls.append('Derived.__ne__')
                return NotImplemented
        Base() != Derived()
        self.assertSequenceEqual(calls, ['Derived.__ne__', 'Base.__eq__'])
    def test_other_delegation(self):
        """No default delegation between operations except __ne__()"""
        ops = (
            ('__eq__', lambda a, b: a == b),
            ('__lt__', lambda a, b: a < b),
            ('__le__', lambda a, b: a <= b),
            ('__gt__', lambda a, b: a > b),
            ('__ge__', lambda a, b: a >= b),
        )
        for name, func in ops:
            with self.subTest(name):
                def unexpected(*args):
                    self.fail('Unexpected operator method called')
                class C:
                    __ne__ = unexpected
                for other, _ in ops:
                    if other != name:
                        setattr(C, other, unexpected)
                if name == '__eq__':
                    self.assertIs(func(C(), object()), False)
                else:
                    self.assertRaises(TypeError, func, C(), object())
    def test_issue_1393(self):
        # Anything() must compare equal even against lambdas and bare objects.
        x = lambda: None
        self.assertEqual(x, Anything())
        self.assertEqual(Anything(), x)
        y = object()
        self.assertEqual(y, Anything())
        self.assertEqual(Anything(), y)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import sys
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Union
from pydantic import UUID4, BaseModel, EmailStr, Extra, Field, HttpUrl, constr
from .services import KEY_RE, PROPERTY_KEY_RE, VERSION_RE
# Absolute path of this module (or of the script when run as __main__).
current_file = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve()
# ISO-8601 UTC timestamp pattern, e.g. "2018-07-01T11:13:43Z" (optional .mmm).
DATE_RE = r"\d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\d)T(2[0-3]|1\d|0?[0-9])(:(\d|[0-5]\d)){2}(\.\d{3})?Z"
class PortLink(BaseModel):
    """Reference to an output port of another workbench node."""
    nodeUuid: UUID4 = Field(
        ...,
        description="The node to get the port output from",
        example=["da5068e0-8a8d-4fb9-9516-56e5ddaef15b"],
    )
    output: str = Field(
        ...,
        description="The port key in the node given by nodeUuid",
        regex=KEY_RE,
        example=["out_2"],
    )
    class Config:
        extra = Extra.forbid
class BaseFileLink(BaseModel):
    """Common fields for a file stored in one of the storage backends."""
    store: Union[str, int] = Field(
        ...,
        description="The store identifier, '0' or 0 for simcore S3, '1' or 1 for datcore",
        example=["0", 1],
    )
    path: str = Field(
        ...,
        description="The path to the file in the storage provider domain",
        example=[
            "N:package:b05739ef-260c-4038-b47d-0240d04b0599",
            "94453a6a-c8d4-52b3-a22d-ccbf81f8d636/d4442ca4-23fd-5b6b-ba6d-0b75f711c109/y_1D.txt",
        ],
    )
    class Config:
        extra = Extra.forbid
class SimCoreFileLink(BaseFileLink):
    """Link to a file in simcore S3 storage; needs no extra fields."""
    pass
class DatCoreFileLink(BaseFileLink):
    """Link to a file on datcore, which also requires dataset and label."""
    dataset: str = Field(
        ...,
        description="Unique identifier to access the dataset on datcore (REQUIRED for datcore)",
        example=["N:dataset:f9f5ac51-33ea-4861-8e08-5b4faf655041"],
    )
    label: str = Field(
        ...,
        description="The real file name (REQUIRED for datcore)",
        example=["MyFile.txt"],
    )
    class Config:
        extra = Extra.forbid
class AccessEnum(str, Enum):
    """Access level of a node input port."""
    ReadAndWrite = "ReadAndWrite"
    Invisible = "Invisible"
    ReadOnly = "ReadOnly"
class Position(BaseModel):
    """2-D position of a node in the workbench UI."""
    x: int = Field(..., description="The x position", example=["12"])
    y: int = Field(..., description="The y position", example=["15"])
    class Config:
        extra = Extra.forbid
# Value types accepted on input ports; outputs cannot be PortLinks
# (a link always points at another node's OUTPUT).
InputTypes = Union[int, bool, str, float, PortLink, SimCoreFileLink, DatCoreFileLink]
OutputTypes = Union[int, bool, str, float, SimCoreFileLink, DatCoreFileLink]
# Port identifiers are constrained strings; input and output share the format.
InputID = constr(regex=PROPERTY_KEY_RE)
OutputID = InputID
class Node(BaseModel):
    """A single service instance (node) inside a project workbench.

    NOTE(review): several Optional fields use `...` (required-but-nullable);
    confirm that is intended rather than `None` defaults.
    """
    key: str = Field(
        ...,
        description="distinctive name for the node based on the docker registry path",
        regex=KEY_RE,
        example=[
            "simcore/services/comp/sleeper",
            "simcore/services/dynamic/3dviewer",
            "simcore/services/frontend/file-picker",
        ],
    )
    version: str = Field(
        ...,
        description="semantic version number of the node",
        regex=VERSION_RE,
        example=["1.0.0", "0.0.1"],
    )
    label: str = Field(
        ..., description="The short name of the node", example=["JupyterLab"]
    )
    progress: float = Field(..., ge=0, le=100, description="the node progress value")
    thumbnail: Optional[HttpUrl] = Field(
        None,
        description="url of the latest screenshot of the node",
        example=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"],
    )
    inputs: Optional[Dict[InputID, InputTypes]] = Field(
        ..., description="values of input properties"
    )
    inputAccess: Optional[Dict[InputID, AccessEnum]] = Field(
        ..., description="map with key - access level pairs"
    )
    inputNodes: Optional[List[UUID4]] = Field(
        ...,
        description="node IDs of where the node is connected to",
        example=["nodeUuid1", "nodeUuid2"],
    )
    outputs: Optional[Dict[OutputID, OutputTypes]] = None
    outputNode: Optional[bool] = Field(None, deprecated=True)
    outputNodes: Optional[List[UUID4]] = Field(
        ...,
        description="Used in group-nodes. Node IDs of those connected to the output",
        example=["nodeUuid1", "nodeUuid2"],
    )
    parent: Optional[UUID4] = Field(
        None,
        description="Parent's (group-nodes') node ID s.",
        example=["nodeUUid1", "nodeUuid2"],
    )
    position: Position = Field(...)
    class Config:
        extra = Extra.forbid
class AccessRights(BaseModel):
    """Read/write/delete permissions a group holds on a project."""

    read: bool = Field(..., description="gives read access")
    write: bool = Field(..., description="gives write access")
    delete: bool = Field(..., description="gives deletion rights")

    class Config:
        extra = Extra.forbid
# Dictionary-key types for Project.accessRights and Project.workbench.
GroupID = constr(regex=r"^\d+$")
# NOTE(review): NodeID only accepts digit strings here, yet node references
# elsewhere in this module (Node.inputNodes etc.) are UUID4 — confirm which
# format workbench keys actually use.
NodeID = constr(regex=r"^\d+$")
ClassifierID = str
class Project(BaseModel):
    """A simcore project: ownership, access rights, metadata and the
    workbench (the node graph keyed by node id)."""

    uuid: UUID4 = Field(
        ...,
        description="project unique identifier",
        example=[
            "07640335-a91f-468c-ab69-a374fa82078d",
            "9bcf8feb-c1b1-41b6-b201-639cd6ccdba8",
        ],
    )
    name: str = Field(
        ..., description="project name", example=["Temporal Distortion Simulator"]
    )
    description: str = Field(
        ...,
        description="longer one-line description about the project",
        example=["Dabbling in temporal transitions ..."],
    )
    prjOwner: EmailStr = Field(..., description="user email")
    accessRights: Dict[GroupID, AccessRights] = Field(
        ...,
        description="object containing the GroupID as key and read/write/execution permissions as value",
        additionalProperties=False,
    )
    # Dates are kept as strings validated against DATE_RE rather than datetime
    # objects (serialized form, e.g. "2018-07-01T11:13:43Z").
    creationDate: str = Field(
        ...,
        regex=DATE_RE,
        description="project creation date",
        example=["2018-07-01T11:13:43Z"],
    )
    lastChangeDate: str = Field(
        ...,
        regex=DATE_RE,
        description="last save date",
        example=["2018-07-01T11:13:43Z"],
    )
    thumbnail: HttpUrl = Field(
        ...,
        description="url of the project thumbnail",
        example=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"],
    )
    # The node graph itself: node id -> Node
    workbench: Dict[NodeID, Node]
    tags: Optional[List[int]] = Field(None)
    classifiers: Optional[List[ClassifierID]] = Field(
        None,
        description="Contains the reference to the project classifiers",
        example=["some:id:to:a:classifier"],
    )
    dev: Optional[Dict] = Field(
        None, description="object used for development purposes only"
    )

    class Config:
        description = "Description of a simcore project"
        title = "simcore project"
        extra = Extra.forbid
class Owner(BaseModel):
    """User currently holding a project lock (display name only)."""

    first_name: str
    last_name: str
class ProjectLocked(BaseModel):
    """Lock flag for a project; owner is set only while locked."""

    value: bool
    owner: Optional[Owner]
class ProjectState(BaseModel):
    """Runtime state of a project (currently just its lock status)."""

    locked: ProjectLocked
# Explicit public API of this module: only the project *state* models are
# exported via star-imports.
__all__ = [
    "ProjectState",
    "ProjectLocked",
    "Owner",
]
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from ibemc.models import Subject
def landing(request):
    """Render the public landing page."""
    return render(request, 'landing.html')
def ibemc_stats(request):
    """Render field statistics for all Subject records.

    Fixes over the previous version:
    - ``concact_list`` / ``contacts_list`` were misspellings of
      ``contact_list`` and raised ``NameError`` on every request.
    - The row-count check used ``assert``, which is stripped under
      ``python -O``; it looked like a debugging leftover and was removed.
    - Dead commented-out pagination code after the ``return`` was removed
      (restore it from VCS history if pagination is reintroduced).
    """
    contact_list = Subject.objects.all()
    # NOTE(review): the template name 'field_stats' has no extension while
    # the sibling view uses 'landing.html' — confirm a template with exactly
    # this name exists.
    return render(request, 'field_stats', {'contacts': contact_list})
|
#!/usr/bin/env python3
# https://github.com/dietervanhoof/polybar-spotify-controls
#
# Print the currently playing Spotify track for a polybar module.
# Reads track metadata from Spotify's MPRIS D-Bus interface and prints a
# single formatted line; prints an empty line when Spotify is not running.
import sys
import dbus
import argparse
parser = argparse.ArgumentParser()
# -t/--trunclen: maximum song-title length before truncation
parser.add_argument(
    '-t',
    '--trunclen',
    type=int,
    metavar='trunclen'
)
# -f/--format: output template with {artist} and {song} placeholders
parser.add_argument(
    '-f',
    '--format',
    type=str,
    metavar='custom format',
    dest='custom_format'
)
args = parser.parse_args()
# Default parameters
output = '{artist} - {song}'
trunclen = 1000
# parameters can be overwritten by args
if args.trunclen is not None:
    trunclen = args.trunclen
if args.custom_format is not None:
    output = args.custom_format
try:
    # Connect to Spotify's well-known MPRIS object on the session bus.
    session_bus = dbus.SessionBus()
    spotify_bus = session_bus.get_object(
        'org.mpris.MediaPlayer2.spotify',
        '/org/mpris/MediaPlayer2'
    )
    spotify_properties = dbus.Interface(
        spotify_bus,
        'org.freedesktop.DBus.Properties'
    )
    metadata = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'Metadata')
    # xesam:artist is a list; take the first (primary) artist.
    artist = metadata['xesam:artist'][0]
    song = metadata['xesam:title']
    if len(song) > trunclen:
        song = song[0:trunclen]
        song += '...'
        # If truncation cut off a closing parenthesis, add one back so the
        # displayed title still looks balanced.
        if ('(' in song) and (')' not in song):
            song += ')'
    # Python3 uses UTF-8 by default.
    if sys.version_info.major == 3:
        print(output.format(artist=artist, song=song))
    else:
        print(output.format(artist=artist, song=song).encode('UTF-8'))
except Exception as e:
    # A DBusException means Spotify isn't running: print an empty line so the
    # polybar module shows nothing. Any other error is printed for debugging.
    if isinstance(e, dbus.exceptions.DBusException):
        print('')
    else:
        print(e)
|
# -*- coding: utf-8 -*-
import logging
from django.db.transaction import commit_on_success
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from networkapi.api_rest import exceptions as api_exceptions
from networkapi.api_vip_request.facade import v3 as facade
from networkapi.api_vip_request.permissions import delete_vip_permission
from networkapi.api_vip_request.permissions import deploy_vip_permission
from networkapi.api_vip_request.permissions import DeployCreate
from networkapi.api_vip_request.permissions import DeployDelete
from networkapi.api_vip_request.permissions import DeployUpdate
from networkapi.api_vip_request.permissions import Read
from networkapi.api_vip_request.permissions import Write
from networkapi.api_vip_request.permissions import write_vip_permission
from networkapi.api_vip_request.serializers.v3 import VipRequestV3Serializer
from networkapi.distributedlock import LOCK_VIP
from networkapi.settings import SPECS
from networkapi.util.decorators import logs_method_apiview
from networkapi.util.decorators import permission_classes_apiview
from networkapi.util.decorators import permission_obj_apiview
from networkapi.util.decorators import prepare_search
from networkapi.util.geral import create_lock
from networkapi.util.geral import CustomResponse
from networkapi.util.geral import destroy_lock
from networkapi.util.geral import render_to_json
from networkapi.util.json_validate import json_validate
from networkapi.util.json_validate import raise_json_validate
from networkapi.util.json_validate import verify_ports_vip
# from networkapi.api_vip_request.serializers.v3 import VipRequestV3Serializer
log = logging.getLogger(__name__)
class VipRequestDeployView(APIView):

    # Deploys, undeploys and updates vip requests on the real load-balancer
    # equipments (as opposed to VipRequestDBView, which only touches the DB).
    # NOTE: this file targets Python 2 (``except Exception, exception``).
    # Every method takes per-vip locks before calling the facade and always
    # releases them in a ``finally`` block.

    @permission_classes_apiview((IsAuthenticated, Write, DeployCreate))
    @permission_obj_apiview([deploy_vip_permission])
    @logs_method_apiview
    def post(self, request, *args, **kwargs):
        """
        Creates list of vip request in equipments
        :url /api/v3/vip-request/deploy/<vip_request_ids>/
        :param vip_request_ids=<vip_request_ids>
        """
        # ids come in the URL separated by ';'
        vip_request_ids = kwargs['vip_request_ids'].split(';')
        vips = facade.get_vip_request_by_ids(vip_request_ids)
        vip_serializer = VipRequestV3Serializer(
            vips, many=True, include=('ports__identifier',))
        # lock each vip so concurrent deploys of the same vip are serialized
        locks_list = create_lock(vip_serializer.data, LOCK_VIP)
        try:
            response = facade.create_real_vip_request(
                vip_serializer.data, request.user)
        except Exception, exception:
            # log and wrap any facade error in the API's standard exception
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse(response, status=status.HTTP_200_OK, request=request)

    @permission_classes_apiview((IsAuthenticated, Write, DeployDelete))
    @permission_obj_apiview([deploy_vip_permission])
    @logs_method_apiview
    def delete(self, request, *args, **kwargs):
        """
        Deletes list of vip request in equipments
        :url /api/v3/vip-request/deploy/<vip_request_ids>/
        :param vip_request_ids=<vip_request_ids>
        """
        vip_request_ids = kwargs['vip_request_ids'].split(';')
        vips = facade.get_vip_request_by_ids(vip_request_ids)
        vip_serializer = VipRequestV3Serializer(
            vips, many=True, include=('ports__identifier',))
        locks_list = create_lock(vip_serializer.data, LOCK_VIP)
        try:
            response = facade.delete_real_vip_request(
                vip_serializer.data, request.user)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse(response, status=status.HTTP_200_OK, request=request)

    @permission_classes_apiview((IsAuthenticated, Write, DeployUpdate))
    @permission_obj_apiview([deploy_vip_permission])
    @raise_json_validate('vip_request_put')
    @logs_method_apiview
    def put(self, request, *args, **kwargs):
        """
        Updates list of vip request in equipments
        """
        # full update: payload is validated against the 'vip_request_put' spec
        vips = request.DATA
        json_validate(SPECS.get('vip_request_put')).validate(vips)
        locks_list = create_lock(vips.get('vips'), LOCK_VIP)
        verify_ports_vip(vips)
        try:
            response = facade.update_real_vip_request(
                vips['vips'], request.user)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse(response, status=status.HTTP_200_OK, request=request)

    @permission_classes_apiview((IsAuthenticated, Write, DeployUpdate))
    @permission_obj_apiview([deploy_vip_permission])
    @raise_json_validate('vip_request_patch')
    @logs_method_apiview
    def patch(self, request, *args, **kwargs):
        """
        Updates list of vip request in equipments
        """
        # partial update: like put() but validated against the patch spec and
        # without the port verification step
        vips = request.DATA
        json_validate(SPECS.get('vip_request_patch')).validate(vips)
        locks_list = create_lock(vips.get('vips'), LOCK_VIP)
        try:
            response = facade.patch_real_vip_request(
                vips['vips'], request.user)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse(response, status=status.HTTP_200_OK, request=request)
class VipRequestDBView(APIView):

    # CRUD of vip requests in the database only (deployment to equipments is
    # handled by VipRequestDeployView). Python 2 file.

    @permission_classes_apiview((IsAuthenticated, Read))
    @logs_method_apiview
    @prepare_search
    def get(self, request, *args, **kwargs):
        """
        Returns a list of vip request by ids ou dict
        """
        try:
            # two modes: search (no ids in URL, paginated via self.search)
            # or direct lookup by ';'-separated ids
            if not kwargs.get('vip_request_ids'):
                obj_model = facade.get_vip_request_by_search(self.search)
                vips_requests = obj_model['query_set']
                only_main_property = False
            else:
                vip_request_ids = kwargs['vip_request_ids'].split(';')
                vips_requests = facade.get_vip_request_by_ids(vip_request_ids)
                obj_model = None
                # serializer vips
                only_main_property = True
            serializer_vips = VipRequestV3Serializer(
                vips_requests,
                many=True,
                fields=self.fields,
                include=self.include,
                exclude=self.exclude,
                kind=self.kind
            )
            # prepare serializer with customized properties
            response = render_to_json(
                serializer_vips,
                main_property='vips',
                obj_model=obj_model,
                request=request,
                only_main_property=only_main_property
            )
            return CustomResponse(response, status=status.HTTP_200_OK, request=request)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)

    @permission_classes_apiview((IsAuthenticated, Write))
    @logs_method_apiview
    @raise_json_validate('vip_request_post')
    @commit_on_success
    def post(self, request, *args, **kwargs):
        """
        Creates list of vip request
        """
        # No locks here: the vips do not exist yet, so there is nothing to
        # contend on; @commit_on_success makes the whole batch transactional.
        data = request.DATA
        json_validate(SPECS.get('vip_request_post')).validate(data)
        response = list()
        verify_ports_vip(data)
        for vip in data['vips']:
            facade.validate_save(vip)
            vp = facade.create_vip_request(vip, request.user)
            response.append({'id': vp.id})
        return CustomResponse(response, status=status.HTTP_201_CREATED, request=request)

    @permission_classes_apiview((IsAuthenticated, Write))
    @permission_obj_apiview([write_vip_permission])
    @logs_method_apiview
    @raise_json_validate('vip_request_put')
    @commit_on_success
    def put(self, request, *args, **kwargs):
        """
        Updates list of vip request
        """
        data = request.DATA
        json_validate(SPECS.get('vip_request_put')).validate(data)
        # lock every vip being updated; released in the finally below
        locks_list = create_lock(data['vips'], LOCK_VIP)
        try:
            verify_ports_vip(data)
            for vip in data['vips']:
                facade.validate_save(vip)
                facade.update_vip_request(vip, request.user)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse({}, status=status.HTTP_200_OK, request=request)

    @permission_classes_apiview((IsAuthenticated, Write))
    @permission_obj_apiview([delete_vip_permission])
    @commit_on_success
    def delete(self, request, *args, **kwargs):
        """
        Deletes list of vip request
        """
        vip_request_ids = kwargs['vip_request_ids'].split(';')
        locks_list = create_lock(vip_request_ids, LOCK_VIP)
        # ?keepip=1 keeps the vip's IPs allocated after deletion ('0' default)
        keepip = request.GET.get('keepip') or '0'
        try:
            facade.delete_vip_request(
                vip_request_ids, keepip)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
        finally:
            destroy_lock(locks_list)
        return CustomResponse({}, status=status.HTTP_200_OK, request=request)
class VipRequestDBDetailsView(APIView):

    # Read-only variant of VipRequestDBView.get that forces the serializer
    # into kind='details' (expanded representation) instead of self.kind.

    @permission_classes_apiview((IsAuthenticated, Read))
    @logs_method_apiview
    @prepare_search
    def get(self, request, *args, **kwargs):
        """
        Returns a list of vip request with details by ids ou dict
        """
        try:
            # search mode when no ids are given in the URL, else id lookup
            if not kwargs.get('vip_request_ids'):
                obj_model = facade.get_vip_request_by_search(self.search)
                vips_requests = obj_model['query_set']
                only_main_property = False
            else:
                vip_request_ids = kwargs['vip_request_ids'].split(';')
                vips_requests = facade.get_vip_request_by_ids(vip_request_ids)
                obj_model = None
                only_main_property = True
            # serializer vips
            serializer_vips = VipRequestV3Serializer(
                vips_requests,
                many=True,
                fields=self.fields,
                include=self.include,
                exclude=self.exclude,
                kind='details'
            )
            # prepare serializer with customized properties
            response = render_to_json(
                serializer_vips,
                main_property='vips',
                obj_model=obj_model,
                request=request,
                only_main_property=only_main_property
            )
            return CustomResponse(response, status=status.HTTP_200_OK, request=request)
        except Exception, exception:
            log.error(exception)
            raise api_exceptions.NetworkAPIException(exception)
class VipRequestPoolView(APIView):
@permission_classes_apiview((IsAuthenticated, Read))
@logs_method_apiview
@prepare_search
def get(self, request, *args, **kwargs):
"""
Returns a list of vip request by pool id
"""
try:
pool_id = int(kwargs['pool_id'])
extends_search = {
'viprequestport__viprequestportpool__server_pool': pool_id
}
self.search['extends_search'] = \
[ex.append(extends_search) for ex in self.search['extends_search']] \
if self.search['extends_search'] else [extends_search]
vips_requests = facade.get_vip_request_by_search(self.search)
only_main_property = False
# serializer vips
serializer_vips = VipRequestV3Serializer(
vips_requests['query_set'],
many=True,
fields=self.fields,
include=self.include,
exclude=self.exclude,
kind=self.kind
)
# prepare serializer with customized properties
response = render_to_json(
serializer_vips,
main_property='vips',
obj_model=vips_requests,
request=request,
only_main_property=only_main_property
)
return CustomResponse(response, status=status.HTTP_200_OK, request=request)
except Exception, exception:
log.error(exception)
raise api_exceptions.NetworkAPIException(exception)
|
import contextlib
import typing
from pybuses.foundation import get_subscribed
from pybuses.types import (
Subscribable,
Listener,
)
class CommandBus:
    """Routes each command to the single handler subscribed to its type.

    Middlewares are context-manager factories: every ``handle`` call enters
    ``middleware(command)`` for each registered middleware (in order) around
    the handler invocation.
    """

    def __init__(self, middlewares: typing.Optional[typing.List[typing.Callable]] = None) -> None:
        # Normalize a falsy middleware argument to an empty list.
        self._middlewares = middlewares if middlewares else []
        self._handlers: typing.Dict[Subscribable, Listener] = {}

    def subscribe(self, listener: Listener) -> None:
        """Register *listener* for the command type it subscribes to.

        Raises ValueError if that command type already has a handler.
        """
        command = get_subscribed(listener)
        if command in self._handlers:
            existing = self._handlers[command]
            raise ValueError('{} already has a handler ({})!'.format(command, existing))
        self._handlers[command] = listener

    def handle(self, command: Subscribable) -> None:
        """Dispatch *command* to its handler, wrapped by all middlewares."""
        key = type(command)
        try:
            handler = self._handlers[key]
        except KeyError:
            raise Exception('No handler for {!r}'.format(command))
        # Enter every middleware context before calling the handler; the
        # ExitStack unwinds them in reverse order afterwards.
        with contextlib.ExitStack() as stack:
            for make_context in self._middlewares:
                stack.enter_context(make_context(command))
            handler(command)
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import yaml
from mock import (
ANY,
call,
patch,
MagicMock
)
from nova_compute_hooks import update_nrpe_config
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
sys.modules['apt'] = MagicMock()
sys.modules['apt_pkg'] = MagicMock()
from test_utils import CharmTestCase
# Importing nova_compute_hooks would normally apply the charmhelpers
# hardening decorator; replace it with an identity wrapper so the module can
# be imported in a bare test environment (restart_map/register_configs are
# also stubbed during import).
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
                            lambda *args, **kwargs: f(*args, **kwargs))
    with patch("nova_compute_utils.restart_map"):
        with patch("nova_compute_utils.register_configs"):
            import nova_compute_hooks as hooks
# Attribute names that CharmTestCase patches onto each test (they become
# MagicMocks reachable as self.<name>); grouped by origin module below.
TO_PATCH = [
    # charmhelpers.core.hookenv
    'Hooks',
    'config',
    'log',
    'is_relation_made',
    'relation_get',
    'relation_ids',
    'relation_set',
    'service_name',
    'related_units',
    'remote_service_name',
    # charmhelpers.core.host
    'apt_install',
    'apt_purge',
    'apt_update',
    'filter_installed_packages',
    'restart_on_change',
    'service_restart',
    # charmhelpers.contrib.openstack.utils
    'configure_installation_source',
    'openstack_upgrade_available',
    # charmhelpers.contrib.network.ip
    'get_relation_ip',
    # nova_compute_context
    'nova_metadata_requirement',
    # nova_compute_utils
    # 'PACKAGES',
    'create_libvirt_secret',
    'restart_map',
    'determine_packages',
    'import_authorized_keys',
    'import_keystone_ca_cert',
    'initialize_ssh_keys',
    'migration_enabled',
    'do_openstack_upgrade',
    'public_ssh_key',
    'register_configs',
    'disable_shell',
    'enable_shell',
    'update_nrpe_config',
    'git_install',
    'git_install_requested',
    'network_manager',
    'libvirt_daemon',
    # misc_utils
    'ensure_ceph_keyring',
    'execd_preinstall',
    'assert_libvirt_rbd_imagebackend_allowed',
    'is_request_complete',
    'send_request_if_needed',
    'destroy_libvirt_network',
    # socket
    'gethostname',
    'create_sysctl',
    'install_hugepages',
    'uuid',
    # unitdata
    'unitdata',
    # templating
    'render',
]
class NovaComputeRelationsTests(CharmTestCase):
def setUp(self):
super(NovaComputeRelationsTests, self).setUp(hooks,
TO_PATCH)
self.config.side_effect = self.test_config.get
self.filter_installed_packages.side_effect = \
MagicMock(side_effect=lambda pkgs: pkgs)
self.gethostname.return_value = 'testserver'
self.get_relation_ip.return_value = '10.0.0.50'
def test_install_hook(self):
repo = 'cloud:precise-grizzly'
self.test_config.set('openstack-origin', repo)
self.determine_packages.return_value = ['foo', 'bar']
hooks.install()
self.configure_installation_source.assert_called_with(repo)
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(['foo', 'bar'], fatal=True)
self.assertTrue(self.execd_preinstall.called)
def test_install_hook_git(self):
self.git_install_requested.return_value = True
self.determine_packages.return_value = ['foo', 'bar']
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://git.openstack.org/openstack/requirements', # noqa
'branch': 'stable/juno'},
{'name': 'nova',
'repository': 'git://git.openstack.org/openstack/nova',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
hooks.install()
self.configure_installation_source.assert_called_with(repo)
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(['foo', 'bar'], fatal=True)
self.git_install.assert_called_with(projects_yaml)
self.assertTrue(self.execd_preinstall.called)
@patch.object(hooks, 'ceph_changed')
@patch.object(hooks, 'neutron_plugin_joined')
def test_config_changed_with_upgrade(self, neutron_plugin_joined,
ceph_changed):
self.git_install_requested.return_value = False
self.openstack_upgrade_available.return_value = True
def rel_ids(x):
return {'neutron-plugin': ['rid1'],
'ceph': ['ceph:0']}.get(x, [])
self.relation_ids.side_effect = rel_ids
self.related_units.return_value = ['ceph/0']
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertTrue(self.do_openstack_upgrade.called)
neutron_plugin_joined.assert_called_with('rid1', remote_restart=True)
ceph_changed.assert_called_with(rid='ceph:0', unit='ceph/0')
@patch.object(hooks, 'git_install_requested')
def test_config_changed_with_openstack_upgrade_action(self, git_requested):
git_requested.return_value = False
self.openstack_upgrade_available.return_value = True
self.test_config.set('action-managed-upgrade', True)
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertFalse(self.do_openstack_upgrade.called)
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_migration(self, compute_joined,
neutron_plugin_joined):
self.git_install_requested.return_value = False
self.migration_enabled.return_value = True
_zmq_joined = self.patch('zeromq_configuration_relation_joined')
self.test_config.set('migration-auth-type', 'ssh')
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.assertTrue(self.initialize_ssh_keys.called)
self.assertTrue(_zmq_joined.called)
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_resize(self, compute_joined,
neutron_plugin_joined):
self.git_install_requested.return_value = False
self.test_config.set('enable-resize', True)
_zmq_joined = self.patch('zeromq_configuration_relation_joined')
self.migration_enabled.return_value = False
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.initialize_ssh_keys.assert_called_with(user='nova')
self.enable_shell.assert_called_with(user='nova')
self.assertTrue(_zmq_joined.called)
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_without_resize(self, compute_joined,
neutron_plugin_joined):
self.git_install_requested.return_value = False
self.test_config.set('enable-resize', False)
self.migration_enabled.return_value = False
_zmq_joined = self.patch('zeromq_configuration_relation_joined')
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.disable_shell.assert_called_with(user='nova')
self.assertTrue(_zmq_joined.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_no_upgrade_no_migration(self, compute_joined):
self.git_install_requested.return_value = False
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertFalse(self.do_openstack_upgrade.called)
self.assertFalse(compute_joined.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_sysctl(self, compute_joined):
self.git_install_requested.return_value = False
self.migration_enabled.return_value = False
self.test_config.set(
'sysctl',
'{ kernel.max_pid : "1337", vm.swappiness : 10 }')
hooks.config_changed()
self.create_sysctl.assert_called_with(
"{kernel.max_pid: '1337', vm.swappiness: 10}\n",
'/etc/sysctl.d/50-nova-compute.conf')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_sysctl_swappy_default(self, compute_joined):
self.git_install_requested.return_value = False
self.test_config.set(
'sysctl',
'{ kernel.max_pid : "1337" }')
self.migration_enabled.return_value = False
hooks.config_changed()
self.create_sysctl.assert_called_with(
"{kernel.max_pid: '1337', vm.swappiness: 1}\n",
'/etc/sysctl.d/50-nova-compute.conf')
@patch.object(hooks, 'config_value_changed')
def test_config_changed_git(self, config_val_changed):
self.git_install_requested.return_value = True
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository':
'git://git.openstack.org/openstack/requirements',
'branch': 'stable/juno'},
{'name': 'nova',
'repository': 'git://git.openstack.org/openstack/nova',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
self.migration_enabled.return_value = False
hooks.config_changed()
self.git_install.assert_called_with(projects_yaml)
self.assertFalse(self.do_openstack_upgrade.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_no_nrpe(self, compute_joined):
self.git_install_requested.return_value = False
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
self.is_relation_made.return_value = False
hooks.config_changed()
self.assertFalse(self.update_nrpe_config.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_nrpe(self, compute_joined):
self.git_install_requested.return_value = False
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
self.is_relation_made.return_value = True
hooks.config_changed()
self.assertTrue(self.update_nrpe_config.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_invalid_migration(self, compute_joined):
self.migration_enabled.return_value = True
self.test_config.set('migration-auth-type', 'none')
with self.assertRaises(Exception) as context:
hooks.config_changed()
self.assertEqual(
context.exception.message,
'Invalid migration-auth-type')
@patch('nova_compute_hooks.nrpe')
@patch('nova_compute_hooks.services')
@patch('charmhelpers.core.hookenv')
def test_nrpe_services_no_qemu_kvm(self, hookenv, services, nrpe):
'''
The qemu-kvm service is not monitored by NRPE, since it's one-shot.
'''
services.return_value = ['libvirtd', 'qemu-kvm', 'libvirt-bin']
update_nrpe_config()
nrpe.add_init_service_checks.assert_called_with(
ANY, ['libvirtd', 'libvirt-bin'], ANY)
def test_amqp_joined(self):
hooks.amqp_joined()
self.relation_set.assert_called_with(
username='nova', vhost='openstack',
relation_id=None)
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.amqp_changed()
self.log.assert_called_with(
'amqp relation incomplete. Peer not ready?'
)
def _amqp_test(self, configs, neutron=False):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['amqp']
configs.write = MagicMock()
hooks.amqp_changed()
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_with_data_no_neutron(self, configs):
self._amqp_test(configs)
self.assertEqual([call('/etc/nova/nova.conf')],
configs.write.call_args_list)
def test_db_joined(self):
self.is_relation_made.return_value = False
hooks.db_joined()
self.relation_set.assert_called_with(relation_id=None,
nova_database='nova',
nova_username='nova',
nova_hostname='10.0.0.50')
self.get_relation_ip.assert_called_with('shared-db')
def test_postgresql_db_joined(self):
self.is_relation_made.return_value = False
hooks.pgsql_db_joined()
self.relation_set.assert_called_with(**{
'database': 'nova', 'private-address': '10.0.0.50'})
def test_db_joined_with_postgresql(self):
self.is_relation_made.return_value = True
msg = ('Attempting to associate a mysql database when there is '
'already associated a postgresql one')
with self.assertRaisesRegexp(Exception, msg):
hooks.db_joined()
def test_postgresql_joined_with_db(self):
self.is_relation_made.return_value = True
msg = ('Attempting to associate a postgresql database when there is '
'already associated a mysql one')
with self.assertRaisesRegexp(Exception, msg):
hooks.pgsql_db_joined()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.db_changed()
self.log.assert_called_with(
'shared-db relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.postgresql_db_changed()
self.log.assert_called_with(
'pgsql-db relation incomplete. Peer not ready?'
)
def _shared_db_test(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['shared-db']
configs.write = MagicMock()
hooks.db_changed()
def _postgresql_db_test(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['pgsql-db']
configs.write = MagicMock()
hooks.postgresql_db_changed()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_with_data(self, configs):
self._shared_db_test(configs)
self.assertEqual([call('/etc/nova/nova.conf')],
configs.write.call_args_list)
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed_with_data(self, configs):
self._postgresql_db_test(configs)
self.assertEqual([call('/etc/nova/nova.conf')],
configs.write.call_args_list)
@patch.object(hooks, 'CONFIGS')
def test_image_service_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.image_service_changed()
self.log.assert_called_with(
'image-service relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_image_service_with_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.write = MagicMock()
configs.complete_contexts.return_value = ['image-service']
hooks.image_service_changed()
configs.write.assert_called_with('/etc/nova/nova.conf')
def test_compute_joined_no_migration_no_resize(self):
self.migration_enabled.return_value = False
hooks.compute_joined()
self.assertFalse(self.relation_set.called)
def test_compute_joined_with_ssh_migration(self):
self.migration_enabled.return_value = True
self.test_config.set('migration-auth-type', 'ssh')
self.public_ssh_key.return_value = 'foo'
hooks.compute_joined()
self.relation_set.assert_called_with(**{
'relation_id': None,
'ssh_public_key': 'foo',
'migration_auth_type': 'ssh',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
hooks.compute_joined(rid='cloud-compute:2')
self.relation_set.assert_called_with(**{
'relation_id': 'cloud-compute:2',
'ssh_public_key': 'foo',
'migration_auth_type': 'ssh',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
def test_compute_joined_with_resize(self):
self.migration_enabled.return_value = False
self.test_config.set('enable-resize', True)
self.public_ssh_key.return_value = 'bar'
hooks.compute_joined()
self.relation_set.assert_called_with(**{
'relation_id': None,
'nova_ssh_public_key': 'bar',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
hooks.compute_joined(rid='cloud-compute:2')
self.relation_set.assert_called_with(**{
'relation_id': 'cloud-compute:2',
'nova_ssh_public_key': 'bar',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
def test_compute_changed(self):
hooks.compute_changed()
self.assertTrue(self.import_keystone_ca_cert.called)
self.import_authorized_keys.assert_has_calls([
call(),
call(user='nova', prefix='nova'),
])
def test_compute_changed_nonstandard_authorized_keys_path(self):
self.migration_enabled.return_value = False
self.test_config.set('enable-resize', True)
hooks.compute_changed()
self.import_authorized_keys.assert_called_with(
user='nova',
prefix='nova',
)
def test_ceph_joined(self):
self.libvirt_daemon.return_value = 'libvirt-bin'
hooks.ceph_joined()
self.apt_install.assert_called_with(['ceph-common'], fatal=True)
self.service_restart.assert_called_with('libvirt-bin')
self.libvirt_daemon.assert_called()
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.ceph_changed()
self.log.assert_called_with(
'ceph relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_no_keyring(self, configs):
    """ceph_changed only logs when the keyring cannot be created."""
    configs.complete_contexts = MagicMock()
    configs.complete_contexts.return_value = ['ceph']
    self.ensure_ceph_keyring.return_value = False
    hooks.ceph_changed()
    self.log.assert_called_with(
        'Could not create ceph keyring: peer not ready?'
    )
@patch.object(hooks, 'mark_broker_action_done')
@patch.object(hooks, 'is_broker_action_done')
@patch('nova_compute_context.service_name')
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_with_key_and_relation_data(self, configs,
                                                 service_name,
                                                 is_broker_action_done,
                                                 mark_broker_action_done):
    """With complete relation data, ceph_changed writes the config files,
    restarts nova-compute, and marks the broker action done exactly once."""
    self.test_config.set('libvirt-image-backend', 'rbd')
    self.is_request_complete.return_value = True
    self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
    configs.complete_contexts = MagicMock()
    configs.complete_contexts.return_value = ['ceph']
    configs.write = MagicMock()
    service_name.return_value = 'nova-compute'
    self.ensure_ceph_keyring.return_value = True
    is_broker_action_done.return_value = False
    hooks.ceph_changed()
    self.assertTrue(mark_broker_action_done.called)
    # Config files written, in this exact order.
    ex = [
        call('/var/lib/charm/nova-compute/ceph.conf'),
        call('/etc/ceph/secret.xml'),
        call('/etc/nova/nova.conf'),
    ]
    self.assertEqual(ex, configs.write.call_args_list)
    self.service_restart.assert_called_with('nova-compute')
    # Second pass: action already done, so it must not be marked again.
    is_broker_action_done.return_value = True
    mark_broker_action_done.reset_mock()
    hooks.ceph_changed()
    self.assertFalse(mark_broker_action_done.called)
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_create_pool')
def test_get_ceph_request(self, mock_create_pool,
                          mock_request_access):
    """No pool or access ops are requested when the image backend is not rbd."""
    self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
    self.test_config.set('rbd-pool', 'nova')
    self.test_config.set('ceph-osd-replication-count', 3)
    self.test_config.set('ceph-pool-weight', 28)
    hooks.get_ceph_request()
    mock_create_pool.assert_not_called()
    mock_request_access.assert_not_called()
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_create_pool')
def test_get_ceph_request_rbd(self, mock_create_pool,
                              mock_request_access):
    """With the rbd backend, a pool is created but access stays unrestricted."""
    self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
    self.test_config.set('rbd-pool', 'nova')
    self.test_config.set('ceph-osd-replication-count', 3)
    self.test_config.set('ceph-pool-weight', 28)
    self.test_config.set('libvirt-image-backend', 'rbd')
    hooks.get_ceph_request()
    mock_create_pool.assert_called_with(name='nova', replica_count=3,
                                        weight=28,
                                        group='vms')
    mock_request_access.assert_not_called()
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
       '.add_op_create_pool')
def test_get_ceph_request_perms(self, mock_create_pool,
                                mock_request_access):
    """With restrict-ceph-pools set, access to the three standard pools
    is explicitly requested in addition to pool creation."""
    self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
    self.test_config.set('rbd-pool', 'nova')
    self.test_config.set('ceph-osd-replication-count', 3)
    self.test_config.set('ceph-pool-weight', 28)
    self.test_config.set('libvirt-image-backend', 'rbd')
    self.test_config.set('restrict-ceph-pools', True)
    hooks.get_ceph_request()
    mock_create_pool.assert_called_with(name='nova', replica_count=3,
                                        weight=28,
                                        group='vms')
    mock_request_access.assert_has_calls([
        call(name='volumes', permission='rwx'),
        call(name='images', permission='rwx'),
        call(name='vms', permission='rwx'),
    ])
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed(self, configs,
                                service_restart_handler):
    """When metadata is required with a shared secret, the metadata API
    package is installed and nova.conf is rewritten."""
    self.nova_metadata_requirement.return_value = (True,
                                                   'sharedsecret')
    hooks.neutron_plugin_changed()
    self.assertTrue(self.apt_update.called)
    self.apt_install.assert_called_with(['nova-api-metadata'],
                                        fatal=True)
    configs.write.assert_called_with('/etc/nova/nova.conf')
    service_restart_handler.assert_called_with(
        default_service='nova-compute')
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed_nometa(self, configs,
                                       service_restart_handler):
    """When metadata is not required, the metadata API package is purged."""
    self.nova_metadata_requirement.return_value = (False, None)
    hooks.neutron_plugin_changed()
    self.apt_purge.assert_called_with('nova-api-metadata',
                                      fatal=True)
    configs.write.assert_called_with('/etc/nova/nova.conf')
    service_restart_handler.assert_called_with(
        default_service='nova-compute')
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed_meta(self, configs,
                                     service_restart_handler):
    """Metadata required without a secret still installs the metadata API."""
    self.nova_metadata_requirement.return_value = (True, None)
    hooks.neutron_plugin_changed()
    self.apt_install.assert_called_with(['nova-api-metadata'],
                                        fatal=True)
    configs.write.assert_called_with('/etc/nova/nova.conf')
    service_restart_handler.assert_called_with(
        default_service='nova-compute')
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_relid(self, get_hugepage_number):
    """neutron_plugin_joined forwards the explicit relation id."""
    get_hugepage_number.return_value = None
    hooks.neutron_plugin_joined(relid='relid23')
    expect_rel_settings = {
        'hugepage_number': None,
        'default_availability_zone': 'nova',
    }
    self.relation_set.assert_called_with(
        relation_id='relid23',
        **expect_rel_settings
    )
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_huge(self, get_hugepage_number):
    """The hugepage count is included in the relation data when set."""
    get_hugepage_number.return_value = 12
    hooks.neutron_plugin_joined()
    expect_rel_settings = {
        'hugepage_number': 12,
        'default_availability_zone': 'nova',
    }
    self.relation_set.assert_called_with(
        relation_id=None,
        **expect_rel_settings
    )
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_remote_restart(self, get_hugepage_number):
    """remote_restart=True adds a fresh uuid as the restart trigger."""
    get_hugepage_number.return_value = None
    self.uuid.uuid4.return_value = 'e030b959-7207'
    hooks.neutron_plugin_joined(remote_restart=True)
    expect_rel_settings = {
        'hugepage_number': None,
        'restart-trigger': 'e030b959-7207',
        'default_availability_zone': 'nova',
    }
    self.relation_set.assert_called_with(
        relation_id=None,
        **expect_rel_settings
    )
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler(self,
                                 is_unit_paused_set):
    """Without a restart nonce on the relation, nothing is restarted."""
    self.relation_get.return_value = None
    mock_kv = MagicMock()
    mock_kv.get.return_value = None
    self.unitdata.kv.return_value = mock_kv
    hooks.service_restart_handler(default_service='foorbar')
    self.relation_get.assert_called_with(
        attribute='restart-nonce',
        unit=None,
        rid=None
    )
    is_unit_paused_set.assert_not_called()
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler_with_service(self,
                                              is_unit_paused_set):
    """A new nonce restarts the remote-specified service and records the
    nonce in the unit kv store."""
    # side_effect: first call returns the nonce, second the service name.
    self.relation_get.side_effect = ['nonce', 'foobar-service']
    mock_kv = MagicMock()
    mock_kv.get.return_value = None
    self.unitdata.kv.return_value = mock_kv
    is_unit_paused_set.return_value = False
    hooks.service_restart_handler()
    self.relation_get.assert_has_calls([
        call(attribute='restart-nonce',
             unit=None,
             rid=None),
        call(attribute='remote-service',
             unit=None,
             rid=None),
    ])
    self.service_restart.assert_called_with('foobar-service')
    mock_kv.set.assert_called_with('restart-nonce',
                                   'nonce')
    self.assertTrue(mock_kv.flush.called)
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler_when_paused(self,
                                             is_unit_paused_set):
    """A paused unit records the nonce but does not restart the service."""
    self.relation_get.side_effect = ['nonce', 'foobar-service']
    mock_kv = MagicMock()
    mock_kv.get.return_value = None
    self.unitdata.kv.return_value = mock_kv
    is_unit_paused_set.return_value = True
    hooks.service_restart_handler()
    self.relation_get.assert_has_calls([
        call(attribute='restart-nonce',
             unit=None,
             rid=None),
    ])
    self.service_restart.assert_not_called()
    mock_kv.set.assert_called_with('restart-nonce',
                                   'nonce')
    self.assertTrue(mock_kv.flush.called)
def test_ceph_access_incomplete(self):
    """ceph_access does nothing when key/secret-uuid are not yet on the
    relation."""
    self.relation_get.return_value = None
    self.test_config.set('virt-type', 'kvm')
    hooks.ceph_access()
    self.relation_get.assert_has_calls([
        call('key', None, None),
        call('secret-uuid', None, None),
    ])
    self.render.assert_not_called()
    self.create_libvirt_secret.assert_not_called()
def test_ceph_access_lxd(self):
    """For lxd, only the keyring is ensured — no libvirt secret is set up."""
    self.relation_get.side_effect = ['mykey', 'uuid2']
    self.remote_service_name.return_value = 'cinder-ceph'
    self.test_config.set('virt-type', 'lxd')
    hooks.ceph_access()
    self.relation_get.assert_has_calls([
        call('key', None, None),
        call('secret-uuid', None, None),
    ])
    self.render.assert_not_called()
    self.create_libvirt_secret.assert_not_called()
    self.ensure_ceph_keyring.assert_called_with(
        service='cinder-ceph',
        user='nova',
        group='nova',
        key='mykey'
    )
def test_ceph_access_complete(self):
    """For kvm with complete data, the secret XML is rendered, the libvirt
    secret is created, and the keyring is ensured."""
    self.relation_get.side_effect = ['mykey', 'uuid2']
    self.remote_service_name.return_value = 'cinder-ceph'
    self.test_config.set('virt-type', 'kvm')
    hooks.ceph_access()
    self.relation_get.assert_has_calls([
        call('key', None, None),
        call('secret-uuid', None, None),
    ])
    self.render.assert_called_with(
        'secret.xml',
        '/etc/ceph/secret-cinder-ceph.xml',
        context={'ceph_secret_uuid': 'uuid2',
                 'service_name': 'cinder-ceph'}
    )
    self.create_libvirt_secret.assert_called_with(
        secret_file='/etc/ceph/secret-cinder-ceph.xml',
        secret_uuid='uuid2',
        key='mykey',
    )
    self.ensure_ceph_keyring.assert_called_with(
        service='cinder-ceph',
        user='nova',
        group='nova',
        key='mykey'
    )
|
from jaraco.windows.filesystem import trace_symlink_target
from optparse import OptionParser
def get_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional argument list to parse; defaults to ``sys.argv[1:]``
            (via optparse), so existing zero-argument callers are unaffected.

    Returns:
        An optparse options object with a ``filename`` attribute.

    Exits with a usage error when no filename is supplied.
    """
    parser = OptionParser(usage='%prog FILENAME')
    options, args = parser.parse_args(argv)
    try:
        options.filename = args.pop(0)
    except IndexError:
        parser.error('filename required')
    return options
def main():
    """Print the resolved target of the symlink named on the command line."""
    options = get_args()
    print(trace_symlink_target(options.filename))
# Script entry point.
if __name__ == '__main__':
    main()
|
# Copyright 2015 Google Inc. All Rights Reserved.
import datetime
import json
import logging
import os
import time
import httplib2
import webapp2
from apiclient import discovery
from google.appengine.api import app_identity
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.client import SignedJwtAssertionCredentials
import constants
from constants import LogField
class Analytics(object):
    """Encapsulates analytics logic; used internally in the module.

    All data is streamed to BigQuery. The target dataset differs between
    local development and production so test events never pollute real data.
    """
    def __init__(self):
        # Dev-appserver application ids are prefixed with 'dev'.
        is_running_locally = os.environ.get('APPLICATION_ID', '').startswith('dev')
        self.bigquery_table = constants.BIGQUERY_TABLE
        if is_running_locally:
            self.bigquery_dataset = constants.BIGQUERY_DATASET_LOCAL
        else:
            self.bigquery_dataset = constants.BIGQUERY_DATASET_PROD
        # Attempt to initialize a connection to BigQuery; stays None when no
        # credentials are available (report_event then only logs locally).
        self.bigquery = None
        if is_running_locally:
            # Local instances require a 'secrets.json' file.
            secrets_path = os.path.join(os.path.dirname(__file__), 'secrets.json')
            if (os.path.exists(secrets_path)):
                with open(secrets_path) as f:
                    auth = json.load(f)
                    credentials = SignedJwtAssertionCredentials(
                        auth['client_email'], auth['private_key'],
                        constants.BIGQUERY_URL)
                    self.bigquery = self._build_bigquery_object(credentials)
            else:
                logging.warning(
                    'No credentials provided for BigQuery. Logging disabled.')
        else:
            # Use the GAE service credentials.
            credentials = AppAssertionCredentials(
                scope=constants.BIGQUERY_URL)
            self.bigquery = self._build_bigquery_object(credentials)

    def _build_bigquery_object(self, credentials):
        """Return an authorized BigQuery v2 API client."""
        http = credentials.authorize(httplib2.Http())
        return discovery.build("bigquery", "v2", http=http)

    def _timestamp_from_millis(self, time_ms):
        """Convert back to seconds as float and then to ISO format."""
        return datetime.datetime.fromtimestamp(float(time_ms)/1000.).isoformat()

    def report_event(self, event_type, room_id=None, time_ms=None,
                     client_time_ms=None, host=None):
        """Stream a single event row to BigQuery (log-only if unconfigured).

        Args:
            event_type: value stored in the EVENT_TYPE field.
            room_id: optional room identifier.
            time_ms: server timestamp in milliseconds; defaults to now.
            client_time_ms: optional client-side timestamp in milliseconds.
            host: optional request host.
        """
        event = {LogField.EVENT_TYPE: event_type}
        if room_id is not None:
            event[LogField.ROOM_ID] = room_id
        if client_time_ms is not None:
            event[LogField.CLIENT_TIMESTAMP] = \
                self._timestamp_from_millis(client_time_ms)
        if host is not None:
            event[LogField.HOST] = host
        if time_ms is None:
            time_ms = time.time() * 1000.
        event[LogField.TIMESTAMP] = self._timestamp_from_millis(time_ms)
        obj = {"rows": [{"json": event}]}
        logging.info("Event: {0}".format(obj))
        if self.bigquery is not None:
            response = self.bigquery.tabledata().insertAll(
                projectId=app_identity.get_application_id(),
                datasetId=self.bigquery_dataset,
                tableId=self.bigquery_table,
                body=obj).execute()
            logging.info("BigQuery response: {0}".format(response))
# Lazily-initialized module-level Analytics instance (see report_event below).
analytics = None
def report_event(*args, **kwargs):
    """Used by other modules to actually do logging.

    A passthrough to a global Analytics instance initialized on use.
    """
    global analytics
    # Initialization is delayed until the first use so that our
    # environment is ready and available. This is a problem with unit
    # tests since the testbed needs to be initialized before creating an
    # Analytics instance.
    if analytics is None:
        analytics = Analytics()
    analytics.report_event(*args, **kwargs)
|
from enum import Enum
from frozendict import frozendict
class Datasets(str, Enum):
    """Canonical identifiers for supported datasets.

    Subclassing ``str`` makes members compare equal to their plain string
    values (e.g. ``Datasets.MNIST == "MNIST"``).
    """
    MNIST = "MNIST"
    CIFAR10 = "CIFAR10"
    CIFAR100 = "CIFAR100"
    COCO = "COCO"
    PASCAL_VOC2012 = 'PASCAL_VOC2012'
    STL10 = "STL10"
    SVHN = "SVHN"
    PHOTOTOUR = "PHOTOTOUR"
    SBD = "SBD"
    USPS = "USPS"
    HMDB51 = "HMDB51"
# Immutable mapping: dataset name -> {split name: download URL}.
# NOTE(review): SVHN_FULL has no matching member in the Datasets enum —
# confirm whether it is intentionally keyed separately from SVHN (full-size
# images vs the cropped 32x32 .mat files).
DATASET_DOWNLOAD_LINKS = frozendict(
    MNIST = {"train": "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
             "train_labels": "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
             "test": "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
             "test_labels": "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"},
    CIFAR10 = {"train/val" : "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"},
    CIFAR100 = {"train/val" : "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"},
    COCO = {"train": "http://images.cocodataset.org/zips/train2017.zip",
            "val": "http://images.cocodataset.org/zips/val2017.zip",
            "annotations": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip"},
    PASCAL_VOC2012 = {"train/val": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"},
    STL10 = {"train/val": "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"},
    SVHN_FULL = {"train": "http://ufldl.stanford.edu/housenumbers/train.tar.gz",
                 "test": "http://ufldl.stanford.edu/housenumbers/test.tar.gz",
                 "extra": "http://ufldl.stanford.edu/housenumbers/extra.tar.gz"},
    SVHN = {"train": "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
            "test": "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
            "extra": "http://ufldl.stanford.edu/housenumbers/extra_32x32.mat"},
    PHOTOTOUR = {"train/val": "http://phototour.cs.washington.edu/datasets/NotreDame.zip"},
    SBD = {"train": "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"},
    USPS = {"train": "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
            "test": "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2"},
    HMDB51 = {"train/val": "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"}
)
|
import os
import sys
here = sys.path[0]
sys.path.insert(0, os.path.join(here,'..'))
import time
import binascii
from coap import coap
from coap import coapOption as o
from coap import coapObjectSecurity as oscoap
import logging_setup
# IPv6 loopback: the CoAP server is expected on the same host.
SERVER_IP = '::1'
# open
c = coap.coap(udpPort=5000)
# OSCORE security context; key material matches RFC-style test vectors
# (sender/recipient ids are "client"/"server" in ASCII hex).
context = oscoap.SecurityContext(masterSecret=binascii.unhexlify('000102030405060708090A0B0C0D0E0F'),
                                 senderID=binascii.unhexlify('636c69656e74'),
                                 recipientID=binascii.unhexlify('736572766572'),
                                 aeadAlgorithm=oscoap.AES_CCM_16_64_128())
# NOTE(review): objectSecurity is built but never attached to the GET/PUT/POST
# calls below — confirm whether OSCORE is actually exercised by this script.
objectSecurity = o.ObjectSecurity(context=context)
try:
    # retrieve value of 'test' resource
    p = c.GET('coap://[{0}]/test'.format(SERVER_IP),
              confirmable=True)
    print('=====')
    # Responses arrive as a list of byte values; render as text.
    print(''.join([chr(b) for b in p]))
    print('=====')

    # put value of 'test' resource
    payload = b'new node : fd00::2'
    p = c.PUT('coap://[{0}]/test'.format(SERVER_IP),
              confirmable=True,
              payload = payload)
    print('=====')
    print(''.join([chr(b) for b in p]))
    print('=====')

    # post value of 'test' resource
    payload = b'new mote node : fd00::2'
    p = c.POST('coap://[{0}]/mote'.format(SERVER_IP),
               confirmable=True,
               payload = payload)
    print('=====')
    print(''.join([chr(b) for b in p]))
    print('=====')
except Exception as err:
    print(err)

# close the transport and give background threads time to wind down.
c.close()
time.sleep(0.500)

input("Done. Press enter to close.")
|
from setuptools import setup, find_packages
# Package metadata; README.rst and requirements.txt are read from the source
# tree at build time, so both files must be present when packaging.
setup(
    name='fsm_strip_length_analysis',
    version='1.0.1',
    url='https://github.com/petarmaric/fsm_strip_length_analysis',
    license='BSD',
    author='Petar Maric',
    author_email='petarmaric@uns.ac.rs',
    description='Console app and Python API for strip length-dependent '\
                'visualization and modal analysis of the parametric model of '\
                'buckling and free vibration in prismatic shell structures, '\
                'as computed by the fsm_eigenvalue project.',
    long_description=open('README.rst').read(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Physics',
    ],
    platforms='any',
    py_modules=['fsm_strip_length_analysis'],
    entry_points={
        'console_scripts': ['fsm_strip_length_analysis=fsm_strip_length_analysis:main'],
    },
    install_requires=open('requirements.txt').read().splitlines(),
)
|
# -*- coding: utf-8 -*-
"""
obspy.io.cnv - CNV file format support for ObsPy
================================================
This module provides write support for CNV event summary file
format as used by VELEST program.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod(exclude_empty=True)
|
# -*- coding: utf-8 -*-
"""The Task Scheduler event formatter."""
from __future__ import unicode_literals
from plaso.formatters import interface
from plaso.formatters import manager
class TaskCacheEventFormatter(interface.ConditionalEventFormatter):
    """Formatter for a Task Scheduler Cache event."""

    # Event data type this formatter handles.
    DATA_TYPE = 'task_scheduler:task_cache:entry'

    # Conditional pieces: each is included only when its placeholder is set.
    FORMAT_STRING_PIECES = [
        '[{key_path}]',
        'Task: {task_name}',
        '[Identifier: {task_identifier}]']

    FORMAT_STRING_SHORT_PIECES = [
        'Task: {task_name}']

    SOURCE_LONG = 'Task Cache'
    SOURCE_SHORT = 'REG'
# Register so the manager can look this formatter up by DATA_TYPE.
manager.FormattersManager.RegisterFormatter(TaskCacheEventFormatter)
|
"""Notifier package responsible for user notification
"""
# std
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List
from enum import Enum
import logging
class EventPriority(Enum):
    """Event priority dictates how urgently
    the user needs to be notified about it
    """
    LOW = -1
    NORMAL = 0
    HIGH = 1
class EventType(Enum):
    """Kind of event: keep-alive events keep the system-health checks
    running, user events are propagated directly to the user, and daily
    stats events carry the periodic summary.
    """
    KEEPALIVE = 0
    USER = 1
    DAILY_STATS = 2
class EventService(Enum):
    """Even service helps to distinguish
    between similar events for different services
    """
    HARVESTER = 0
    FARMER = 1
    FULL_NODE = 2
    DAILY = 3
@dataclass
class Event:
    """A single notification event routed through the notifier pipeline."""
    type: EventType          # see EventType: KEEPALIVE / USER / DAILY_STATS
    priority: EventPriority  # urgency; drives the title icon
    service: EventService    # originating service
    message: str             # human-readable body
class Notifier(ABC):
    """Common interface for all notifier implementations.

    Concrete subclasses integrate with a delivery channel such as
    Pushover, e-mail, Slack or WhatsApp; they only need to implement
    send_events_to_user().
    """

    def __init__(self, title_prefix: str, config: dict):
        self._title_prefix = title_prefix
        self._config = config
        self._conn_timeout_seconds = 10
        self._notification_types = [EventType.USER]
        self._notification_services = [
            EventService.HARVESTER,
            EventService.FARMER,
            EventService.FULL_NODE,
        ]
        try:
            # "daily_stats" is expected in config.yaml; a missing key is
            # reported as an invalid configuration.
            if config["daily_stats"]:
                self._notification_types.append(EventType.DAILY_STATS)
                self._notification_services.append(EventService.DAILY)
        except KeyError as key:
            logging.error(f"Invalid config.yaml. Missing key: {key}")

    def get_title_for_event(self, event):
        """Build the notification title: priority icon, prefix, service name."""
        priority_icons = {
            EventPriority.HIGH: "🚨",
            EventPriority.NORMAL: "⚠️",
            EventPriority.LOW: "ℹ️",
        }
        icon = priority_icons.get(event.priority, "")
        return f"{icon} {self._title_prefix} {event.service.name}"

    @abstractmethod
    def send_events_to_user(self, events: List[Event]) -> bool:
        """Implementation specific to the integration"""
        pass
|
#!/bin/env python
# generate logarithmic waveform
import numpy as np
import matplotlib.pyplot as plt
import sys, getopt
from math import log
def normalise(w, minval, maxval):
    """Rescale the values of w in place to the range [minval, maxval].

    Args:
        w: mutable sequence of numbers (list or 1-D array), modified in place.
        minval: value the smallest element is mapped to.
        maxval: value the largest element is mapped to.

    A constant input (zero spread) is mapped to minval everywhere instead of
    raising ZeroDivisionError.
    """
    ymin = min(w)
    ymax = max(w)
    span = ymax - ymin  # hoisted: loop-invariant
    for i, y in enumerate(w):
        w[i] = minval if span == 0 else minval + (y - ymin) / span * (maxval - minval)
def main(argv):
    """Generate a normalised logarithmic waveform.

    With ``-p`` the waveform is plotted; otherwise the samples are printed
    one per line (after a comment header) for piping into other tools.

    Args:
        argv: command-line arguments (``sys.argv[1:]``).
    """
    showplot = False
    try:
        opts, args = getopt.getopt(argv, "p")
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-p':
            showplot = True
    num_points = 2048
    # Start at 0.05 rather than 0 because log(0) is undefined.
    t = np.linspace(0.05, 1, num_points)
    # Vectorised log: replaces the original per-sample math.log loop.
    waveform = np.log(t)
    normalise(waveform, -1.0, 1.0)
    if showplot:
        plt.plot(t, waveform, '--')
        plt.show()
    else:
        print("# logarithmic waveform")
        for y in waveform:
            print(y)
# Script entry point: forward CLI args (minus program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
from django.urls import path
from backend.consumers import SpeechConsumer
# WebSocket routing table consumed by the Channels application.
websocket_urlpatterns = [
    # NOTE(review): Channels 3+ expects SpeechConsumer.as_asgi() here;
    # passing the consumer class directly only works on Channels 2 —
    # confirm the installed channels version.
    path('ws/speech/', SpeechConsumer)
]
import unittest
from werkzeug.datastructures import MultiDict
from .context import forms
class TestSmsInviteForm(unittest.TestCase):
    """Unit tests for SMSInviteForm phone-number normalization/validation."""

    def test_e164(self):
        """Common US number formats are normalized to E.164."""
        test_formats = ['(555) 555 5555', '555.555.5555', '(555)555.5555',
                        '5555555555']
        for test_format in test_formats:
            test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                        test_format)]))
            # Fixed failure message: previously read "did work".
            self.assertTrue(test_form.e164 == "+15555555555",
                            "e164 formatting did not work for %s, instead got: %s" %
                            (test_format, test_form.e164))

    def test_e164Negative(self):
        """A number already in E.164 is left untouched."""
        test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                    '+15555555555')]))
        self.assertTrue(test_form.e164 == '+15555555555', 'Form reformatted ' \
                        'a number already in e.164: %s' % test_form.e164)

    def test_characterValidation(self):
        """Inputs containing letters fail validation."""
        test_formats = ['(asd)555-5555', 'asdf555_555-5555', 'asd.555.5555']
        for test_format in test_formats:
            test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                        test_format)]))
            test_form.validate()
            self.assertTrue(test_form.errors, "SMSInviteForm validated the " \
                            "following invalid format: %s" % test_format)

    def test_characterValidationNegative(self):
        """A valid E.164 number produces no validation errors."""
        test_form = forms.SMSInviteForm(MultiDict([('phone_number',
                                                    '+15555555555')]))
        self.assertFalse(test_form.errors, "SMSInviteForm invalidated the " \
                         "following valid format: %s" % '+15555555555')
|
# Sample data: all lower case and containing duplicates on purpose.
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
         'julian sequeira', 'sandra bullock', 'keanu reeves',
         'julbob pybites', 'bob belderbos', 'julian sequeira',
         'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
def dedup_and_title_case_names(names):
    """Return the unique names, sorted alphabetically and title-cased.

    Note: de-duplication happens before title casing, so two spellings of a
    name that differ only in case would both be kept; the sample data is all
    lower case, where this cannot occur.
    """
    # sorted() accepts any iterable — the intermediate list() was redundant.
    return [name.title() for name in sorted(set(names))]
def sort_by_surname_desc(names):
    """Return the deduplicated, title-cased names sorted by surname,
    descending."""
    unique_names = dedup_and_title_case_names(names)

    def surname(full_name):
        # Last space-separated token is treated as the surname.
        return full_name.rsplit(" ", 1)[-1]

    return sorted(unique_names, key=surname, reverse=True)
def shortest_first_name(names):
    """Return the shortest first name (str).

    Assumes there is exactly one shortest name.
    """
    cleaned = dedup_and_title_case_names(names)
    return min((full.split()[0] for full in cleaned), key=len)
# Demo output when the module is run directly.
print(dedup_and_title_case_names(NAMES))
print(sort_by_surname_desc(NAMES))
print(shortest_first_name(NAMES))
""" Approximate e**0.5 Numerically and add error analysis"""
import math as m

# Exponent to approximate e**n for.
n = 0.5
# NOTE(review): this `accuracy` is shadowed inside numerical_e and never
# used — confirm which iteration bound was intended.
accuracy = 10000000
# Relative error tolerance for the series truncation.
error = 0.0001
Analytical_value = m.exp(n)
print(f'The Analytical solution is {Analytical_value}')
def numerical_e(n, tol=0.0001):
    """Approximate e**n with its Taylor series sum(n**i / i!).

    Terms are accumulated until the relative error against the analytical
    value drops to ``tol``. Fixes the original loop, whose index only
    advanced on the first iteration (re-adding the same term repeatedly)
    and which compared against e**0.5 regardless of the argument.

    Args:
        n: exponent.
        tol: relative error tolerance (default matches the module-level
            ``error`` constant).

    Returns:
        The truncated series sum.
    """
    analytical = m.exp(n)
    total = 0.0
    i = 0
    while True:
        total += n ** i / m.factorial(i)
        diff = (analytical - total) / analytical
        if diff <= tol:
            print(f'The numerical solution is within error and the value is {total}')
            print(f'The relative error between the analytical and computational is {diff*100}%')
            break
        i += 1
    return total
# Run the approximation for the module-level n.
numerical_e(n)
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
def load_image(file_path):
    """Read an image from disk as a BGR array (None if unreadable)."""
    return cv2.imread(file_path)
def extract_label(file_name):
    """Binary label from the file name: 1 if it contains "dog", else 0."""
    return int("dog" in file_name)
# NOTE(review): hard-coded absolute Windows path — parameterize before
# running on another machine.
train_path = "C:/Users/Daniel/.keras/datasets/cats_and_dogs/"
image_files = os.listdir(train_path)
# Load every image into memory; labels are derived from the file names.
train_images = [load_image(train_path + file) for file in image_files]
train_labels = [extract_label(file) for file in image_files]
def preprocess_image(img, side=96):
    """Crop to a square, resize to side x side, convert to grayscale and
    scale pixel values into [0, 1]."""
    min_side = min(img.shape[0], img.shape[1])
    # NOTE(review): this takes the top-left square, not a centered crop —
    # confirm that is intended.
    img = img[:min_side, :min_side]
    img = cv2.resize(img, (side,side))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img / 255.0
# Preprocess in place, then add a trailing channel axis for Conv2D input.
for i in range(len(train_images)):
    train_images[i] = preprocess_image(train_images[i])
train_images = np.expand_dims(train_images, axis=-1)
train_labels = np.array(train_labels)
print(train_images.shape, train_labels.shape)
# Five conv/pool stages followed by two dense layers; the final 2-unit
# softmax pairs with the sparse categorical loss below for the 0/1 labels.
layers = [
    tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), padding="same", activation=tf.nn.relu, input_shape=train_images.shape[1:]),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), padding="same", activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding="same", activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation=tf.nn.relu),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=512, activation=tf.nn.relu),
    tf.keras.layers.Dense(units=256, activation=tf.nn.relu),
    tf.keras.layers.Dense(units=2, activation=tf.nn.softmax)
]
model = tf.keras.Sequential(layers)
model.compile(optimizer=tf.optimizers.Adam(),
              loss=tf.losses.SparseCategoricalCrossentropy(),
              metrics=[tf.metrics.SparseCategoricalAccuracy()])
# NOTE(review): trains on the full set with no validation split — consider
# validation_split for overfitting visibility.
model.fit(train_images, train_labels, epochs=50, batch_size=100)
model.save("final_model.h5")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.