import numpy as np
# We create a rank 1 ndarray
x = np.array([1,2,3])
# We create a 3 x 3 ndarray
Y = np.array([[1,2,3],[4,5,6],[7,8,9]])
# We create a 3 x 1 ndarray
Z = np.array([1,2,3]).reshape(3,1)
# We print x
print()
print('x = ', x)
print()
# We print Y
print()
print('Y = \n', Y)
print()
# We print Z
print()
print('Z = \n', Z)
print()
print('x + Y = \n', x + Y)
print()
print('Z + Y = \n',Z + Y)
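# Note on the two sums above (standard NumPy broadcasting, stated here for clarity):
# x has shape (3,) and is broadcast across each row of Y, while Z has shape (3, 1)
# and is broadcast across each column of Y, so both results have shape (3, 3).
print()
print('(x + Y).shape = ', (x + Y).shape)
print('(Z + Y).shape = ', (Z + Y).shape)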
|
def args2kwargs(thrift_spec, *args):
arg_names = [item[1][1] for item in sorted(thrift_spec.items())]
return dict(zip(arg_names, args))
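# Usage sketch (the spec below is a made-up, Thrift-style dict; the only property
# args2kwargs relies on is that each value's second element is the field name):
#   example_spec = {1: (11, 'name', 'UTF8', None), 2: (10, 'user_id', None)}
#   args2kwargs(example_spec, 'Alice', 42)  ->  {'name': 'Alice', 'user_id': 42}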
|
from re import match
from tests import *
def test_root(app, client):
"""Testing root path."""
r = get(client=client, path="/")
assert 200 == r.status_code
assert "/" == r.json["path"]
assert "Punch Clock v0.1.0" == r.json["message"]
assert match(
"^202[0-9]-[0-1][0-9]-[0-3][0-9] [0-9]+:[0-5][0-9]:[0-5][0-9]$",
r.json["datetime"],
)
def test_list_users(app, client):
"""Testing show/users path."""
api_dummy_user(app, client, "32165498700", "Jesse Pinkman", "jesse@crystalz.org")
api_dummy_user(
app, client, "98763532112", "Walter White", "heisenberg@crystalz.org"
)
r = get(client, "/api/list/users")
assert 200 == r.status_code
exp = {
"users": [
{
"id": 1,
"full_name": "Jesse Pinkman",
"cpf": "32165498700",
"email": "jesse@crystalz.org",
},
{
"id": 2,
"full_name": "Walter White",
"cpf": "98763532112",
"email": "heisenberg@crystalz.org",
},
]
}
assert "1" == r.json["users"][0]["id"]
assert "Jesse Pinkman" == r.json["users"][0]["full_name"]
assert "32165498700" == r.json["users"][0]["cpf"]
assert "jesse@crystalz.org" == r.json["users"][0]["email"]
assert "2" == r.json["users"][1]["id"]
assert "Walter White" == r.json["users"][1]["full_name"]
assert "98763532112" == r.json["users"][1]["cpf"]
assert "heisenberg@crystalz.org" == r.json["users"][1]["email"]
|
"""
The goal of this rewrite is to use the existing command framework of the discord.py library.
Here is its documentation:
https://discordpy.readthedocs.io/en/latest/index.html
"""
# imports
import discord
from discord.ext import tasks, commands
import configparser
import bot_database as db
import mathParser
import polling
import tictactoe
import connect_four
import party_notifier
import misc_functions as misc
import asyncio
import random
bot_data = db.bot_database()
bot = commands.Bot(command_prefix = bot_data.prefix)
# CHECKS
def is_admin(ctx):
admin_role = ctx.guild.get_role(bot_data.IDs['admin_role'])
return admin_role in ctx.author.roles
@bot.event
async def on_ready():
# Only add cogs on first init:
bot.add_cog(polling.Poll_Commands(bot, bot_data))
bot.add_cog(tictactoe.tic_tac_toe(bot))
bot.add_cog(party_notifier.Party_Notifier(bot, bot_data))
# bot.add_cog(connect_four.connect_four(bot))
# Set activity:
if bot_data.activity_name != '-1':
game = discord.Game(bot_data.activity_name)
await bot.change_presence(status=discord.Status.online, activity=game)
else:
await bot.change_presence(status=None, activity=None)
print(f'Bot logged in as {bot.user}')
@bot.event
async def on_voice_state_update(member, before, after):
# This function looks if any channel has more than a set number of participants
# and if it does, sends a notification
notif_channel = bot.get_channel(bot_data.IDs['notification_channel'])
with open(bot_data.datapath + 'party_channels.txt', 'r') as file:
partyChannelIDs = [int(x[:-1]) for x in file.readlines()]
# CHECKS
# If channel has become empty, unmark it as party channel
if before.channel is not None and len(before.channel.members) == 0:
if before.channel.id in bot_data.party_channels:
bot_data.party_channels = bot_data.party_channels - {int(before.channel.id)}
await notif_channel.send(f'The party in **{before.channel.name}** has ended.')
return
if after.channel is None:
return
if after.channel.id not in partyChannelIDs:
return
# See if channel is already a party channel
if after.channel.id in bot_data.party_channels:
return
else:
bot_data.party_channels = bot_data.party_channels.union({int(after.channel.id)})
print(bot_data.party_channels)
if len(after.channel.members) >= bot_data.party_count:
# Do this weird thing to get the guild and its roles
this_guild = after.channel.guild
party_role = this_guild.get_role(bot_data.IDs['party_role'])
await notif_channel.send(f'{party_role.mention} There seems to be a party in **{after.channel.name}**')
return
@bot.event
async def on_message(message):
"""
If possible don't define commands here. Use the command framework for that (see below)
Only use this function if you are processing messages without commands.
"""
# Code goes here
if message.author == bot.user:
return
"""
    Random Easter eggs
"""
    if any(token in message.content for token in ('Mo ', 'Mo,', 'Mo.', 'Mo!', 'Mo?')) or message.content == 'Mo':
await message.channel.send('Habe ich etwas von meinem Meister, Herrn und Gebieter Mo gehört? :heart_eyes:', delete_after= 20)
if message.content == "Hello there!":
await message.channel.send("General Kenobi! You are a bold one! Hehehehe KILL HIM!")
await message.channel.send("https://gph.is/2pE8sbx")
return
if 'scrinzi' in message.content.lower():
await message.channel.send('Scrinzi, so ein Sack! :face_vomiting:', delete_after=5)
return
await bot.process_commands(message)
class Main_Commands(commands.Cog):
"""
This cog contains all the main commands which don't really fit into another cog.
"""
def __init__(self, bot):
self.bot = bot
@commands.command(brief="Just for testing random stuff.",
help="This function is for testing code. Don't expect any predictable behaviour from it.")
async def test(self, ctx):
pass
@commands.command(brief="Countdown from value. Default is 10s.",
help="Start a countdown from a designated value or 10 seconds if none has been specified.",
usage="<seconds>")
async def countdown(self, ctx, arg = "10"):
if not arg.isdigit():
counter = 10
else:
counter = int(arg)
if counter > 500:
await ctx.send('Dude, I don\'t have all day!', delete_after=10.0)
return
msg = await ctx.send(f'Countdown: {counter}')
while counter > 0:
counter -= 1
await asyncio.sleep(1)
await msg.edit(content=(f'Countdown: {counter}' if counter != 0 else 'Countdown: **NOW**'))
@commands.command(brief="View rules.",
help="This command fetches the rules as they are defined in the rule-channel.",
aliases=["regeln"])
async def rules(self, ctx):
ruleChannel = bot.get_channel(bot_data.IDs['rule_channel'])
ruleMsg = await ruleChannel.fetch_message(bot_data.IDs['rule_message'])
await ctx.send(ruleMsg.content)
@commands.command(brief="Calculate constant math expressions.",
help="This command tries to calculate constant mathematical expressions.\nNo variables allowed.\n\
\nValid constants and functions:\n - e, pi, c, h, k\n - sin, sinh, asin, asinh, cos, cosh, acos, acosh, tan, atan, atan2, atanh, exp, expm1, ln, lg, sqrt, abs, trunc, round, sgn",
usage="<expression>")
async def calc(self, ctx, *, arg):
nsp = mathParser.NumericStringParser()
try:
result = nsp.eval(arg)
except:
await ctx.send(f"Invalid expression. Type `{bot_data.prefix}help calc` for information on the command.",
delete_after=10.0)
print(f'Invalid \'calc\' expression: {arg}')
return
await ctx.send(result)
@commands.command(brief="Generate random number",
help="Generate a random number or randomly choose from multiple elements after the following pattern:\n\
\nrandom\t\t-> [0,1]\nrandom a\t-> [0,a]\nrandom a b\t-> [a,b]\nrandom a b c\t-> a or b or c or ...",
aliases=['random'])
async def rand(self, ctx, *args):
if not args:
answer = str(random.random())
elif len(args) == 1 and args[0].isdigit():
answer = str(random.uniform(0.0, float(args[0])))
elif len(args) == 2 and args[0].isdigit() and args[1].isdigit():
answer = str(random.uniform(float(args[0]), float(args[1])))
elif len(args) > 1:
answer = str(random.choice(args))
else:
await ctx.send(f'Error: Invalid argument. Type `{bot_data.prefix}help random` for information on the command.',
delete_after=10.0)
return
await ctx.send(answer)
@commands.command(brief="Save Quote.",
help="Save a quote, its author and optionally information on the context of the quote.\
\nIf available the quote will also be sent to a dedicated text channel.",
usage='"<Author>" "<Quote>" "<Optional context>"')
async def quote(self, ctx, *args):
await ctx.message.delete()
if len(args) < 2 or len(args) > 3:
await ctx.send(f"Error: Invalid arguments. Type `{bot_data.prefix}help quote` for information on the command",
delete_after=10.0)
return
author_name: str = args[0].lower()
quote_text: str = args[1]
if len(args) == 3:
quote_context = args[2]
else:
quote_context = "No Context given"
qfile = configparser.ConfigParser()
qfile.read(f'{bot_data.datapath}quotes.txt')
try:
quote_count: int = int(qfile[author_name]['count'])
except:
quote_count: int = 0
try:
if not qfile.has_section(author_name):
qfile.add_section(author_name)
qfile.set(author_name, 'count', str(quote_count + 1))
qfile.set(author_name, f'q#{quote_count}', f'"{quote_text}" "{quote_context}"')
with open(f'{bot_data.datapath}quotes.txt', 'w') as configfile:
qfile.write(configfile)
if bot_data.IDs['quote_channel'] != -1:
quote_channel = bot.get_channel(bot_data.IDs['quote_channel'])
await quote_channel.send(f':\n**Person:** {author_name}\
\n**Zitat:** {quote_text}\
\n**Kontext:** {quote_context}')
await ctx.send('Saved quote.', delete_after=10)
except:
await ctx.send(f'There was an error saving the quote. Type `{bot_data.prefix}help quote` for correct format.',
delete_after=10.0)
@commands.command(brief="Make the bot say something.",
help="Let the bot say something. Your original message will be deleted. Mentions will be converted to text.",
usage="<text>")
async def echo(self, ctx, *, arg):
await ctx.message.delete()
if bot_data.locks['echo'] or is_admin(ctx):
await ctx.send(arg)
else:
await ctx.send('Error: This command is currently locked.', delete_after=10.0)
@commands.check(is_admin)
@commands.command(brief="Lock access to the 'echo' command.",
help="With this command you can lock the 'echo' command.\
\n\necholock\t-> Show current lock status\necholock toggle\t-> Toggle lock\
\n\nAlternatives to 'toggle' are 'switch' and 'change'",
usage="[<none>, toggle, switch, change]")
async def echolock(self, ctx, *args):
arg = args[0] if len(args) > 0 else ""
if arg in ['toggle', 'switch', 'change']:
bot_data.locks['echo'] = not bot_data.locks['echo']
statusStr = "unlocked" if bot_data.locks['echo'] else "locked"
# Update 'config.txt':
config = configparser.ConfigParser()
config.read('config.txt')
config.set('LOCKS', 'echo', str(misc.bool_to_int(bot_data.locks['echo'])))
with open('config.txt', 'w') as configfile:
config.write(configfile)
await ctx.send(f'`{bot_data.prefix}echo` is now **{statusStr}**')
elif len(arg) == 0:
statusStr = "unlocked" if bot_data.locks['echo'] else "locked"
await ctx.send(f'`{bot_data.prefix}echo` is **{statusStr}**')
else:
            await ctx.send(f'Error: Invalid argument. See `{bot_data.prefix}help echolock` for information on the command.')
@commands.check(is_admin)
@commands.command(brief="Change the 'Playing' status of the bot.",
help = "Changes the 'Playing' status of the bot to the specified text. If no argument is given the status will be removed.")
async def setactivity(self, ctx, *args):
if not args:
await self.bot.change_presence(status=None, activity=None)
configStr: str = '-1'
else:
arg = ' '.join(args)
game = discord.Game(arg)
await self.bot.change_presence(status=discord.Status.online, activity=game)
configStr: str = arg
# Save to config
config = configparser.ConfigParser()
config.read('config.txt')
config.set('BASE', 'activity_name', configStr)
with open('config.txt', 'w') as configfile:
config.write(configfile)
@commands.command(brief="Convert weight to usable units.",
help='This command converts your mass (kg) to its corresponding resting energy in '
'kilotons of TNT. This is equivalent to half the energy released in the explosion of you'
' touching your anti-matter twin.\nThis is also a great way of calling random people fat.',
usage='<mass in kg>')
async def weight(self, ctx, *, arg):
# Make lowercase in case someone entered units:
arg = arg.lower()
# Scan for cancer:
        if misc.element_in_str(['grain', 'gr', 'drachm', 'dr', 'ounce', 'oz', 'pound', 'lb', 'stone', 'st',
                                'quarter', 'qr', 'qtr', 'hundredweight', 'cwt', 'slug'], arg):
admin_role = ctx.guild.get_role(bot_data.IDs['admin_role'])
await ctx.send(f"{admin_role.mention} **IMPERIAL UNITS DETECTED!!!** Authorities were notified. Stay where you are criminal scum!")
return
# Detect units:
factor = 1.0
if arg.endswith('kg'):
arg = arg[0:-2]
elif arg.endswith('g'):
arg = arg[0:-1]
factor = 1. / 1000.0
elif arg.endswith('t'):
arg = arg[0:-1]
factor = 1000
elif arg.endswith('u'):
arg = arg[0:-1]
factor = 6.0221e26
elif arg.endswith('tev/c^2'):
arg = arg[0:-7]
factor = 5.60852495e23
elif arg.endswith('tev/c²'):
arg = arg[0:-6]
factor = 5.60852495e23
elif arg.endswith('gev/c^2'):
arg = arg[0:-7]
factor = 5.60852495e26
elif arg.endswith('gev/c²'):
arg = arg[0:-6]
factor = 5.60852495e26
elif arg.endswith('kev/c^2'):
arg = arg[0:-7]
factor = 5.60852495e29
elif arg.endswith('kev/c²'):
arg = arg[0:-6]
factor = 5.60852495e29
elif arg.endswith('ev/c^2'):
arg = arg[0:-6]
factor = 5.60852495e32
elif arg.endswith('ev/c²'):
arg = arg[0:-5]
factor = 5.60852495e32
else:
factor = 1.0
# If arg still has non-digit chars, it is an error
if not arg.isdigit():
# Detect mathematical expressions:
nsp = mathParser.NumericStringParser()
try:
arg = nsp.eval(arg)
except:
await ctx.send('Error: Could not parse argument. Use something like `10 kg` or just `10`.', delete_after=10)
return
E = float(arg) * factor * (2.998e8)**2
Joule_to_gigatonTNT = 2.390e-19
hiroshima_energy = 16e-6 # gigatons of TNT
GT_mass_raw = E * Joule_to_gigatonTNT
if GT_mass_raw >= 1e3:
            explosion_str = f'{round(GT_mass_raw * 1e-3, 2)} teratons'
elif GT_mass_raw < 1e3 and GT_mass_raw >= 1:
explosion_str = f'{round(GT_mass_raw, 2)} gigatons'
elif GT_mass_raw < 1 and GT_mass_raw >= 0.001:
explosion_str = f'{round(GT_mass_raw * 1e3, 2)} megatons'
elif GT_mass_raw < 1e-3 and GT_mass_raw >= 1e-6:
explosion_str = f'{round(GT_mass_raw * 1e6, 2)} kilotons'
else:
explosion_str = f'{round(GT_mass_raw * 1e9, 2)} tons'
# Hiroshima formatting:
hir_fac = round(GT_mass_raw / hiroshima_energy, 1)
        hir_str = f'or **{hir_fac}** Hiroshima bombs' if hir_fac >= 1 else ''
# For the lulz:
if float(arg) * factor >= 100:
pref = '**WOW!** '
suff = 'Damn.'
else:
pref = ''
suff = ''
text = f'{pref}This mass is equivalent to a very generous **{explosion_str} of TNT** {hir_str}. {suff}'
await ctx.send(text)
@commands.command(brief="cry",
help=" show the only emotion HAL is currently capable of")
async def cry(self, ctx):
await ctx.send(':sob:')
await asyncio.sleep(5)
await ctx.send('http://gph.is/2f4QMDC', delete_after = 10)
@commands.command(brief = 'let HAL show you his Magic')
async def draw_card(self, ctx):
await ctx.send('Your Card is the Ace of Spades', delete_after = 40)
await asyncio.sleep(7)
        await ctx.send('shuffling the deck',delete_after = 10)
await asyncio.sleep(3)
await ctx.send('trust me bro', delete_after = 3)
await asyncio.sleep(7)
await ctx.send('Was this your card:', delete_after = 23)
await asyncio.sleep(3)
await ctx.send('http://gph.is/1UPsMwn', delete_after = 20)
await ctx.send('The ACE OF SPADES!')
await asyncio.sleep(5)
await ctx.send('not impressed?', delete_after = 5)
await asyncio.sleep(5)
await ctx.send('https://gph.is/g/aNMKlwP', delete_after = 10)
await ctx.send('http://gph.is/1oKXMOp', delete_after = 10)
bot.add_cog(Main_Commands(bot))
bot.run(bot_data.token)
|
"""
Variable Substitution, Multiplication, Division, Scaling
"""
#*****************************************************************************
# Copyright (C) 2007 William Stein and Jonathan Hanke
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import copy
def swap_variables(self, r, s, in_place = False):
"""
Switch the variables `x_r` and `x_s` in the quadratic form
(replacing the original form if the in_place flag is True).
INPUT:
`r`, `s` -- integers >= 0
OUTPUT:
a QuadraticForm (by default, otherwise none)
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 4, range(1,11))
sage: Q
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 4 ]
[ * 5 6 7 ]
[ * * 8 9 ]
[ * * * 10 ]
sage: Q.swap_variables(0,2)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 8 6 3 9 ]
[ * 5 2 7 ]
[ * * 1 4 ]
[ * * * 10 ]
sage: Q.swap_variables(0,2).swap_variables(0,2)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 4 ]
[ * 5 6 7 ]
[ * * 8 9 ]
[ * * * 10 ]
"""
if (in_place == False):
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), self.dim(), self.coefficients())
Q.swap_variables(r,s,in_place=True)
return Q
else:
## Switch diagonal elements
tmp = self[r,r]
self[r,r] = self[s,s]
self[s,s] = tmp
## Switch off-diagonal elements
for i in range(self.dim()):
if (i != r) and (i != s):
tmp = self[r,i]
self[r,i] = self[s,i]
self[s,i] = tmp
def multiply_variable(self, c, i, in_place = False):
"""
Replace the variables `x_i` by `c*x_i` in the quadratic form
(replacing the original form if the in_place flag is True).
Here `c` must be an element of the base_ring defining the
quadratic form.
INPUT:
`c` -- an element of Q.base_ring()
`i` -- an integer >= 0
OUTPUT:
a QuadraticForm (by default, otherwise none)
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ, [1,9,5,7])
sage: Q.multiply_variable(5,0)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 25 0 0 0 ]
[ * 9 0 0 ]
[ * * 5 0 ]
[ * * * 7 ]
"""
if (in_place == False):
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), self.dim(), self.coefficients())
Q.multiply_variable(c,i,in_place=True)
return Q
else:
## Stretch the diagonal element
tmp = c * c * self[i,i]
self[i,i] = tmp
## Switch off-diagonal elements
for k in range(self.dim()):
if (k != i):
tmp = c * self[k,i]
self[k,i] = tmp
def divide_variable(self, c, i, in_place = False):
"""
Replace the variables `x_i` by `(x_i)/c` in the quadratic form
(replacing the original form if the in_place flag is True).
Here `c` must be an element of the base_ring defining the
quadratic form, and the division must be defined in the base
ring.
INPUT:
`c` -- an element of Q.base_ring()
`i` -- an integer >= 0
OUTPUT:
a QuadraticForm (by default, otherwise none)
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ, [1,9,5,7])
sage: Q.divide_variable(3,1)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 0 0 0 ]
[ * 1 0 0 ]
[ * * 5 0 ]
[ * * * 7 ]
"""
if (in_place == False):
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), self.dim(), self.coefficients())
Q.divide_variable(c,i,in_place=True)
return Q
else:
## Stretch the diagonal element
tmp = self[i,i] / (c*c)
self[i,i] = tmp
## Switch off-diagonal elements
for k in range(self.dim()):
if (k != i):
tmp = self[k,i] / c
self[k,i] = tmp
def scale_by_factor(self, c, change_value_ring_flag=False):
"""
Scale the values of the quadratic form by the number `c`, if
this is possible while still being defined over its base ring.
If the flag is set to true, then this will alter the value ring
to be the field of fractions of the original ring (if necessary).
INPUT:
`c` -- a scalar in the fraction field of the value ring of the form.
OUTPUT:
A quadratic form of the same dimension
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ, [3,9,18,27])
sage: Q.scale_by_factor(3)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 9 0 0 0 ]
[ * 27 0 0 ]
[ * * 54 0 ]
[ * * * 81 ]
sage: Q.scale_by_factor(1/3)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 0 0 0 ]
[ * 3 0 0 ]
[ * * 6 0 ]
[ * * * 9 ]
"""
## Try to scale the coefficients while staying in the ring of values.
new_coeff_list = [x*c for x in self.coefficients()]
## Check if we can preserve the value ring and return result. -- USE THE BASE_RING FOR NOW...
R = self.base_ring()
try:
list2 = [R(x) for x in new_coeff_list]
        # This is a hack: we would like to use QuadraticForm here, but
        # it doesn't work for scoping reasons.
Q = self.__class__(R, self.dim(), list2)
return Q
except Exception:
if (change_value_ring_flag == False):
raise TypeError("Oops! We could not rescale the lattice in this way and preserve its defining ring.")
else:
raise UntestedCode("This code is not tested by current doctests!")
F = R.fraction_field()
list2 = [F(x) for x in new_coeff_list]
Q = copy.deepcopy(self)
Q.__init__(self.dim(), F, list2, R) ## DEFINE THIS! IT WANTS TO SET THE EQUIVALENCE RING TO R, BUT WITH COEFFS IN F.
#Q.set_equivalence_ring(R)
return Q
def extract_variables(self, var_indices):
"""
Extract the variables (in order) whose indices are listed in
var_indices, to give a new quadratic form.
INPUT:
var_indices -- a list of integers >= 0
OUTPUT:
a QuadraticForm
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 4, range(10)); Q
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 0 1 2 3 ]
[ * 4 5 6 ]
[ * * 7 8 ]
[ * * * 9 ]
sage: Q.extract_variables([1,3])
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 4 6 ]
[ * 9 ]
"""
m = len(var_indices)
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), m)
for i in range(m):
for j in range(i, m):
Q[i,j] = self[ var_indices[i], var_indices[j] ]
return Q
def elementary_substitution(self, c, i, j, in_place = False): ## CHECK THIS!!!
"""
Perform the substitution `x_i --> x_i + c*x_j` (replacing the
original form if the in_place flag is True).
INPUT:
`c` -- an element of Q.base_ring()
`i`, `j` -- integers >= 0
OUTPUT:
a QuadraticForm (by default, otherwise none)
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 4, range(1,11))
sage: Q
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 4 ]
[ * 5 6 7 ]
[ * * 8 9 ]
[ * * * 10 ]
sage: Q.elementary_substitution(c=1, i=0, j=3)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 6 ]
[ * 5 6 9 ]
[ * * 8 12 ]
[ * * * 15 ]
::
sage: R = QuadraticForm(ZZ, 4, range(1,11))
sage: R
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 4 ]
[ * 5 6 7 ]
[ * * 8 9 ]
[ * * * 10 ]
::
sage: M = Matrix(ZZ, 4, 4, [1,0,0,1,0,1,0,0,0,0,1,0,0,0,0,1])
sage: M
[1 0 0 1]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
sage: R(M)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 1 2 3 6 ]
[ * 5 6 9 ]
[ * * 8 12 ]
[ * * * 15 ]
"""
if (in_place == False):
Q = copy.deepcopy(self)
Q.__init__(self.base_ring(), self.dim(), self.coefficients())
Q.elementary_substitution(c, i, j, True)
return Q
else:
## Adjust the a_{k,j} coefficients
ij_old = self[i,j] ## Store this since it's overwritten, but used in the a_{j,j} computation!
for k in range(self.dim()):
if (k != i) and (k != j):
ans = self[j,k] + c*self[i,k]
self[j,k] = ans
elif (k == j):
ans = self[j,k] + c*ij_old + c*c*self[i,i]
self[j,k] = ans
else:
ans = self[j,k] + 2*c*self[i,k]
self[j,k] = ans
def add_symmetric(self, c, i, j, in_place = False):
"""
Performs the substitution `x_j --> x_j + c*x_i`, which has the
effect (on associated matrices) of symmetrically adding
`c * j`-th row/column to the `i`-th row/column.
NOTE: This is meant for compatibility with previous code,
which implemented a matrix model for this class. It is used
in the local_normal_form() method.
INPUT:
`c` -- an element of Q.base_ring()
`i`, `j` -- integers >= 0
OUTPUT:
a QuadraticForm (by default, otherwise none)
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, range(1,7)); Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 1 2 3 ]
[ * 4 5 ]
[ * * 6 ]
sage: Q.add_symmetric(-1, 1, 0)
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 1 0 3 ]
[ * 3 2 ]
[ * * 6 ]
sage: Q.add_symmetric(-3/2, 2, 0) ## ERROR: -3/2 isn't in the base ring ZZ
Traceback (most recent call last):
...
RuntimeError: Oops! This coefficient can't be coerced to an element of the base ring for the quadratic form.
::
sage: Q = QuadraticForm(QQ, 3, range(1,7)); Q
Quadratic form in 3 variables over Rational Field with coefficients:
[ 1 2 3 ]
[ * 4 5 ]
[ * * 6 ]
sage: Q.add_symmetric(-3/2, 2, 0)
Quadratic form in 3 variables over Rational Field with coefficients:
[ 1 2 0 ]
[ * 4 2 ]
[ * * 15/4 ]
"""
return self.elementary_substitution(c, j, i, in_place)
|
n=float(input("Enter the number: \t"))
if n==0 :
print("Zero\n")
elif n<0 :
print("Negative\n")
else:
print("Positive\n")
|
"""
MIT License
Copyright (c) 2021 ilkergzlkkr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging, os
import typing as t
from . import errors
from .gqlhttp import GQLHTTPClient
log = logging.getLogger(__name__)
class DCLClient:
"""
API wrapper for dclist.net
--------------------------
Parameters
----------
bot: discord.Client
An instance of a discord.py Client object.
token: str
Your bot's Dclist.Net API Token.
**loop: Optional[event loop]
An `event loop` to use for asynchronous operations.
Defaults to ``bot.loop``.
**transporter: Optional[gql.transport]
A `gql.transport` to use for transporting graphql queries.
"""
def __init__(self, bot, api_token: t.Optional[str]=None, *args, **kwargs):
if api_token is None:
            log.warning("No token provided. DCLClient will not be able to post bot stats.")
self.bot = bot
self.bot_id = None
self.loop = kwargs.get("loop", bot.loop)
self.http = GQLHTTPClient(api_token, loop=self.loop, transporter=kwargs.get('transporter'))
async def __get_ready(self):
await self.bot.wait_until_ready()
if self.bot_id is None:
self.bot_id = self.bot.user.id
async def _get_app_info(self):
await self.__get_ready()
return self.bot_id, (await self.bot.application_info()).owner.id
async def postBotStats(self, guild_count: t.Optional[int]=None,
user_count: t.Optional[int]=None, shard_count: t.Optional[int]=None):
"""
Post bot stats to the API
Parameters
----------
:param guild_count: Guild count (optional)
:param user_count: User count (optional)
        :param shard_count: Shard count (optional)
"""
await self.__get_ready()
if guild_count is None:
guild_count = len(self.bot.guilds)
if user_count is None:
user_count = len(list(self.bot.get_all_members()))
data = await self.http.postBotStats(guild_count, user_count, shard_count)
return data['postBotStats']
async def getBotById(self, bot_id: t.Optional[int]) -> dict:
"""
Get a bot listed on dclist.net
Parameters
----------
:param bot_id: Bot id to be fetched
        If bot_id is not given, the bot itself will be used.
Returns
-------
bot: Bot as a dict fetched from gql-api
"""
if bot_id is None:
            bot_id, _ = await self._get_app_info()
data = await self.http.getBotById(bot_id)
return data['getBot']
async def getUserById(self, user_id: t.Optional[int]) -> dict:
"""
Get a user from dclist.net.
Parameters
----------
:param user_id: User id to be fetched.
        If user_id is not given, the bot owner will be used.
Returns
-------
user: User as a dict fetched from gql-api.
"""
if user_id is None:
            _, user_id = await self._get_app_info()
data = await self.http.getUserById(user_id)
return data['getUser']
async def isUserVoted(self, user_id: t.Optional[int]) -> bool:
"""
        Check whether a user has voted for your bot on dclist.net.
Parameters
----------
:param user_id: User id to be checked.
        If user_id is not given, the bot owner will be used.
Returns
-------
        :return bool: True if the user has voted, False otherwise.
"""
if user_id is None:
            _, user_id = await self._get_app_info()
data = await self.http.isUserVoted(user_id)
return data['isUserVoted']
async def getUserComment(self, user_id: t.Optional[int]) -> dict:
"""
        Get a user's comment on your bot's page from dclist.net.
Parameters
----------
:param user_id: User id to be checked.
        If user_id is not given, the bot owner will be used.
Returns
-------
        :return Comment: Comment data as a dict fetched from the gql-api.
        The given user must have commented on one of your bots; otherwise the return value will be None.
"""
if user_id is None:
            _, user_id = await self._get_app_info()
data = await self.http.getUserComment(user_id)
return data['getUserComment']
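# Minimal usage sketch (commented out; the discord.py Client object and the
# DCLIST_TOKEN environment variable below are illustrative assumptions, not part of this module):
#
#   import os
#   import discord
#
#   client = discord.Client()
#   dcl = DCLClient(client, api_token=os.getenv("DCLIST_TOKEN"))
#
#   @client.event
#   async def on_ready():
#       stats = await dcl.postBotStats()  # guild/user counts default to the bot's own
#       print(stats)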
|
import base64
import click
import pytest
from evernote.edam.error.ttypes import EDAMUserException
from evernote_backup import cli_app_auth, cli_app_util
from evernote_backup.cli_app_click_util import NaturalOrderGroup
from evernote_backup.cli_app_util import ProgramTerminatedError
from evernote_backup.log_util import get_time_txt
def test_get_sync_client_token_expired_error(mock_evernote_client):
mock_evernote_client.fake_is_token_expired = True
network_error_retry_count = 50
max_chunk_results = 200
fake_token = "S=1:U=ff:E=fff:C=ff:P=1:A=test222:V=2:H=ff"
with pytest.raises(ProgramTerminatedError) as excinfo:
cli_app_auth.get_sync_client(
fake_token, "evernote", network_error_retry_count, max_chunk_results
)
assert str(excinfo.value) == "Authentication token expired or revoked!"
def test_get_sync_client_token_invalid_error(mock_evernote_client):
mock_evernote_client.fake_is_token_invalid = True
network_error_retry_count = 50
max_chunk_results = 200
fake_token = "S=1:U=ff:E=fff:C=ff:P=1:A=test222:V=2:H=ff"
with pytest.raises(ProgramTerminatedError) as excinfo:
cli_app_auth.get_sync_client(
fake_token, "evernote", network_error_retry_count, max_chunk_results
)
assert str(excinfo.value) == "Invalid authentication token!"
def test_get_sync_client_unexpected_error(mock_evernote_client):
mock_evernote_client.fake_auth_verify_unexpected_error = True
network_error_retry_count = 50
max_chunk_results = 200
fake_token = "S=1:U=ff:E=fff:C=ff:P=1:A=test222:V=2:H=ff"
with pytest.raises(EDAMUserException):
cli_app_auth.get_sync_client(
fake_token, "evernote", network_error_retry_count, max_chunk_results
)
def test_unscrambler():
test_data = base64.b64encode(b":8:<2&00000")
expected = ["12345", "54321"]
result_data = cli_app_util.unscramble(test_data)
assert result_data == expected
def test_natural_order_group():
@click.group(cls=NaturalOrderGroup)
def test_cli():
"""pass"""
@test_cli.command()
def test_command1():
"""pass"""
@test_cli.command()
def test_command3():
"""pass"""
@test_cli.command()
def test_command2():
"""pass"""
assert list(test_cli.list_commands(None)) == [
"test-command1",
"test-command3",
"test-command2",
]
@pytest.mark.parametrize(
"time_seconds,time_txt",
[
(10, "0:10"),
(65, "01:05"),
(3605, "01:00:05"),
],
)
def test_cli_test_tty(time_seconds, time_txt):
assert get_time_txt(time_seconds) == time_txt
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.metrics import Metric
from sklearn.metrics import f1_score, accuracy_score
epsilon = K.epsilon()
term_code = {'begin': 1, 'inside': 2, 'outside': 0, }
idx2polarity = {0: 'background', 1: 'positive', 2: 'negative', 3: 'neutral', 4: 'conflict', }
polarity2idx = {v: k for k,v in idx2polarity.items()}
def mask_absa(y_aspect, y_sentiment, mask):
    # Mask out background words and conflict-sentiment words
    # so that they are not counted in the evaluation
Ys_aspect, Ys_sentiment = [], []
for seq_aspect, seq_sentiment, seq_mask in zip(y_aspect, y_sentiment, mask):
labels_aspect, labels_sentiment = [], []
for l_a, l_s, m in zip(seq_aspect, seq_sentiment, seq_mask):
if m == 0:
break
labels_aspect.append(np.argmax(l_a))
if not np.any(l_s):
# all 0s means background or conflict-sentiment word
# -> be not counted for evaluation
labels_sentiment.append(0)
else:
labels_sentiment.append(np.argmax(l_s)+1)
Ys_aspect.append(labels_aspect)
Ys_sentiment.append(labels_sentiment)
return Ys_aspect, Ys_sentiment
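# Tiny illustration of mask_absa (shapes and values are illustrative):
# one sample with three tokens, the last one padded out by the mask
#   y_aspect = [[[1,0,0],[0,1,0],[1,0,0]]]  -> argmax per kept token: [0, 1]
#   y_sent   = [[[0,0,0],[1,0,0],[0,0,0]]]  -> [0, 1] (all-zero rows map to 0)
#   mask     = [[1, 1, 0]]
#   mask_absa(y_aspect, y_sent, mask)  ->  ([[0, 1]], [[0, 1]])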
def score_absa_single_sample(t_true, t_pred, s_true=[], s_pred=[], counters: dict={}, term_only: bool=False):
n_relevants, n_corrects, n_predicteds = 0, 0, 0
n_words = len(t_true)
for j in range(n_words):
if t_true[j] == term_code['begin']:
n_relevants += 1
if not term_only:
if s_true[j] != polarity2idx['background']:
counters['overall'][idx2polarity[s_true[j]]] += 1
if t_pred[j] == term_code['begin']:
matching = True
for k in range(j+1, len(t_true)):
if t_true[k] == term_code['inside'] and t_pred[k] == term_code['inside']:
continue
elif t_true[k] != term_code['inside'] and t_pred[k] != term_code['inside']:
break
else:
matching = False
break
if matching:
n_corrects += 1
if not term_only:
if s_true[j] != polarity2idx['background']:
counters['gold'][idx2polarity[s_true[j]]] += 1
counters['pred'][idx2polarity[s_pred[j]]] += 1
if s_true[j] == s_pred[j]:
counters['correct'][idx2polarity[s_pred[j]]] += 1
else:
counters['pred']['conflict'] += 1
for t_p in t_pred:
if t_p == term_code['begin']:
n_predicteds += 1
if term_only:
return n_relevants, n_corrects, n_predicteds
return [n_relevants, n_corrects, n_predicteds], counters
def score_absa(terms_true, terms_pred,
sentiments_true: list=[], sentiments_pred: list=[],
average_method: str='macro', term_only: bool=False):
# Define useful variables
if not term_only:
# Sentiment Distribution for Aspect / Opinion Terms:
# pred_count: predicted results that are correctly extracted
# gold_count: gold results that are correctly extracted
# correct_count: results that get both span & prediction correctly
# overall_count: ground-truth
counters = {
'gold': {'positive': 0, 'negative': 0, 'neutral': 0, },
'pred': {'positive': 0, 'negative': 0, 'neutral': 0, 'conflict': 0},
'correct': {'positive': 0, 'negative': 0, 'neutral': 0, },
'overall': {'positive': 0, 'negative': 0, 'neutral': 0, }
}
# Do statistics
n_corrects, n_predicteds, n_relevants = 0, 0, 0
n_samples = len(terms_true)
for i in range(n_samples):
t_true, t_pred = terms_true[i], terms_pred[i]
if term_only:
sample_relevants, sample_corrects, sample_predicteds = score_absa_single_sample(t_true, t_pred, term_only=term_only)
else:
s_true, s_pred = sentiments_true[i], sentiments_pred[i]
[sample_relevants, sample_corrects, sample_predicteds], \
counters = score_absa_single_sample(t_true, t_pred, s_true, s_pred, counters, term_only)
n_corrects += sample_corrects
n_relevants += sample_relevants
n_predicteds += sample_predicteds
# Calculate evaluation metrics for Term (of Aspect or Opinion)
term_P = n_corrects / (n_predicteds+epsilon) # precision
term_R = n_corrects / (n_relevants+epsilon) # recall
term_F1 = 2*term_P*term_R / (term_P+term_R+epsilon)
if term_only:
return term_F1
sentiment_Acc, sentiment_F1, absa_F1 = score_sentiment_and_overall(n_predicteds, counters, average_method)
return term_F1, sentiment_Acc, sentiment_F1, absa_F1
def score_sentiment_and_overall(n_predicteds: int, counters: dict, average_method: str='micro'):
# Precision and Recall per each sentiment polarity
positive_P = counters['correct']['positive'] / (counters['pred']['positive']+epsilon)
positive_R = counters['correct']['positive'] / (counters['gold']['positive']+epsilon)
negative_P = counters['correct']['negative'] / (counters['pred']['negative']+epsilon)
negative_R = counters['correct']['negative'] / (counters['gold']['negative']+epsilon)
neutral_P = counters['correct']['neutral'] / (counters['pred']['neutral']+epsilon)
neutral_R = counters['correct']['neutral'] / (counters['gold']['neutral']+epsilon)
# Calculate evaluation metrics for Sentiment
n_corrects_sentiment = counters['correct']['positive'] + counters['correct']['negative'] + counters['correct']['neutral']
n_corrects_aspect = counters['gold']['positive'] + counters['gold']['negative'] + counters['gold']['neutral']
n_overall = counters['overall']['positive'] + counters['overall']['negative'] + counters['overall']['neutral']
sentiment_Acc = n_corrects_sentiment / (n_corrects_aspect+epsilon)
if average_method == 'micro':
sentiment_P = (positive_P+negative_P+neutral_P) / 3.0
sentiment_R = (positive_R+negative_R+neutral_R) / 3.0
sentiment_F1 = 2*sentiment_P*sentiment_R / (sentiment_P+sentiment_R+epsilon)
elif average_method == 'macro':
positive_F1 = 2*positive_P*positive_R / (positive_P+positive_R+epsilon)
negative_F1 = 2*negative_P*negative_R / (negative_P+negative_R+epsilon)
neutral_F1 = 2*neutral_P*neutral_R / (neutral_P+neutral_R+epsilon)
sentiment_F1 = (positive_F1+negative_F1+neutral_F1) / 3.0
else:
raise ValueError('average_method must be either micro or macro')
# Calculate evaluation metrics for ABSA
absa_P = n_corrects_sentiment / (n_predicteds-counters['pred']['conflict']+epsilon)
absa_R = n_corrects_sentiment / (n_overall+epsilon)
absa_F1 = 2*absa_P*absa_R / (absa_P+absa_R+epsilon)
return sentiment_Acc, sentiment_F1, absa_F1
def evaluate_absa(aspects_true, aspects_pred,
opinions_true, opinions_pred,
sentiments_true, sentiments_pred,
mask,
include_opinion: bool=True):
aspects_true, sentiments_true = mask_absa(aspects_true, sentiments_true, mask)
aspects_pred, sentiments_pred = mask_absa(aspects_pred, sentiments_pred, mask)
absa_scores = score_absa(aspects_true, aspects_pred, sentiments_true, sentiments_pred)
# aspect_f1, sentiment_acc, sentiment_f1, absa_f1 = absa_scores
if include_opinion:
opinions_true, _ = mask_absa(opinions_true, sentiments_true, mask)
opinions_pred, _ = mask_absa(opinions_pred, sentiments_pred, mask)
opinion_f1 = score_absa(opinions_true, opinions_pred, term_only=True)
absa_scores = [opinion_f1] + list(absa_scores)
return absa_scores
def evaluate_multilists(arrays_true, arrays_pred, mask, average_method: str='macro'):
if isinstance(arrays_true, np.ndarray):
arrays_true = [arrays_true]
arrays_pred = [arrays_pred]
elif not isinstance(arrays_true, (list, tuple)):
raise ValueError(f"inputs must be np.ndarray / list / tuple, not {arrays_true.__class__}")
average_method = average_method.lower()
if average_method not in ['micro', 'macro']:
        print(f'average_method={average_method} is not a valid option, so average_method is set as macro')
        average_method = 'macro'
def evaluate(list_true, list_pred, mask):
        # Remove background words so that they are not counted in the evaluation
list_true, list_true_unmask = np.argmax(list_true, axis=-1), []
list_pred, list_pred_unmask = np.argmax(list_pred, axis=-1), []
for seq_true, seq_pred, seq_mask in zip(list_true, list_pred, mask):
seq_true_unmask, seq_pred_unmask = [] , []
for token_true, token_pred, token_mask in zip(seq_true, seq_pred, seq_mask):
if token_mask == 0:
break
if token_true == 0:
continue # skip background
seq_true_unmask.append(token_true)
seq_pred_unmask.append(token_pred)
list_true_unmask.extend(seq_true_unmask)
list_pred_unmask.extend(seq_pred_unmask)
list_score = [f1_score(list_true_unmask, list_pred_unmask, zero_division='warn', average=average_method),
accuracy_score(list_true_unmask, list_pred_unmask)]
return list_score
scores = [evaluate(list_true, list_pred, mask)
for list_true, list_pred in zip(arrays_true, arrays_pred)]
return sum(scores, []) # flatten nested lists
class F_score(Metric):
def __init__(self, beta: int=1, threshold: float=0.5, **kwargs):
super().__init__(**kwargs) # handle base args (e.g. dtype)
self.beta = int(beta)
self.threshold = float(threshold)
self.count_positives = create_positive_counters(beta, threshold)
self.n_true_positives = self.add_weight("n_true_positives", initializer="zeros")
self.n_possible_positives = self.add_weight('n_possible_positives', initializer='zeros')
self.n_predicted_positives = self.add_weight('n_predicted_positives', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
true_positives, predicted_positives, possible_positives = self.count_positives(y_true, y_pred)
self.n_true_positives.assign_add(true_positives)
self.n_possible_positives.assign_add(possible_positives)
self.n_predicted_positives.assign_add(predicted_positives)
def result(self):
precision = self.n_true_positives / (self.n_predicted_positives+epsilon)
recall = self.n_true_positives / (self.n_possible_positives+epsilon)
f_score = (1+self.beta**2) * (precision*recall) / (self.beta**2*precision+recall+epsilon)
return f_score
def create_positive_counters(beta=1, threshold=0.5):
def count_positives(y_true, y_pred):
def count(x):
return tf.reduce_sum(x)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
y_pred = tf.where(y_pred>=threshold, 1., 0.)
true_positives = count(y_true*y_pred)
predicted_positives = count(y_pred)
possible_positives = count(y_true)
return true_positives, predicted_positives, possible_positives
return count_positives
def create_fscore(beta=1, threshold=0.5):
def f_score(y_true, y_pred):
def count(x):
return tf.reduce_sum(x)
def recall(y_true, y_pred):
true_positives = count(y_true*y_pred)
possible_positives = count(y_true)
recall = true_positives / (possible_positives+K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = count(y_true*y_pred)
predicted_positives = count(y_pred)
precision = true_positives / (predicted_positives+K.epsilon())
return precision
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
y_pred = tf.where(y_pred>=threshold, 1., 0.)
P, R = precision(y_true, y_pred), recall(y_true, y_pred)
return (1+beta**2)*(P*R)/(beta**2*P+R+K.epsilon())
return f_score
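# Minimal usage sketch for the F_score metric defined above (values are illustrative
# and assume TF2 eager execution, which is the default):
if __name__ == '__main__':
    m = F_score(beta=1, threshold=0.5)
    y_true = tf.constant([[1., 0., 1., 1.]])
    y_pred = tf.constant([[0.9, 0.2, 0.4, 0.8]])
    m.update_state(y_true, y_pred)
    print(float(m.result()))  # precision 2/2, recall 2/3 -> F1 close to 0.8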
|
import html
import re
from typing import List, Optional
from flask import Blueprint, current_app, jsonify, g
from flask_apispec import use_kwargs
from webargs.flaskparser import use_args
from neo4japp.blueprints.auth import auth
from neo4japp.blueprints.projects import ProjectBaseView
from neo4japp.constants import FILE_INDEX_ID, FRAGMENT_SIZE, LogEventType
from neo4japp.blueprints.filesystem import FilesystemBaseView
from neo4japp.data_transfer_objects.common import ResultQuery
from neo4japp.database import (
get_search_service_dao,
get_elastic_service,
get_file_type_service
)
from neo4japp.exceptions import ServerException
from neo4japp.models import (
Files,
Projects,
)
from neo4japp.schemas.common import PaginatedRequestSchema
from neo4japp.schemas.search import (
ContentSearchSchema,
ContentSearchResponseSchema,
OrganismSearchSchema,
SynonymSearchSchema,
SynonymSearchResponseSchema,
VizSearchSchema,
)
from neo4japp.services.file_types.providers import (
DirectoryTypeProvider,
)
from neo4japp.util import jsonify_with_class, SuccessResponse
from neo4japp.utils.logger import EventLog, UserEventLog
from neo4japp.utils.request import Pagination
bp = Blueprint('search', __name__, url_prefix='/search')
@bp.route('/viz-search', methods=['POST'])
@auth.login_required
@use_kwargs(VizSearchSchema)
def visualizer_search(
query,
page,
limit,
domains,
entities,
organism
):
search_dao = get_search_service_dao()
current_app.logger.info(
f'Term: {query}, Organism: {organism}, Entities: {entities}, Domains: {domains}',
extra=UserEventLog(
username=g.current_user.username,
event_type=LogEventType.VISUALIZER_SEARCH.value).to_dict()
)
results = search_dao.visualizer_search(
term=query,
organism=organism,
page=page,
limit=limit,
domains=domains,
entities=entities,
)
return jsonify({
'result': results.to_dict(),
})
# Start Search Helpers #
def content_search_params_are_empty(params):
"""
Checks if the given content search params are completely empty. We do checking on
specific fields, because for some options we don't want to execute a search if only that option
is present. E.g., a request with only the `synonyms` option doesn't make sense.
"""
if 'q' in params and params['q']:
return False
elif 'types' in params and params['types']:
return False
elif 'folders' in params and params['folders']:
return False
return True
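# Quick illustration of the check above (argument values are illustrative):
#   content_search_params_are_empty({'q': '', 'types': '', 'folders': ''})       -> True
#   content_search_params_are_empty({'q': 'BRCA1', 'types': '', 'folders': ''})  -> False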
def get_types_from_params(q, advanced_args):
"""
Adds "types" filters to `q` for each type specified in `advanced_args`, or returns `q`
unmodified if `types` was not present or empty.
"""
types = []
try:
if advanced_args['types'] != '':
types = advanced_args['types'].split(';')
return f'{q} ({" OR ".join([f"type:{t}" for t in types])})' if len(types) else q
except KeyError:
return q
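# Example of the query rewriting above (argument values are illustrative):
#   get_types_from_params('cancer', {'types': 'pdf;map'})  ->  'cancer (type:pdf OR type:map)'
#   get_types_from_params('cancer', {'types': ''})         ->  'cancer'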
def get_folders_from_params(advanced_args):
"""
Extracts and returns the list of file hash IDs from the input `advanced_args`. `folders` is an
expected property on `advanced_args`, if it does not exist, or it is empty, then an empty list
is returned instead.
"""
try:
if advanced_args['folders'] != '':
return advanced_args['folders'].split(';')
else:
return []
except KeyError:
return []
def get_filepaths_filter(accessible_folders: List[Files], accessible_projects: List[Projects]):
"""
Generates an elastic boolean query which filters documents based on folder/project access. Takes
as input two options:
- accessible_folders: a list of Files objects representing folders to be included in the
query
- accessible_projects: a list of Projects objects representing projects to be included in
the query
Any files present in accessible_folders which are not children of accessible_projects will be
ignored, and returned along with the query.
"""
accessible_projects_ids = [
project.id
for project in accessible_projects
]
filepaths = []
for file in accessible_folders:
filepaths.append(file.filename_path)
if len(filepaths):
return {
'bool': {
'should': [
{
"term": {
"file_path.tree": file_path
}
}
for file_path in filepaths
]
}
}
else:
# If there were no accessible filepaths in the given list, search all accessible projects
return {
'bool': {
'should': [
# If the user has access to the project the document is in...
{'terms': {'project_id': accessible_projects_ids}},
# OR if the document is public.
{'term': {'public': True}}
]
}
}
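# Sketch of the two filter shapes produced above (field values are illustrative):
#   with accessible folders:
#       {'bool': {'should': [{'term': {'file_path.tree': '/ProjectX/reports'}}]}}
#   with no accessible folders:
#       {'bool': {'should': [{'terms': {'project_id': [1, 2]}}, {'term': {'public': True}}]}}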
# End Search Helpers #
class ContentSearchView(ProjectBaseView, FilesystemBaseView):
decorators = [auth.login_required]
@use_args(ContentSearchSchema)
@use_args(PaginatedRequestSchema)
def get(self, params: dict, pagination: Pagination):
current_app.logger.info(
f'Term: {params["q"]}',
extra=UserEventLog(
username=g.current_user.username,
event_type=LogEventType.CONTENT_SEARCH.value).to_dict()
)
current_user = g.current_user
file_type_service = get_file_type_service()
if content_search_params_are_empty(params):
return jsonify(ContentSearchResponseSchema(context={
'user_privilege_filter': g.current_user.id,
}).dump({
'total': 0,
'query': ResultQuery(phrases=[]),
'results': [],
}))
offset = (pagination.page - 1) * pagination.limit
q = params['q']
q = get_types_from_params(q, params)
folders = get_folders_from_params(params)
# Set the search term once we've parsed the params for all advanced options
user_search_query = q.strip()
text_fields = ['description', 'data.content', 'filename']
text_field_boosts = {'description': 1, 'data.content': 1, 'filename': 3}
highlight = {
'fields': {
'data.content': {},
},
# Need to be very careful with this option. If fragment_size is too large, search
# will be slow because elastic has to generate large highlight fragments. Setting
# to 0 generates cleaner sentences, but also runs the risk of pulling back huge
# sentences.
'fragment_size': FRAGMENT_SIZE,
'order': 'score',
'pre_tags': ['@@@@$'],
'post_tags': ['@@@@/$'],
'number_of_fragments': 100,
}
EXCLUDE_FIELDS = ['enrichment_annotations', 'annotations']
# Gets the full list of projects accessible by the current user.
accessible_projects, _ = self.get_nondeleted_projects(None, accessible_only=True)
# Gets the full list of folders accessible by the current user.
accessible_folders = self.get_nondeleted_recycled_files(
Files.hash_id.in_(folders),
attr_excl=EXCLUDE_FIELDS
)
accessible_folder_hash_ids = [folder.hash_id for folder in accessible_folders]
dropped_folders = [folder for folder in folders if folder not in accessible_folder_hash_ids]
filepaths_filter = get_filepaths_filter(
accessible_folders,
accessible_projects
)
# These are the document fields that will be returned by elastic
return_fields = ['id']
filter_ = [
# The file must be accessible by the user, and in the specified list of
# filepaths or public if no list is given...
filepaths_filter,
# ...And it shouldn't be a directory. Right now there's not really any helpful info
# attached to directory type documents (including a filename, for top-level
# directories), so instead just ignore them.
{
'bool': {
'must_not': [
{'term': {'mime_type': DirectoryTypeProvider.MIME_TYPE}}
]
}
}
]
elastic_service = get_elastic_service()
elastic_result, search_phrases = elastic_service.search(
index_id=FILE_INDEX_ID,
user_search_query=user_search_query,
offset=offset,
limit=pagination.limit,
text_fields=text_fields,
text_field_boosts=text_field_boosts,
return_fields=return_fields,
filter_=filter_,
highlight=highlight
)
elastic_result = elastic_result['hits']
highlight_tag_re = re.compile('@@@@(/?)\\$')
# So while we have the results from Elasticsearch, they don't contain up to date or
# complete data about the matched files, so we'll take the hash IDs returned by Elastic
# and query our database
file_ids = [doc['fields']['id'][0] for doc in elastic_result['hits']]
file_map = {
file.id: file
for file in self.get_nondeleted_recycled_files(
Files.id.in_(file_ids),
attr_excl=['enrichment_annotations', 'annotations']
)
}
results = []
for document in elastic_result['hits']:
file_id = document['fields']['id'][0]
file: Optional[Files] = file_map.get(file_id)
if file and file.calculated_privileges[current_user.id].readable:
file_type = file_type_service.get(file)
if file_type.should_highlight_content_text_matches() and \
document.get('highlight') is not None:
if document['highlight'].get('data.content') is not None:
snippets = document['highlight']['data.content']
for i, snippet in enumerate(snippets):
snippet = html.escape(snippet)
snippet = highlight_tag_re.sub('<\\1highlight>', snippet)
snippets[i] = f"<snippet>{snippet}</snippet>"
file.calculated_highlight = snippets
results.append({
'item': file,
'rank': document['_score'],
})
return jsonify(ContentSearchResponseSchema(context={
'user_privilege_filter': g.current_user.id,
}).dump({
'total': elastic_result['total'],
'query': ResultQuery(phrases=search_phrases),
'results': results,
'dropped_folders': dropped_folders
}))
class SynonymSearchView(FilesystemBaseView):
decorators = [auth.login_required]
@use_args(SynonymSearchSchema)
@use_args(PaginatedRequestSchema)
def get(self, params, pagination: Pagination):
search_term = params.get('term', None)
organisms = []
if len(params['organisms']):
organisms = params['organisms'].split(';')
types = []
if len(params['types']):
types = params['types'].split(';')
if search_term is None:
return jsonify(SynonymSearchResponseSchema().dump({
'data': [],
}))
page = pagination.page
limit = pagination.limit
offset = (page - 1) * limit
try:
search_dao = get_search_service_dao()
results = search_dao.get_synonyms(search_term, organisms, types, offset, limit)
count = search_dao.get_synonyms_count(search_term, organisms, types)
except Exception as e:
current_app.logger.error(
f'Failed to get synonym data for term: {search_term}',
exc_info=e,
extra=EventLog(event_type=LogEventType.CONTENT_SEARCH.value).to_dict()
)
raise ServerException(
title='Unexpected error during synonym search',
message='A system error occurred while searching for synonyms, we are ' +
'working on a solution. Please try again later.'
)
return jsonify(SynonymSearchResponseSchema().dump({
'data': results,
'count': count
}))
bp.add_url_rule('content', view_func=ContentSearchView.as_view('content_search'))
bp.add_url_rule('synonyms', view_func=SynonymSearchView.as_view('synonym_search'))
@bp.route('/organism/<string:organism_tax_id>', methods=['GET'])
@auth.login_required
@jsonify_with_class()
def get_organism(organism_tax_id: str):
search_dao = get_search_service_dao()
result = search_dao.get_organism_with_tax_id(organism_tax_id)
return SuccessResponse(result=result, status_code=200)
@bp.route('/organisms', methods=['POST'])
@auth.login_required
@use_kwargs(OrganismSearchSchema)
def get_organisms(query, limit):
search_dao = get_search_service_dao()
results = search_dao.get_organisms(query, limit)
return jsonify({'result': results})
|
#Betrothed Numbers
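# A pair (m, n) is "betrothed" (quasi-amicable) when the sum of the proper divisors
# of each number equals the other number plus 1, e.g. (48, 75): the proper divisors
# of 48 sum to 76 = 75 + 1 and those of 75 sum to 49 = 48 + 1.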
#TOOLS
def Divisors(num):
from math import sqrt as mmsq
s=set([1])
i=1
a=int(mmsq(num)+1)
while i<=a:
if(num//i==num):
i+=1
continue
if (num%i==0):
if (num//i!=i):
s.add(num//i)
s.add(i)
i+=1
return s
#############################
# Nikita's Method
def NBetrothedNumbers(n) :
bet=[]
for num1 in range (1,n) :
sum1 = 1
i = 2
while i * i <= num1 :
if (num1 % i == 0) :
sum1 = sum1 + i
if (i * i != num1) :
sum1 += num1 / i
i =i + 1
if (sum1 > num1) :
num2 = sum1 - 1
sum2 = 1
j = 2
while j * j <= num2 :
if (num2 % j == 0) :
sum2 += j
if (j * j != num2) :
sum2 += num2 / j
j = j + 1
if (sum2 == num1+1) :
bet.append ((num1,num2))
return bet
n = 1000000
bet=NBetrothedNumbers(n)
bet
##Brute force Method
def BetrothedNumber(k,ratio=5.5,ratio2=5.5,order=1,returni=False):
##All divisors for all numbers
allDels=dict()
###Second number is greater than first number
from itertools import chain
concatenated = chain( range(k, int(k*ratio)+1 ),range(k, int(k/ratio2)+1 ,-1) )
for i in concatenated:
###We don't want repeat operations
###Therefore search and save all divisors
if(str(i) not in allDels):
allDels[str(i)] = Divisors(i)
        ###Sum1+order_num = 2nd Num and Sum2+order_num = 1st Num
if(i != k and sum(allDels[str(i)]) == k+order and sum(allDels[str(k)]) == i+order):
if(returni):
return (k,i)
else:
print(k,"->",i)
def BetrothedNumbers(m,n,order=1):
if(m!=n):
s1=set([1])
for i in range(int(m/2+1),1,-1):
if(m%i==0):
s1.add(i)
s2=set([1])
for i in range(int(n/2+1),1,-1):
if(n%i==0):
s2.add(i)
return sum(s1)==n+1 and sum(s2)+n==sum(s1)+m
else:
return False
def doTest(toPrint=False,toProgress=False,start=1,toEnd=1000,algo="bf"):
s=set()
KK=10000
from IPython.display import clear_output
for i in range(start,toEnd+1):
if(toProgress and (i<KK or (i>=KK and i%(KK/100)==0))):
clear_output(wait=True)
print(i,end="\t")
if(algo=="bf"):
            bet=BetrothedNumber(i,returni=True)
if(bet):
s.add(bet)
if(toPrint and not toProgress):
print(bet,end=", ")
if(toProgress and (i<KK or (i>=KK and i%(KK/100)==0))):
print(s)
if(not toPrint):
return s
#BetrothedNumber(48)
#BetrothedNumbers(140,195)
#doTest()
|
"""
1. Print all commits (messages) and count them.
1.1 Print the messages whose length is greater than 10, and their count.
"""
import sys
import requests
url = 'https://api.github.com/repos/{}/{}/commits?page=1&per_page=100'.format(sys.argv[1],sys.argv[2])
response = requests.get(url,headers={'Authorization':'Token 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'}).json()
commit=[]
for r in response:
commit.append(r['commit']['message'])
total = 0
# print(commit)
count = 0
for message in commit:
    total += 1
    if len(message) > 10:
        count += 1
        print(message)
print('total commits -', total)
print('total commit messages longer than 10 characters -', count)
user=[]
for r in response:
if r['commit']['author']['name'] not in user:
user.append(r['commit']['author']['name'])
print(user)
|
#! /usr/bin/env python
import math
import rospy
import tf
import tf2_ros
from sensor_msgs.msg import LaserScan
from darknet_ros_msgs.msg import BoundingBox #msg that contains bounding box coordinates
from darknet_ros_msgs.msg import BoundingBoxes
# from apriltag_ros.msg import Coordinates
#rostopic echo darknet_ros/bounding_boxes...
#std_msgs/Header header
#uint32 seq
#time stamp
#string frame_id
#std_msgs/Header image_header
#uint32 seq
#time stamps
#string frame_id
#darknet_ros_msgs/BoundingBox[] bounding_boxes
#float64 probability
#int64 xmin
#int64 ymin
#int64 xmax
#int64 ymax
#int16 id
#string Class
#rostopic type /scan | rosmsg show
#std_msgs/Header header
#uint32 seq
#time stamp
#string frame_id
#float32 angle_min
#float32 angle_max
#float32 angle_increment
#float32 time_increment
#float32 scan_time
#float32 range_min
#float32 range_max
#float32[] ranges
#float32[] intensities
rospy.init_node('animalDetect_node',anonymous = False)
lidar_angle = None
def callback1(animalBox): #function for calculating the relative angle of the detected animal
    global lidar_angle
    angleScale = 320/26.75 #Camera view 26.75*2, negative direction counter clockwise
try:
animalType = str(animalBox.bounding_boxes[0].Class)
#CALCULATING
if animalType in ['cat', 'cow', 'dog', 'horse']: #if class is one of these animals
print(animalType)
x_max = animalBox.bounding_boxes[0].xmax
x_min = animalBox.bounding_boxes[0].xmin
            x_position = (x_max + x_min)/2 #calculate the pixel in the optical frame [0-640]; the Raspberry Pi camera resolution is 640x....
x_angle = x_position/angleScale-26.75
x_angle = round(x_angle)
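            # x_angle now lies in roughly [-26.75, 26.75] degrees, with 0 straight ahead of the camera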
if x_angle <0: #To get correct slot in ranges[], pos direct in ranges is counter clockwise [0-359]
lidar_angle = -x_angle
else:
lidar_angle = 359 - x_angle
#print(x_angle)
# print(lidar_angle)
# lidarAngleInfo = Coordinates()
#
# lidarAngleInfo.lidarAngle = lidar_angle #might be good for geometric
#
# try:
# pub.publish(lidarAngleInfo) #Publishing coordinates onto the "chatter" topic for the yaml file to read.
#
# except rospy.ROSInterruptException:
# pass
return lidar_angle
else:
return
except (IndexError):
return
#TRANSLATE THIS TO ANGLE AROUND ROBOT
#note: 53.5 degree viewing angle max (-26.75 to 26.75)
#note: LaserScan (0-360) with 0 being in front, rotating CCW - base scan
def callback2(laserData): #function for determining laser distance at the angle found by callback1
    # print(len(laserData.ranges))
    if lidar_angle is None:
        return
    try:
        animalDistance = laserData.ranges[int(lidar_angle)]
        print(animalDistance)
    except IndexError:
        pass
# if lidar_angle:
# print(lidar_angle)
# try:
# animal_distance = laserData.ranges[lidar_angle]
# # if x_angle:
# print(animal_distance)
#
# except(IndexError):
#pub = rospy.Publisher('animalFound', Coordinates, queue_size=10)
#return
if __name__ == '__main__':
sub1 = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes , callback1)
#sub2 = rospy.Subscriber('/scan', LaserScan , callback2)
# sub3 = rospy.Subscriber('chatter', Coordinates , callback2)
sub2 = rospy.Subscriber('/scan', LaserScan , callback2)
rospy.spin() #continuous loop
|
# The myplugin module must be locatable by Python.
# If you configured CMake in the build directory ``/path/to/repo/build`` then,
# assuming you are in ``/path/to/repo``, run the tests with something like
# PYTHONPATH=./cmake-build-debug/src/pythonmodule mpiexec -n 2 python -m mpi4py -m pytest tests/
# This test is not currently run automatically in any way. Build the module, point your PYTHONPATH at it,
# and run pytest in the tests directory.
import logging
import os
try:
import mpi4py.MPI as _MPI
except (ImportError, ModuleNotFoundError):
_MPI = None
import gmxapi as gmx
from gmxapi.simulation.context import Context
from gmxapi.simulation.workflow import WorkElement, from_tpr
from gmxapi import version as gmx_version
import pytest
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logging.getLogger().addHandler(ch)
logger = logging.getLogger()
def test_import():
# Suppress inspection warning outside of testing context.
# noinspection PyUnresolvedReferences
import myplugin
assert myplugin
@pytest.mark.usefixtures("cleandir")
def test_binding_protocol(spc_water_box, mdrun_kwargs):
"""Test that gmxapi successfully attaches MD plugins."""
import myplugin
if _MPI is not None:
_size = _MPI.COMM_WORLD.Get_size()
_rank = _MPI.COMM_WORLD.Get_rank()
else:
_size = 1
_rank = 0
tpr_filename = spc_water_box
logger.info("Testing plugin potential with input file {}".format(os.path.abspath(tpr_filename)))
assert gmx.version.api_is_at_least(0, 2, 1)
md = from_tpr([tpr_filename] * _size, append_output=False, **mdrun_kwargs)
potential = WorkElement(namespace="myplugin",
operation="null_restraint",
params={'sites': [1, 4]})
potential.name = "null restraint"
md.add_dependency(potential)
context = Context(md)
with context as session:
session.run()
# See also #3038, #3145, #4079
assert isinstance(context.potentials, list)
assert len(context.potentials) > 0
for restraint in context.potentials:
if isinstance(restraint, myplugin.NullRestraint):
assert gmx.version.api_is_at_least(0, 2, 1)
assert restraint.count() > 1
@pytest.mark.usefixtures("cleandir")
def test_ensemble_potential_nompi(spc_water_box, mdrun_kwargs):
"""Test ensemble potential without an ensemble.
"""
tpr_filename = spc_water_box
logger.info("Testing plugin potential with input file {}".format(os.path.abspath(tpr_filename)))
assert gmx.version.api_is_at_least(0, 0, 5)
md = from_tpr([tpr_filename], append_output=False, **mdrun_kwargs)
# Create a WorkElement for the potential
params = {'sites': [1, 4],
'nbins': 10,
'binWidth': 0.1,
'min_dist': 0.,
'max_dist': 10.,
'experimental': [1.] * 10,
'nsamples': 1,
'sample_period': 0.001,
'nwindows': 4,
'k': 10000.,
'sigma': 1.}
potential = WorkElement(namespace="myplugin",
operation="ensemble_restraint",
params=params)
# Note that we could flexibly capture accessor methods as workflow elements, too. Maybe we can
# hide the extra Python bindings by letting myplugin.HarmonicRestraint automatically convert
# to a WorkElement when add_dependency is called on it.
potential.name = "ensemble_restraint"
md.add_dependency(potential)
context = Context(md)
with context as session:
session.run()
@pytest.mark.withmpi_only
@pytest.mark.usefixtures("cleandir")
def test_ensemble_potential_withmpi(spc_water_box, mdrun_kwargs):
tpr_filename = spc_water_box
logger.info("Testing plugin potential with input file {}".format(os.path.abspath(tpr_filename)))
assert gmx_version.api_is_at_least(0, 0, 5)
md = from_tpr([tpr_filename, tpr_filename], append_output=False, **mdrun_kwargs)
# Create a WorkElement for the potential
params = {'sites': [1, 4],
'nbins': 10,
'binWidth': 0.1,
'min_dist': 0.,
'max_dist': 10.,
'experimental': [0.5] * 10,
'nsamples': 1,
'sample_period': 0.001,
'nwindows': 4,
'k': 10000.,
'sigma': 1.}
potential = WorkElement(namespace="myplugin",
operation="ensemble_restraint",
params=params)
# Note that we could flexibly capture accessor methods as workflow elements, too. Maybe we can
# hide the extra Python bindings by letting myplugin.HarmonicRestraint automatically convert
# to a WorkElement when add_dependency is called on it.
potential.name = "ensemble_restraint"
md.add_dependency(potential)
context = Context(md)
with context as session:
session.run()
|
from glob import glob
from typing import List
import os
import pandas as pd
import wget
class EmoDB(object):
def __init__(self,
sampling_rate: int = 16000,
num_mel_bins: int = 40,
frame_length: int = 50,
frame_shift: int = 10,
max_len: int = 3,
center_feats: bool = True,
scale_feats: bool = True,
emotions: List[str] = None,
download_dir: str = None,
experiment_dir: str = None):
if emotions is None:
emotions = ["neutral", "anger", "happiness", "sadness"]
# config download dir
if download_dir is None:
self.download_root = f"{os.path.expanduser('~')}/vistec-ser_tmpfiles/vistec"
else:
self.download_root = f"{download_dir}/vistec-ser_tmpfiles/emodb"
if not os.path.exists(self.download_root):
os.makedirs(self.download_root)
# config experiment dir
if experiment_dir is None:
self.experiment_root = f"{os.path.expanduser('~')}/vistec-ser_tmpfiles/exp_emodb"
else:
self.experiment_root = f"{experiment_dir}"
self.experiment_dir = f"{self.experiment_root}"
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
self.download_url = "http://www.emodb.bilderbar.info/download/download.zip"
self.emotion_mappings = {'N': 'neutral', 'W': 'anger', 'F': 'happiness', 'T': 'sadness'}
self.label_path = f"{self.download_root}/labels.csv"
self.test_speaker = ["09", "15"]
self.val_speaker = ["12", "10"]
self.max_len = max_len
self.sampling_rate = sampling_rate
self.frame_length = frame_length
self.frame_shift = frame_shift
self.num_mel_bins = num_mel_bins
self.center_feats = center_feats
self.scale_feats = scale_feats
self.sec_to_frame = 10 * self.frame_shift
self.emotions = emotions
self.n_classes = len(self.emotions)
def download(self):
# download
if not os.path.exists(f"{self.download_root}/download.zip"):
print(">downloading dataset...")
wget.download(url=self.download_url, out=f"{self.download_root}/download.zip", bar=wget.bar_adaptive)
# unzip
if not os.path.exists(f"{self.download_root}/emo-db"):
print(">unzipping data...")
os.system(f"unzip -q {self.download_root}/download.zip -d {self.download_root}/emo-db")
if not os.path.exists(f"{self.label_path}"):
print(">preparing labels...")
labels = ["PATH, EMOTION\n"]
for wav in glob(f"{self.download_root}/emo-db/*/*.wav"):
key = wav.split('.')[0][-2]
if key not in self.emotion_mappings.keys():
continue
emotion = self.emotion_mappings[key]
wav = os.path.abspath(wav)
labels.append(f"{wav},{emotion}\n")
            with open(self.label_path, "w") as label_file:
                label_file.writelines(labels)
def prepare_labels(self):
self.download()
assert os.path.exists(self.label_path)
labels = pd.read_csv(self.label_path)
|
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2016-05-04 20:42:41
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2016-05-04 20:43:41
s = raw_input()
n = len(s)
player1, player2 = 0, 0
for i in xrange(len(s)):
if s[i] in "AEIOU":
player1 += n - i
else:
player2 += n - i
if player1 > player2:
print "Kevin", player1
elif player1 < player2:
print "Stuart", player2
else:
print "Draw"
|
# Removing "_pilon" from fasta titles and writing the new reference ot the right position
old_fasta = '../../Output/WGS/reference_refinement/pilon_output/w303_vlte.fasta'
new_fasta = '../../Output/WGS/reference/w303_vlte.fasta'
with open(new_fasta, 'w') as outfile:
with open(old_fasta, 'r') as infile:
for line in infile:
outfile.write(line.replace("_pilon", ""))
# Updates a gff based on indels in a pilon .changes file
import pandas as pd
import csv
from collections import defaultdict
change_file = '../../Output/WGS/reference_refinement/pilon_output/w303_vlte.changes'
old_gff = '../accessory_files/orig_w303_ref/w303_ref.gff'
new_gff = '../../Output/WGS/reference/w303_vlte.gff'
# Generating map between old reference index and new reference index
base_map = defaultdict(dict)
with open(change_file, 'r') as infile:
reader = csv.reader(infile, delimiter=' ')
for row in reader:
chromo = row[0].split(':')[0].split('_')[0]
old_loc = int(row[0].split(':')[1].split('-')[0])
new_loc = int(row[1].split(':')[1].split('-')[0])
if row[2] == '.':
base_map[chromo][old_loc] = new_loc+len(row[3])
elif row[3] == '.':
base_map[chromo][old_loc+len(row[2])] = new_loc
elif len(row[2]) != len(row[3]):
base_map[chromo][old_loc+len(row[2])] = new_loc + len(row[3])
def old_to_new_base(chromo, old_base): # makes the conversions
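    # base_map[chromo] maps old-reference anchor positions (recorded at each indel) to new-reference
    # positions; the largest anchor at or below old_base gives the cumulative coordinate shift to apply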
td = base_map[chromo]
anchors = sorted(td.keys())
shift = 0
for anch in anchors:
if anch <= old_base:
shift = td[anch] - anch
else:
break
return old_base + shift
## Double check: all simple substitutions in the change file should already map to the correct new coordinate
with open(change_file, 'r') as infile:
reader = csv.reader(infile, delimiter=' ')
for row in reader:
chromo = row[0].split(':')[0].split('_')[0]
old_loc = int(row[0].split(':')[1].split('-')[0])
new_loc = int(row[1].split(':')[1].split('-')[0])
if len(row[2]) == len(row[3]) and '.' not in row:
try:
assert new_loc == old_to_new_base(chromo, old_loc)
except AssertionError:
print('Index change error, for this row:', row, '. Thinks this is the new base:', old_to_new_base(chromo, old_loc))
with open(old_gff, 'r') as infile:
reader = csv.reader(infile, delimiter='\t')
next(reader) #skip header
with open(new_gff, 'w') as outfile:
writer = csv.writer(outfile, delimiter='\t')
writer.writerow(['##gff-version 3'])
for row in reader:
writer.writerow(row[:3] + [old_to_new_base(row[0], int(r)) for r in row[3:5]] + row[5:])
|
import discord, os, platform, asyncio
from discord.ext import commands
import core.config
class SchedulerEvents(commands.Cog, name='scheduler_events'):
def __init__(self, bot):
self.bot = bot
if core.config.TESTING:
self.channel = self.bot.get_channel(core.config.BOT_CHANNEL)
else:
self.channel = self.bot.get_channel(core.config.WADSWORTH_CHANNEL)
self.ping_channel = self.channel
# Events
@commands.Cog.listener()
async def on_message(self, message):
return True
# @commands.Cog.listener()
# async def on_raw_reaction_add(self, payload):
# if payload.emoji.id in reaction_list:
# print('Hello')
    # check the message database for a schedule, if the message that received a reaction is within the database and the reaction is within parameters
# then process changes to the scheduled task
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
print('Goodbye')
|
# following PEP 386, versiontools will pick it up
__version__ = (0, 1, 7, "final", 0)
|
from ._command import Command, Commands
from .mkr import MakeReportCommand
from .debug import DebugCommand
|
from cadorsfeed import db
from cadorsfeed.aerodb import lookup
from flask import current_app as app
def import_blacklist():
with app.open_resource('aerodb/blacklist.txt') as blacklist:
for line in blacklist:
line = line.strip()
if not line.startswith('#') and len(line) == 3:
aerodrome = lookup(line)
if aerodrome is not None:
aerodrome.blacklist = True
db.session.commit()
|
################################################################################
#
# Package : AlphaPy
# Module : estimators
# Created : July 11, 2013
#
# Copyright 2017 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Imports
#
from alphapy.globals import ModelType
from alphapy.globals import Objective
from alphapy.globals import SSEP
import logging
import numpy as np
from scipy.stats import randint as sp_randint
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RandomizedLasso
from sklearn.linear_model import RandomizedLogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import LinearSVC
from sklearn.svm import OneClassSVM
from sklearn.svm import SVC
import xgboost as xgb
import yaml
#
# Initialize logger
#
logger = logging.getLogger(__name__)
#
# Define scorers
#
scorers = {'accuracy' : (ModelType.classification, Objective.maximize),
'average_precision' : (ModelType.classification, Objective.maximize),
'f1' : (ModelType.classification, Objective.maximize),
'f1_macro' : (ModelType.classification, Objective.maximize),
'f1_micro' : (ModelType.classification, Objective.maximize),
'f1_samples' : (ModelType.classification, Objective.maximize),
'f1_weighted' : (ModelType.classification, Objective.maximize),
'neg_log_loss' : (ModelType.classification, Objective.minimize),
'precision' : (ModelType.classification, Objective.maximize),
'recall' : (ModelType.classification, Objective.maximize),
'roc_auc' : (ModelType.classification, Objective.maximize),
'adjusted_rand_score' : (ModelType.clustering, Objective.maximize),
'mean_absolute_error' : (ModelType.regression, Objective.minimize),
'neg_mean_squared_error' : (ModelType.regression, Objective.minimize),
'median_absolute_error' : (ModelType.regression, Objective.minimize),
'r2' : (ModelType.regression, Objective.maximize)}
#
# Define XGB scoring map
#
xgb_score_map = {'neg_log_loss' : 'logloss',
'mean_absolute_error' : 'mae',
'neg_mean_squared_error' : 'rmse',
'precision' : 'map',
'roc_auc' : 'auc'}
#
# Class Estimator
#
class Estimator:
"""Store information about each estimator.
Parameters
----------
algorithm : str
Abbreviation representing the given algorithm.
model_type : enum ModelType
The machine learning task for this algorithm.
estimator : function
A scikit-learn, TensorFlow, or XGBoost function.
grid : dict
The dictionary of hyperparameters for grid search.
scoring : bool, optional
Use a scoring function to evaluate the best model.
"""
# __new__
def __new__(cls,
algorithm,
model_type,
estimator,
grid,
scoring=False):
return super(Estimator, cls).__new__(cls)
# __init__
def __init__(self,
algorithm,
model_type,
estimator,
grid,
scoring=False):
self.algorithm = algorithm.upper()
self.model_type = model_type
self.estimator = estimator
self.grid = grid
self.scoring = scoring
# __str__
def __str__(self):
        return self.algorithm
#
# Classes
#
class AdaBoostClassifierCoef(AdaBoostClassifier):
"""An AdaBoost classifier where the coefficients are set to
the feature importances for Recursive Feature Elimination
to work.
"""
def fit(self, *args, **kwargs):
super(AdaBoostClassifierCoef, self).fit(*args, **kwargs)
self.coef_ = self.feature_importances_
class ExtraTreesClassifierCoef(ExtraTreesClassifier):
"""An Extra Trees classifier where the coefficients are set to
the feature importances for Recursive Feature Elimination
to work.
"""
def fit(self, *args, **kwargs):
super(ExtraTreesClassifierCoef, self).fit(*args, **kwargs)
self.coef_ = self.feature_importances_
class RandomForestClassifierCoef(RandomForestClassifier):
"""A Random Forest classifier where the coefficients are set to
the feature importances for Recursive Feature Elimination
to work.
"""
def fit(self, *args, **kwargs):
super(RandomForestClassifierCoef, self).fit(*args, **kwargs)
self.coef_ = self.feature_importances_
class GradientBoostingClassifierCoef(GradientBoostingClassifier):
"""A Gradient Boostin classifier where the coefficients are set to
the feature importances for Recursive Feature Elimination
to work.
"""
def fit(self, *args, **kwargs):
super(GradientBoostingClassifierCoef, self).fit(*args, **kwargs)
self.coef_ = self.feature_importances_
#
# Define estimator map
#
estimator_map = {'AB' : AdaBoostClassifierCoef,
'GB' : GradientBoostingClassifierCoef,
'GBR' : GradientBoostingRegressor,
'KNN' : KNeighborsClassifier,
'KNR' : KNeighborsRegressor,
'LOGR' : LogisticRegression,
'LR' : LinearRegression,
'LSVC' : LinearSVC,
'LSVM' : SVC,
'NB' : MultinomialNB,
'RBF' : SVC,
'RF' : RandomForestClassifierCoef,
'RFR' : RandomForestRegressor,
'SVM' : SVC,
'XGB' : xgb.XGBClassifier,
'XGBM' : xgb.XGBClassifier,
'XGBR' : xgb.XGBRegressor,
'XT' : ExtraTreesClassifierCoef,
'XTR' : ExtraTreesRegressor
}
#
# Function get_algos_config
#
def get_algos_config(cfg_dir):
r"""Read the algorithms configuration file.
Parameters
----------
cfg_dir : str
The directory where the configuration file ``algos.yml``
is stored.
Returns
-------
specs : dict
The specifications for determining which algorithms to run.
"""
logger.info("Algorithm Configuration")
# Read the configuration file
full_path = SSEP.join([cfg_dir, 'algos.yml'])
with open(full_path, 'r') as ymlfile:
specs = yaml.load(ymlfile)
# Ensure each algorithm has required keys
required_keys = ['model_type', 'params', 'grid', 'scoring']
for algo in specs:
algo_keys = list(specs[algo].keys())
if set(algo_keys) != set(required_keys):
logger.warning("Algorithm %s is missing the required keys %s",
algo, required_keys)
logger.warning("Keys found instead: %s", algo_keys)
else:
# determine whether or not model type is valid
model_types = {x.name: x.value for x in ModelType}
model_type = specs[algo]['model_type']
if model_type in model_types:
specs[algo]['model_type'] = ModelType(model_types[model_type])
else:
raise ValueError("algos.yml model:type %s unrecognized" % model_type)
# Algorithm Specifications
return specs
#
# Function get_estimators
#
# AdaBoost (feature_importances_)
# Gradient Boosting (feature_importances_)
# K-Nearest Neighbors (NA)
# Linear Regression (coef_)
# Linear Support Vector Machine (coef_)
# Logistic Regression (coef_)
# Naive Bayes (coef_)
# Radial Basis Function (NA)
# Random Forest (feature_importances_)
# Support Vector Machine (NA)
# XGBoost Binary (NA)
# XGBoost Multi (NA)
# Extra Trees (feature_importances_)
# Random Forest (feature_importances_)
# Randomized Lasso
def get_estimators(model):
r"""Define all the AlphaPy estimators based on the contents
of the ``algos.yml`` file.
Parameters
----------
model : alphapy.Model
The model object containing global AlphaPy parameters.
Returns
-------
estimators : dict
All of the estimators required for running the pipeline.
"""
# Extract model data
directory = model.specs['directory']
n_estimators = model.specs['n_estimators']
n_jobs = model.specs['n_jobs']
seed = model.specs['seed']
verbosity = model.specs['verbosity']
# Initialize estimator dictionary
estimators = {}
# Global parameter substitution fields
ps_fields = {'n_estimators' : 'n_estimators',
'n_jobs' : 'n_jobs',
'nthread' : 'n_jobs',
'random_state' : 'seed',
'seed' : 'seed',
'verbose' : 'verbosity'}
# Get algorithm specifications
config_dir = SSEP.join([directory, 'config'])
algo_specs = get_algos_config(config_dir)
# Create estimators for all of the algorithms
for algo in algo_specs:
model_type = algo_specs[algo]['model_type']
params = algo_specs[algo]['params']
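        # substitute the global model specs (n_estimators, n_jobs, seed, verbosity) for any matching placeholder parameters below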
for param in params:
if param in ps_fields and isinstance(param, str):
algo_specs[algo]['params'][param] = eval(ps_fields[param])
func = estimator_map[algo]
est = func(**params)
grid = algo_specs[algo]['grid']
scoring = algo_specs[algo]['scoring']
estimators[algo] = Estimator(algo, model_type, est, grid, scoring)
# return the entire classifier list
return estimators
|
from urllib.parse import parse_qsl, urlsplit
from social_apis.exceptions.social_api_error import *
def iterator(api_method, return_pages=False, **params):
r"""Returns a generator for results of specific supported method.
Usage::
>>> from social_apis.networks.facebook import Facebook
>>> facebook = Facebook(access_token="<<access_token>>")
>>> response = iterator(facebook.search, q='python')
>>> for item in response:
>>> print (item)
"""
if not callable(api_method):
raise TypeError('iterator() takes a Network function as its first argument.')
if not hasattr(api_method, 'iter_key') or not hasattr(api_method, 'iter_field'):
raise IteratorError(f'Unable to create generator for method "{api_method.__name__}"')
iter_key = api_method.iter_key
iter_field = api_method.iter_field
iter_next = api_method.iter_next if hasattr(api_method, 'iter_next') else iter_field
iter_mode = api_method.iter_mode if hasattr(api_method, 'iter_mode') else 'cursor'
while True:
content = api_method(**params)
if not content:
return
results = get_field(content, iter_key)
if return_pages:
yield results
else:
for result in results:
yield result
try:
next_field = get_field(content, iter_next)
if str(next_field).lower() in ['none', 'null', '0', '[]', '{}', '']:
return
if iter_mode == 'cursor':
params[iter_field] = next_field
elif iter_mode == 'offset':
params[iter_field] = int(params[iter_field]) + 10 if iter_field in params else 0
except (TypeError, ValueError):
raise IteratorError('Unable to generate next page of search results, `page` is not a number.')
except (KeyError, AttributeError):
raise IteratorError('Unable to generate next page of search results, content has unexpected structure.')
def get_field(content, field, raise_error=False):
r"""
    Return the value at the dotted path 'field' inside the nested dict 'content'.
    'field' is split on dots and 'content' is traversed key by key.
    Return None if the field is not present; raise KeyError instead if raise_error=True.
"""
fields = field.split('.')
result = content
for f in fields:
if f in result:
result = result.get(f)
else:
if raise_error:
raise KeyError("Content has unexpected structure.")
else:
result = None
break
return result
|
# Copyright 2020 KCL-BMEIS - King's College London
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from exeteracovid.algorithms.covid_test import ValidateCovidTestResultsFacVersion1, ValidateCovidTestResultsFacVersion2
from exeteracovid.algorithms.temperature import ValidateTemperature1
from exeteracovid.algorithms.weight_height_bmi import ValidateHeight1, ValidateHeight2
class ParsingSchemaVersionError(Exception):
pass
class ClassEntry:
def __init__(self, key, class_definition, version_from, version_to=None):
self.key = key
self.class_definition = class_definition
self.version_from = version_from
self.version_to = version_to
def __str__(self):
output = 'ClassEntry(field={}, class_definition={}, version_from={}, version_to={})'
return output.format(self.key, self.class_definition,
self.version_from, self.version_to)
def __repr__(self):
return self.__str__()
parsing_schemas = [1, 2]
class ParsingSchema:
def __init__(self, schema_number):
#self.parsing_schemas = [1, 2]
self.functors = {
'validate_weight_height_bmi': [
ClassEntry('validate_weight_height_bmi', ValidateHeight1, 1, 2),
ClassEntry('validate_weight_height_bmi', ValidateHeight2, 2, None)],
'validate_temperature': [
ClassEntry('validate_temperature', ValidateTemperature1, 1, None)],
'clean_covid_progression': [
ClassEntry('validate_covid_fields', ValidateCovidTestResultsFacVersion1, 1, 2),
ClassEntry('validate_covid_fields', ValidateCovidTestResultsFacVersion2, 2, None)]
}
self._validate_schema_number(schema_number)
self.class_entries = dict()
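        # for each functor, keep the class whose [version_from, version_to) range contains schema_number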
for f in self.functors.items():
for e in f[1]:
if schema_number >= e.version_from and\
(e.version_to is None or schema_number < e.version_to):
self.class_entries[f[0]] = e.class_definition
break
def _validate_schema_number(self, schema_number):
if schema_number not in parsing_schemas:
raise ParsingSchemaVersionError(
f'{schema_number} is not a valid cleaning schema value')
|
import os
from typing import Union
import sentry_sdk
from dependency_injector import containers
from flask_cors import CORS
from sentry_sdk.integrations.flask import FlaskIntegration
from .containers.dev_container import DevAppContainer
from .containers.prod_container import ProdAppContainer
env = os.environ.get('FLASK_ENV')
sentry_sdk.init(
dsn=os.environ.get('SENTRY_DSN'),
integrations=[FlaskIntegration()],
traces_sample_rate=1.0,
environment=env,
)
def get_container(env: str) -> containers.DeclarativeContainer:
if env == 'production':
return ProdAppContainer()
return DevAppContainer()
def create_app(container: Union[DevAppContainer, ProdAppContainer]):
app = container.app()
app.container = container
app.debug = True
app.add_url_rule(
'/comments/<string:post_slug>',
view_func=container.get_comments_view.as_view(),
methods=('GET',),
)
app.add_url_rule(
'/comments/<string:post_slug>',
view_func=container.add_comment_view.as_view(),
methods=('POST', 'OPTIONS'),
)
return app
container = get_container(env)
app = create_app(container)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
|
import pytest, os, uuid, warnings
from tenable.downloads import Downloads
from tenable.errors import *
@pytest.fixture(scope='module')
def vcr_config():
return {
'filter_headers': [
('Authorization', 'Bearer 000'),
],
}
@pytest.fixture(autouse=True, scope='module')
def dl(request, vcr):
warnings.filterwarnings('ignore', category=DeprecationWarning)
return Downloads(os.getenv('DL_TOKEN'),
vendor='pytest',
product='pytenable-automated-testing')
|
from django.test import TestCase
from mock import Mock, patch, mock_open
from .dummyrunner_settings import (
c_creates_button,
c_creates_obj,
c_digs,
c_examines,
c_help,
c_idles,
c_login,
c_login_nodig,
c_logout,
c_looks,
c_moves,
c_moves_n,
c_moves_s,
c_socialize,
)
try:
import memplot
except ImportError:
memplot = Mock()
class TestDummyrunnerSettings(TestCase):
def setUp(self):
self.client = Mock()
self.client.cid = 1
self.client.counter = Mock(return_value=1)
self.client.gid = "20171025161153-1"
self.client.name = "Dummy-%s" % self.client.gid
self.client.password = "password-%s" % self.client.gid
self.client.start_room = "testing_room_start_%s" % self.client.gid
self.client.objs = []
self.client.exits = []
def clear_client_lists(self):
self.client.objs = []
self.client.exits = []
def test_c_login(self):
self.assertEqual(
c_login(self.client),
(
"create %s %s" % (self.client.name, self.client.password),
"connect %s %s" % (self.client.name, self.client.password),
"@dig %s" % self.client.start_room,
"@teleport %s" % self.client.start_room,
"@dig testing_room_1 = exit_1, exit_1",
),
)
def test_c_login_no_dig(self):
self.assertEqual(
c_login_nodig(self.client),
(
"create %s %s" % (self.client.name, self.client.password),
"connect %s %s" % (self.client.name, self.client.password),
),
)
def test_c_logout(self):
self.assertEqual(c_logout(self.client), "@quit")
def perception_method_tests(self, func, verb, alone_suffix=""):
self.assertEqual(func(self.client), "%s%s" % (verb, alone_suffix))
self.client.exits = ["exit1", "exit2"]
self.assertEqual(func(self.client), ["%s exit1" % verb, "%s exit2" % verb])
self.client.objs = ["foo", "bar"]
self.assertEqual(func(self.client), ["%s foo" % verb, "%s bar" % verb])
self.clear_client_lists()
def test_c_looks(self):
self.perception_method_tests(c_looks, "look")
def test_c_examines(self):
self.perception_method_tests(c_examines, "examine", " me")
def test_idles(self):
self.assertEqual(c_idles(self.client), ("idle", "idle"))
def test_c_help(self):
self.assertEqual(
c_help(self.client),
("help", "help @teleport", "help look", "help @tunnel", "help @dig"),
)
def test_c_digs(self):
self.assertEqual(c_digs(self.client), ("@dig/tel testing_room_1 = exit_1, exit_1"))
self.assertEqual(self.client.exits, ["exit_1", "exit_1"])
self.clear_client_lists()
def test_c_creates_obj(self):
objname = "testing_obj_1"
self.assertEqual(
c_creates_obj(self.client),
(
"@create %s" % objname,
'@desc %s = "this is a test object' % objname,
"@set %s/testattr = this is a test attribute value." % objname,
"@set %s/testattr2 = this is a second test attribute." % objname,
),
)
self.assertEqual(self.client.objs, [objname])
self.clear_client_lists()
def test_c_creates_button(self):
objname = "testing_button_1"
typeclass_name = "contrib.tutorial_examples.red_button.RedButton"
self.assertEqual(
c_creates_button(self.client),
("@create %s:%s" % (objname, typeclass_name), "@desc %s = test red button!" % objname),
)
self.assertEqual(self.client.objs, [objname])
self.clear_client_lists()
def test_c_socialize(self):
self.assertEqual(
c_socialize(self.client),
(
"ooc Hello!",
"ooc Testing ...",
"ooc Testing ... times 2",
"say Yo!",
"emote stands looking around.",
),
)
def test_c_moves(self):
self.assertEqual(c_moves(self.client), "look")
self.client.exits = ["south", "north"]
self.assertEqual(c_moves(self.client), ["south", "north"])
self.clear_client_lists()
def test_c_move_n(self):
self.assertEqual(c_moves_n(self.client), "north")
def test_c_move_s(self):
self.assertEqual(c_moves_s(self.client), "south")
class TestMemPlot(TestCase):
@patch.object(memplot, "_idmapper")
@patch.object(memplot, "os")
@patch.object(memplot, "open", new_callable=mock_open, create=True)
@patch.object(memplot, "time")
@patch("evennia.utils.idmapper.models.SharedMemoryModel.flush_from_cache", new=Mock())
def test_memplot(self, mock_time, mocked_open, mocked_os, mocked_idmapper):
if isinstance(memplot, Mock):
return
from evennia.utils.create import create_script
mocked_idmapper.cache_size.return_value = (9, 5000)
mock_time.time = Mock(return_value=6000.0)
script = create_script(memplot.Memplot)
script.db.starttime = 0.0
mocked_os.popen.read.return_value = 5000.0
script.at_repeat()
handle = mocked_open()
handle.write.assert_called_with("100.0, 0.001, 0.001, 9\n")
script.stop()
|
# -*- coding: utf-8 -*-
from providerModules.a4kScrapers import core
class sources(core.DefaultSources):
def __init__(self, *args, **kwargs):
super(sources, self).__init__(__name__, *args, **kwargs)
def _soup_filter(self, response):
try:
response = core.json.loads(response.text)
except:
core.tools.log('a4kScrapers.solidtorrents: fail to parse json \n' + response.text)
return []
torrents = response.get('results', [])
results = []
for torrent in torrents:
result = lambda: None
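            # a bare lambda serves as a lightweight namespace to attach the result attributes to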
result.hash = torrent.get('infohash', '')
result.title = torrent.get('title', '')
result.size = '%s B' % torrent['size'] if torrent.get('size', None) is not None else None
result.seeds = torrent.get('swarm', {}).get('seeders', None)
results.append(result)
return results
|
# Copyright (c) 2021 Aiven, Helsinki, Finland. https://aiven.io/
from ._utils import find_component, find_user, format_uri
from .common import ConnectionInfoError
class PGConnectionInfo:
def __init__(self, host, port, username, dbname, password, sslmode):
self.host = host
self.port = port
self.username = username
self.dbname = dbname
self.password = password
self.sslmode = sslmode
@classmethod
def from_service(cls, service, *, route, usage, privatelink_connection_id, username, dbname, sslmode):
if service["service_type"] != "pg":
raise ConnectionInfoError("Cannot format pg connection info for service type {service_type}".format_map(service))
info = find_component(
service["components"], route=route, usage=usage, privatelink_connection_id=privatelink_connection_id
)
host = info["host"]
port = info["port"]
user = find_user(service, username)
password = user.get("password")
if password is None:
raise ConnectionInfoError(f"Could not find password for username {username}")
return cls(host=host, port=port, username=username, dbname=dbname, password=password, sslmode=sslmode)
def params(self):
return {
"host": self.host,
"port": self.port,
"user": self.username,
"dbname": self.dbname,
"password": self.password,
"sslmode": self.sslmode,
}
def uri(self):
return format_uri(
scheme="postgres",
username=self.username,
password=self.password,
host=self.host,
port=self.port,
path=f"/{self.dbname}",
query={"sslmode": self.sslmode},
)
def connection_string(self):
return f"host='{self.host}' port='{self.port}' user={self.username} dbname='{self.dbname}'"
def psql(self):
return ["psql", self.uri()]
|
"""
src_constants.py
holds global speedrun.com API constants
"""
from data_models import CelesteGames
from dacite import from_dict
PLATFORMS : dict = {
"PlayStation 4" : "nzelkr6q",
"Xbox One" : "o7e2mx6w",
"PC" : "8gej2n93",
"Switch" : "7m6ylw9p",
"Google Stadia" : "o064z1e3",
"PlayStation 5" : "4p9zjrer",
"Xbox Series S" : "o7e2xj9w",
"Xbox Series X" : "nzelyv9q",
"Xbox One X" : "4p9z0r6r",
"Xbox One S" : "o064j163"
}
CELESTE_GAMES : CelesteGames = from_dict(
data_class=CelesteGames,
data={
"games" : [
{
"id" : "o1y9j9v6",
"name" : "Celeste",
"version" : {
"variable_id" : "38do9y4l",
"default_ver" : "5q8e7y3q",
"invalid_ver" : {
"nzelkr6q" : ["zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"o7e2mx6w" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"o7e2xj9w" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"nzelyv9q" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"4p9z0r6r" : ["zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"o064j163" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"8gej2n93" : [],
"7m6ylw9p" : ["zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"o064z1e3" : ["810gdx5l", "zqoo4vxq"],
"4p9zjrer" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51", "p12wv871", "z1992w01", "z19rn0j1", "jq6766n1", "81w8xj91", "5lmn0eyq", "8107j55l", "z19zn7yq", "0q528evq", "0q52262q", "4qynnv41", "81wn7y51", "21d26k3l", "21d4ej31", "5lekw0zl", "gq7n65n1", "jq64d2j1", "gq7nm6n1", "81p7kme1", "814xmmwq", "zqoyep21", "p125p721", "klrzv5o1", "xqkr09d1", "81wmoemq", "4qyxe02l", "mlny6xj1", "8105e42q", "21d47051", "xqkrkxk1", "9qjzy331", "jq64kdj1", "5lmxj5m1", "81wmwkvq", "zqoyw7x1", "013veyxl", "rqv4wn6q", "8142k7kl", "5lekgwkl", "0q5oe021", "4lxxwj4l", "814xenvq", "z194gw8l", "p125e841", "81p7wg81", "klrz4jo1"]
}
}
},
{
"id" : "j1ne9me1",
"name" : "Celeste Category Extensions",
"version" : {
"variable_id" : "dlomdgd8",
"default_ver" : "xqkzpg4q",
"invalid_ver" : {
"nzelkr6q" : ["5lmvmd4l", "0137g6xl", "5q887vyq", "5lmg3eyl", "5lemyr51", "4lxe2o21"],
"o7e2mx6w" : ["5lmvmd4l", "0137g6xl", "5q887vyq", "5lmg3eyl", "5lemyr51", "4lxe2o21"],
"o7e2xj9w" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"nzelyv9q" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"4p9z0r6r" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"o064j163" : ["810gdx5l", "zqoo4vxq", "21dg78p1", "9qjxmo0l", "rqve2p71", "5lemyz51"],
"8gej2n93" : [],
"7m6ylw9p" : ["0137g6xl", "5q887vyq", "5lmg3eyl", "5lemyr51", "4lxe2o21"],
"o064z1e3" : ["5lmvmd4l", "0137g6xl"],
"4p9zjrer" : ["5lmvmd4l", "0137g6xl", "5q887vyq", "5lmg3eyl", "5lemyr51", "4lxe2o21", "jqz5gr4q", "21g49wx1", "p12z7gv1", "8107krpl", "mln3j2nq", "81pnr3nl", "8107jwwl", "810x4j51", "81pxr3nq", "gq77yvrq", "21gxkwoq", "jqz0ork1", "klr58ew1", "21d2pjgl", "5q8gnx6l", "4qynkvd1", "mln5g4nl", "810x2op1", "9qjgp6oq", "jq6289ol", "5lmd7r0l", "81wn6v61", "zqo5z84l", "0132zoyq", "rqvmjkyq", "5leep26l", "0q52dvvq", "4lxym5gq", "8142rokl", "z19z024q", "p122gv21", "81pxrynq", "xqkzpe4q", "gq77y5rq", "21gxknoq", "jqz0onk1", "klr583w1", "21d2pkgl", "5q8gnk6l", "4qynkzd1", "mln5g8nl", "810x2vp1", "9qjgp7oq", "jq6285ol", "5lmd7o0l", "81wn6461", "z196ev81", "jq6ngvnl", "zqokn7gl"]
}
}
},
{
"id" : "w6jl3ked",
"name" : "Modded Celeste",
"version" : {
"variable_id" : "p853km0n",
"default_ver" : "z19rw541",
"invalid_ver" : {
"8gej2n93" : []
}
}
},
{
"id" : "j1lqq576",
"name" : "Into The Jungle",
"version" : {
"variable_id" : "9l7x0xqn",
"default_ver" : "5q8p493l",
"invalid_ver" : {
"8gej2n93" : []
}
}
},
{
"id" : "y6554g36",
"name" : "Glyph",
"version" : {
"variable_id" : "5ly14pyl",
"default_ver" : "21dwwrgl",
"invalid_ver" : {
"8gej2n93" : []
}
}
},
{
"id" : "w6j7lx46",
"name" : "D-Sides",
"version" : {
"variable_id" : "e8m5krxn",
"default_ver" : "mlnnnrnl",
"invalid_ver" : {
"8gej2n93" : []
}
}
},
{
"id" : "46w3p271",
"name" : "Quickie Mountain 2",
"version" : {
"variable_id" : "68kodrkn",
"default_ver" : "013d623l",
"invalid_ver" : {
"8gej2n93" : []
}
}
},
{
"id" : "k6qw4q06",
"name" : "2020 Spring Collab",
"version" : {
"variable_id" : "6njzg4el",
"default_ver" : "0q5p3zrl",
"invalid_ver" : {
"8gej2n93" : []
}
}
}
]
}
)
|
from envyaml import EnvYAML
from nameko.standalone.rpc import ServiceRpcProxy
from nameko.exceptions import UnknownService
config = EnvYAML('config.yml')
CC = 'bnbbtc'
STREAM = 'depth'
SPEED = '1000ms'
LIMIT = 1000
with ServiceRpcProxy('listener', config) as proxy:
try:
proxy.get_order_book(CC, LIMIT)
proxy.start_stream(CC, STREAM, SPEED)
except UnknownService as ex:
print('Listener was not ready')
|
"""
Contains the elements made out of various layers which make up the
sublayers of the model.
"""
|
# -*- coding: utf-8 -*-
"""
@Time    : 2018/7/17 2:42 PM
@FileName: utils.py
@author: 王炳宁
@contact: wangbingning@sogou-inc.com
"""
import itertools
import multiprocessing
import pickle
import re
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
np.random.seed(10245)
def get_file_charset(filename):
import chardet
rawdata = open(filename, 'rb').read(1000)
result = chardet.detect(rawdata)
charenc = result['encoding']
return charenc
def DBC2SBC(ustring):
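    # convert full-width characters (the full-width space and the 0xFF01-0xFF5E block) to their half-width ASCII equivalents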
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
if not (0x0021 <= inside_code <= 0x7e):
rstring += uchar
continue
rstring += chr(inside_code)
return rstring
def write_lst_to_file(lst, filename, encoding='utf-8'):
output = '\n'.join(lst)
with open(filename, 'w', encoding=encoding, errors='ignore') as f:
f.write(output)
def dump_file(obj, filename):
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)
def load_file(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
def get_model_parameters(model):
total = 0
for parameter in model.parameters():
if parameter.requires_grad:
tmp = 1
for a in parameter.size():
tmp *= a
total += tmp
return total
def remove_duplciate_lst(lst):
lst.sort()
return list(k for k, _ in itertools.groupby(lst))
def padding(sequence, pads=0, max_len=None, dtype='int32', return_matrix_for_size=False):
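    # pad (or truncate) a batch of variable-length sequences into a (batch, max_len) matrix filled
    # with `pads`, and also return each sequence's effective (possibly truncated) length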
    # decide whether sequence is a batch (list of lists) or a single flat sequence; the "True or" below forces the batch branch to always run
if True or isinstance(sequence[0], list):
v_length = [len(x) for x in sequence] # every sequence length
seq_max_len = max(v_length)
if (max_len is None) or (max_len > seq_max_len):
max_len = seq_max_len
v_length = list(map(lambda z: z if z <= max_len else max_len, v_length))
x = (np.ones((len(sequence), max_len)) * pads).astype(dtype)
for idx, s in enumerate(sequence):
trunc = s[:max_len]
x[idx, :len(trunc)] = trunc
if return_matrix_for_size:
            v_matrix = np.asanyarray([[1 if item < line else 0 for item in range(max_len)] for line in v_length],
                                     dtype=dtype)
return x, v_matrix
return x, np.asarray(v_length, dtype='int32')
else:
seq_len = len(sequence)
if max_len is None:
max_len = seq_len
v_vector = sequence + [0] * (max_len - seq_len)
padded_vector = np.asarray(v_vector, dtype=dtype)
v_index = [1] * seq_len + [0] * (max_len - seq_len)
padded_index = np.asanyarray(v_index, dtype=dtype)
return padded_vector, padded_index
def add2count(value, map):
if value not in map:
map[value] = 0
map[value] += 1
import os
def get_dir_files(dirname):
L = []
for root, dirs, files in os.walk(dirname):
for file in files:
L.append(os.path.join(root, file))
return L
def clean(txt):
txt = DBC2SBC(txt)
txt = txt.lower()
txt = re.sub('(\s*)?(<.*?>)?', '', txt)
return txt
def multi_process(func, lst, num_cores=multiprocessing.cpu_count(), backend='multiprocessing'):
workers = Parallel(n_jobs=num_cores, backend=backend)
output = workers(delayed(func)(one) for one in tqdm(lst))
# output = workers(delayed(func)(one) for one in lst)
return output
def get_file_info(filename):
with open(filename, encoding=get_file_charset(filename), errors='ignore') as f:
for line in f:
yield line
def evaluate_comqa(results, threshold=0.5):
precision = []
recall = []
f1 = []
accuracy = []
for one in results:
[pred, paras] = one
sample_a = 1.0e-9
sample_b = 1.0e-9
sample_c = 1.0e-9
num = 0
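        # sample_a: predicted positives, sample_b: gold positives, sample_c: correctly predicted positives (the 1e-9 terms avoid division by zero)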
if len(pred) < len(paras):
pred.extend([0.0] * len(paras))
for p, para in zip(pred, paras):
r = para[1]
num += 1
if p > threshold:
sample_a += 1
if r == 1:
sample_b += 1
if p > threshold and r == 1:
sample_c += 1
sample_precision = sample_c / sample_a
sample_recall = sample_c / sample_b
if sample_precision >= 0.999 and sample_recall >= 0.999:
acc = 1
else:
acc = 0
sample_f1 = 2 * sample_precision * sample_recall / (sample_recall + sample_precision)
precision.append(sample_precision)
recall.append(sample_recall)
f1.append(sample_f1)
accuracy.append(acc)
precision = np.mean(precision)
recall = np.mean(recall)
f1 = np.mean(f1)
accuracy = np.mean(accuracy)
macro_f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1, macro_f1, accuracy
|
from __future__ import print_function
import argparse, sys, os, warnings
import librosa
import numpy as np
from numpy import linalg as LA
import keras
from keras.models import model_from_json
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
FLAGS=None
def decide_class(prediction):
if prediction <= 0.5:
file_prediction = '😺 cat!!!'
else:
file_prediction = '🐶 dog!!!'
return file_prediction
def get_final_prediction(scores):
scores = [np.argmax(s) for s in scores]
# print(np.mean(scores))
return decide_class(np.mean(scores))
def pre_process_file(file, model):
if FLAGS.file_path == 'data/cats_dogs.wav':
file = os.getcwd() + '/' + file
ts, sr = librosa.load(file)
if model == 'mel':
frequency = librosa.feature.melspectrogram(y=ts,sr=sr)
mel_delta = librosa.feature.delta(frequency)
return frequency, mel_delta
else:
frequency = librosa.feature.melspectrogram(y=ts,sr=sr)
mfcc = librosa.feature.mfcc(S=librosa.power_to_db(frequency),sr=sr)
mfcc_delta = librosa.feature.delta(mfcc)
return mfcc, mfcc_delta
def process_file(feature, feature_delta):
if FLAGS.model_type.lower() == 'mel':
height = 128
else:
height = 20
window_size = 28
combined_features = np.stack((feature, feature_delta))
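    # combined_features has shape (2, height, frames): channel 0 is the feature, channel 1 its delta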
windows = int(combined_features.shape[2] / window_size)
combined_features = np.reshape(combined_features[:,:,0:windows*window_size], (2, height, windows*window_size))
data = []
for w in range(windows):
data.append(combined_features[:,:,w*window_size:(w+1)*window_size])
return np.array(data, dtype=np.float32)
def reshape_input(windows):
input_d = windows.shape[1] #Depth
input_h = windows.shape[2] #Height
input_w = windows.shape[3] #Width
if FLAGS.model_type.lower() == 'mel':
return windows.reshape(windows.shape[0], input_h, input_w, input_d)
else:
windows = windows - windows.mean()
windows = windows/LA.norm(windows)
return windows.reshape(windows.shape[0], input_h*input_w*input_d)
def predict(windows, model_type='mel'):
if model_type == 'mel':
model_path='keras_model_mel/saved_models/sound_classifier.json'
else:
model_path='keras_model_mfcc/saved_models/sound_classifier.json'
json_file = open(model_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
#adapting our input to the model that we'll use
windows = reshape_input(windows)
#generate predictions
scores = loaded_model.predict(windows)
print('We think this is a....')
print(get_final_prediction(scores))
def main(_):
if FLAGS.model_type.lower() not in ['mel', 'mfcc']:
        print("Sorry this model doesn't exist, choose from mel or mfcc")
sys.exit()
if FLAGS.file_path == 'data/cats_dogs.wav':
print('We will classify the audio in the file under data/cats_dogs/cat_1.wav')
elif '.wav' not in FLAGS.file_path:
print('Please submit an audio file in WAV format')
sys.exit()
elif os.path.exists(FLAGS.file_path) == False:
print('Cannot find the file, please resubmit')
sys.exit()
else:
        print("Let's classify this file: " + FLAGS.file_path)
feature, feature_delta = pre_process_file(FLAGS.file_path, FLAGS.model_type)
audio_windows = process_file(feature, feature_delta)
predict(audio_windows, FLAGS.model_type)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', type=str, default='mel', help='Choose from mel or mfcc model to classify.')
parser.add_argument('--file_path', type=str, default='data/cats_dogs/cat_1.wav', help='File you want to analyse.')
FLAGS, unparsed = parser.parse_known_args()
main(FLAGS)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import gtk
from cairis.core.Borg import Borg
from cairis.core.ObstacleParameters import ObstacleParameters
from cairis.core.GoalAssociationParameters import GoalAssociationParameters
from cairis.core.ObstacleEnvironmentProperties import ObstacleEnvironmentProperties
from NDImplementationDecorator import NDImplementationDecorator
__author__ = 'Shamal Faily'
class ObstacleNodeDialog:
def __init__(self,objt,environmentName,dupProperty,overridingEnvironment,builder):
self.window = builder.get_object("ObstacleNodeDialog")
b = Borg()
self.dbProxy = b.dbProxy
self.theEnvironmentName = environmentName
self.theObstacleAssociation = None
self.theObstacleId = -1
self.decorator = NDImplementationDecorator(builder)
obstacleCategories = self.dbProxy.getDimensionNames('obstacle_category_type')
self.obstacleAssociations = []
self.subObstacleAssociations = []
if (objt == None):
self.decorator.updateComboCtrl("obstacleCategoryCtrl",obstacleCategories,'')
self.decorator.updateButtonLabel("obstacleOkButton","Create")
self.isCreate = True
else:
self.theObstacleId= objt.id()
envProperty = objt.environmentProperty(self.theEnvironmentName)
self.obstacleAssociations = envProperty.goalRefinements()
self.subObstacleAssociations = envProperty.subGoalRefinements()
self.decorator.updateTextCtrl("obstacleNameCtrl",objt.name())
self.decorator.updateComboCtrl("obstacleCategoryCtrl",obstacleCategories,objt.category(environmentName,dupProperty))
self.decorator.updateMLTextCtrl("obstacleDefinitionCtrl",objt.definition(environmentName,dupProperty))
self.decorator.updateButtonLabel("obstacleOkButton","Update")
self.isCreate = False
self.window.resize(350,600)
def environmentProperties(self):
obsCat = self.decorator.getComboValue("obstacleCategoryCtrl")
obsDef = self.decorator.getMLText("obstacleDefinitionCtrl")
envProperties = ObstacleEnvironmentProperties(self.theEnvironmentName,'',obsDef,obsCat,self.obstacleAssociations,self.subObstacleAssociations)
return envProperties
def newObstacleParameters(self):
obsName = self.decorator.getText("obstacleNameCtrl")
envProperties = self.environmentProperties()
parameters = ObstacleParameters(obsName,'Obstacle refinement',[],[envProperties])
parameters.setId(self.theObstacleId)
return parameters
def existingObstacleParameters(self):
obsName = self.decorator.getText("obstacleNameCtrl")
modifiedProperties = self.environmentProperties()
envProperties = self.dbProxy.obstacleEnvironmentProperties(self.theObstacleId)
for idx,p in enumerate(envProperties):
if (p.name() == self.theEnvironmentName):
envProperties[idx] = modifiedProperties
parameters = ObstacleParameters(obsName,'Obstacle refinement',[],envProperties)
parameters.setId(self.theObstacleId)
return parameters
def parentObstacle(self,obsName,assocType):
self.theObstacleAssociation = GoalAssociationParameters(self.theEnvironmentName,obsName,'obstacle',assocType)
def on_obstacleOkButton_clicked(self,callback_data):
if (self.isCreate):
parameters = self.newObstacleParameters()
self.dbProxy.addObstacle(parameters)
self.theObstacleAssociation.theSubGoal = parameters.name()
self.theObstacleAssociation.theSubGoalDimension = 'obstacle'
self.theObstacleAssociation.theAlternativeId = 0
self.theObstacleAssociation.theRationale = ''
self.dbProxy.addGoalAssociation(self.theObstacleAssociation)
else:
parameters = self.existingObstacleParameters()
self.dbProxy.updateObstacle(parameters)
self.window.destroy()
def show(self):
self.window.show()
|
import math
import random
import weakref
from build_buy import is_location_outside
from interactions.constraints import CostFunctionBase
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, TunableRange, TunableTuple, Tunable, OptionalTunable
import sims4.math
import socials.geometry
import terrain
class SocialGroupCostFunction(CostFunctionBase):
def __init__(self, group, sim):
self._group_ref = weakref.ref(group)
self._sim = sim
def constraint_cost(self, position, orientation, routing_surface):
group = self._group_ref()
if group is None:
return 0.0
geometry = group.geometry
if not geometry or len(geometry) == 1 and self._sim in geometry:
ideal_position = group.position
effective_distance = (position - ideal_position).magnitude_2d()*2.0
score = socials.geometry.SocialGeometry.GROUP_DISTANCE_CURVE.get(effective_distance)
return -score
(base_focus, base_field) = socials.geometry._get_social_geometry_for_sim(self._sim)
transform = sims4.math.Transform(position, orientation)
multiplier = socials.geometry.score_transform(transform, self._sim, geometry, group.group_radius, base_focus, base_field)
offset = multiplier*socials.geometry.SocialGeometry.SCORE_STRENGTH_MULTIPLIER
if self._sim in geometry:
if sims4.math.vector3_almost_equal_2d(position, self._sim.position, epsilon=0.01):
offset += socials.geometry.SocialGeometry.SCORE_OFFSET_FOR_CURRENT_POSITION
return -offset
class PetGroupCostFunction(HasTunableFactory, AutoFactoryInit, CostFunctionBase):
FACTORY_TUNABLES = {'maximum_distance': TunableRange(description='\n Any distance to another Sim over this amount scores zero.\n ', tunable_type=float, default=1.0, minimum=0), 'minimum_distance': TunableRange(description='\n Any distance to another Sim under this amount scores zero.\n ', tunable_type=float, default=1.5, minimum=0), 'required_distance': TunableRange(description='\n Any position that requires the Sim to move less than this amount\n scores zero. This encourages Sims to move.\n ', tunable_type=float, default=0.75, minimum=0)}
SIDE_ARC_START = math.cos(sims4.math.PI/4)
SIDE_ARC_END = math.cos(sims4.math.PI*3/4)
def __init__(self, sim, target, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sim = sim
self._target_ref = target.ref()
def constraint_cost(self, position, orientation, routing_surface):
target = self._target_ref()
if target is None:
return 0.0
distance_to_position = (position - self._sim.position).magnitude()
if distance_to_position < self.required_distance:
return 0.0
vector_to_pos = position - target.position
distance_to_sim = vector_to_pos.magnitude()
if distance_to_sim <= self.minimum_distance or distance_to_sim > self.maximum_distance:
return 0.0
else:
unit_vector_to_sim = vector_to_pos/distance_to_sim
fwd = target.transform.orientation.transform_vector(sims4.math.Vector3.Z_AXIS())
angle = sims4.math.vector_dot(fwd, unit_vector_to_sim)
if angle <= PetGroupCostFunction.SIDE_ARC_START and angle >= PetGroupCostFunction.SIDE_ARC_END:
return -3.0
return 0.0
class ThrowingGroupCostFunction(HasTunableFactory, AutoFactoryInit, CostFunctionBase):
FACTORY_TUNABLES = {'maximum_distance': TunableRange(description='\n Any distance to another Sim over this amount will be penalized.\n ', tunable_type=float, default=10.0, minimum=0), 'minimum_distance': TunableRange(description='\n Any distance to another Sim under this amount will be penalized.\n ', tunable_type=float, default=3.0, minimum=0), 'adjustment_distance': TunableRange(description='\n Any position that requires the Sim to be at a distance less than\n this value will be penalized.\n ', tunable_type=float, default=5.0, minimum=0), 'location_tests': TunableTuple(description='\n Tests to run on the goal location to validate if it should be\n discouraged when using this social group.\n ', validate_snowmask=OptionalTunable(description='\n If enabled goals that do not match the snowmask value will\n be discouraged. This is used for winter to guarantee cases\n like snowball fight the Sims readjust and move around in places\n where there is snow.\n ', tunable=Tunable(description='\n Value snowmask should be greater than to pass this test.\n ', tunable_type=float, default=0.5)), validate_is_outside=OptionalTunable(description='\n If enabled goals that do not match the outside condition will\n be discouraged.\n ', tunable=Tunable(description='\n If True goals outside will be encouraged, if false only\n goals on the inside will be encouraged.\n ', tunable_type=bool, default=False)))}
INVALID_GOAL_SCORE = 20
def __init__(self, sim, target, force_readjust, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sim = sim
self._target_ref = target.ref()
self._force_readjust = force_readjust
def _score_location(self, position):
if self.location_tests.validate_snowmask is not None and terrain.get_snowmask_value(position) > self.location_tests.validate_snowmask:
return ThrowingGroupCostFunction.INVALID_GOAL_SCORE
elif self.location_tests.validate_is_outside is not None and self.location_tests.validate_is_outside != is_location_outside(position, self._sim.level):
return ThrowingGroupCostFunction.INVALID_GOAL_SCORE
return 0.0
def constraint_cost(self, position, orientation, routing_surface):
target = self._target_ref()
if target is None:
return 0.0
constraint_cost = 0.0
if self._sim.get_main_group() is None or self._sim.get_main_group().anchor is None:
return constraint_cost
vector_to_pos = position - target.intended_position
distance_to_sim = vector_to_pos.magnitude()
if distance_to_sim <= self.minimum_distance:
return ThrowingGroupCostFunction.INVALID_GOAL_SCORE
constraint_cost += self._score_location(position)
vector_to_anchor = position - self._sim.get_main_group().anchor.position
distance_to_anchor = vector_to_anchor.magnitude_squared()
        constraint_cost -= distance_to_anchor
distance_to_position = (position - self._sim.intended_position).magnitude()
if distance_to_position < self.adjustment_distance:
constraint_cost += ThrowingGroupCostFunction.INVALID_GOAL_SCORE
return constraint_cost
|
"""
API to convert Cadene's models
Supported models:
-se_resnet50
-se_resnet101
-se_resnet152
-cafferesnet101
-bninception
-fbresnet152
-resnet18
-resnet34
-resnet50
-resnet101
-resnet152
Convert and save models with cadene_to_tf
Examples:
cadene_to_tf(modelList=['se_resnet50'], quiet=False)
cadene_to_tf(modelList=['se_resnet50',
             'bninception(pretrained=None)'])
cadene_to_tf(modelList='models.txt')
"""
from .cadene_to_tf import cadene_to_tf
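# A minimal usage sketch based on the docstring above. The importing package
# name is an assumption (only the relative import is visible here), and the
# converted checkpoints land wherever cadene_to_tf writes them by default:
#
#     from <this_package> import cadene_to_tf
#     cadene_to_tf(modelList=['se_resnet50', 'resnet18'], quiet=True)
#     cadene_to_tf(modelList='models.txt')  # one model name per line (assumed)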
|
import unittest
from ilmklib import Graph
class TestGraph(unittest.TestCase):
def test_simple_errors(self):
g = Graph()
self.assertFalse(g.is_cyclic())
g.add_vertex("a")
with self.assertRaises(Exception):
g.add_vertex("a")
self.assertFalse(g.is_cyclic())
g.add_edge("a", "a")
self.assertTrue(g.is_cyclic())
g = Graph()
self.assertEqual(len(g), 0)
g.add_vertex("a")
self.assertEqual(len(g), 1)
self.assertTrue("a" in g)
with self.assertRaises(Exception):
# Try to add an edge with a dst that isn't in the graph
g.add_edge("d", "a")
with self.assertRaises(Exception):
# Try to add an edge with a src that isn't in the graph
g.add_edge("a", "d")
def test_simple_cyclic(self):
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_edge("b", "a")
g.add_edge("c", "b")
g.add_edge("a", "c")
self.assertTrue(g.is_cyclic())
def test_get_items(self):
"""
Test that the items function iterates over the entire set of vertices
"""
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
seen = []
for k, v in g.items():
seen.append(k)
self.assertEqual(sorted(seen), sorted(["a", "b", "c"]))
def test_complicated_cyclic(self):
""" Create a tree structure where each node points to its own numerical
value divided by 2 (as an int)
e.g. 1 has an edge to 0
2 and 3 have an edge to 1
4 and 5 have an edge to 2
etc.
"""
g = Graph()
for i in range(100):
g.add_vertex(i)
for i in range(100):
if i == 0:
continue
o = int(i / 2)
g.add_edge(o, i)
self.assertFalse(g.is_cyclic())
# add in an edge from 1 to 99 and ensure that the cycle is detected
# (99 -> 49 -> 24 -> 12 -> 6 -> 3 -> 1 -> 99)
g.add_edge(99, 1)
# The whole graph should be deterministically cyclic
for i in range(100):
self.assertTrue(g.is_cyclic(True))
def test_complicated_disconnected_cyclic(self):
""" Create a tree structure where each node points to its own numerical
value divided by 2 (as an int)
e.g. 1 has an edge to 0
2 and 3 have an edge to 1
4 and 5 have an edge to 2
etc.
"""
g = Graph()
for i in range(100):
g.add_vertex(i)
for i in range(1, 100):
o = int(i / 2)
g.add_edge(o, i)
self.assertFalse(g.is_cyclic(True))
# Create a small cycle of 3 vertices
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_edge("b", "a")
g.add_edge("c", "b")
g.add_edge("a", "c")
# The whole graph should be deterministically cyclic
for i in range(100):
self.assertTrue(g.is_cyclic(True))
def test_disconnected_cyclic(self):
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_vertex("e")
g.add_vertex("f")
g.add_vertex("g")
# a -> b -> c -> a is a cyclic graph
g.add_edge("b", "a")
g.add_edge("c", "b")
g.add_edge("a", "c")
# e -> f -> g is not cyclic, but is disconnected
g.add_edge("f", "e")
g.add_edge("g", "f")
# The whole graph should be deterministically cyclic
for i in range(100):
self.assertTrue(g.is_cyclic(True))
def test_c_source(self):
g = Graph()
g.add_vertex("source1.c")
g.add_vertex("source2.c")
g.add_vertex("source3.c")
g.add_vertex("header1.h")
g.add_vertex("header2.h")
g.add_vertex("header3.h")
g.add_vertex("common1.h")
g.add_vertex("common2.h")
g.add_vertex("source1.o")
g.add_vertex("source2.o")
g.add_vertex("source3.o")
g.add_vertex("binary")
g.add_edge("source1.o", "source1.c")
g.add_edge("source2.o", "source2.c")
g.add_edge("source3.o", "source3.c")
g.add_edge("source1.o", "header1.h")
g.add_edge("source2.o", "header2.h")
g.add_edge("source3.o", "header3.h")
g.add_edge("source1.o", "common1.h")
g.add_edge("source2.o", "common1.h")
g.add_edge("source2.o", "common2.h")
g.add_edge("source3.o", "common2.h")
g.add_edge("binary", "source1.o")
g.add_edge("binary", "source2.o")
g.add_edge("binary", "source3.o")
self.assertFalse(g.is_cyclic())
deps = list(g.get_direct_predecessors("binary"))
self.assertEqual(len(deps), 3)
self.assertIn("source1.o", deps)
self.assertIn("source2.o", deps)
self.assertIn("source3.o", deps)
deps = list(g.get_direct_successors("common1.h"))
self.assertEqual(len(deps), 2)
self.assertIn("source1.o", deps)
self.assertIn("source2.o", deps)
deps = list(g.get_all_predecessors("source2.o"))
self.assertEqual(len(deps), 4)
self.assertIn("source2.c", deps)
self.assertIn("header2.h", deps)
self.assertIn("common1.h", deps)
self.assertIn("common2.h", deps)
deps = list(g.get_all_successors("common1.h"))
self.assertEqual(len(deps), 3)
self.assertIn("source1.o", deps)
self.assertIn("source2.o", deps)
self.assertIn("binary", deps)
def test_multi_cyclic(self):
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_vertex("d")
g.add_vertex("e")
g.add_vertex("f")
g.add_vertex("g")
g.add_vertex("h")
g.add_vertex("i")
g.add_vertex("j")
"""
a e h |
/ \ / \ / \ |
d b f i |
\ / \ / \ / |
c g j |
"""
# a -> b -> c -> d -> a
g.add_edge("b", "a")
g.add_edge("c", "b")
g.add_edge("d", "c")
g.add_edge("a", "d")
# e -> b -> g -> f -> e
g.add_edge("b", "e")
g.add_edge("g", "b")
g.add_edge("f", "g")
g.add_edge("e", "f")
# h -> f -> j -> i -> h
g.add_edge("f", "h")
g.add_edge("h", "i")
g.add_edge("i", "j")
g.add_edge("j", "f")
for i in range(100):
self.assertTrue(g.is_cyclic(True))
def test_alternate_api(self):
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_vertex("d")
g.add_vertex("e")
g.add_vertex("f")
g.add_vertex("g")
g.add_edge("a", "b", "c", "d", "e", "f")
deps = list(g.get_direct_predecessors("a"))
self.assertIn("b", deps)
self.assertIn("c", deps)
self.assertIn("d", deps)
self.assertIn("e", deps)
self.assertIn("f", deps)
g = Graph()
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_vertex("d")
g.add_vertex("e")
g.add_vertex("f")
g.add_vertex("g")
g.add_edges("a", ["b", "c", "d", "e", "f"])
deps = list(g.get_direct_predecessors("a"))
self.assertIn("b", deps)
self.assertIn("c", deps)
self.assertIn("d", deps)
self.assertIn("e", deps)
self.assertIn("f", deps)
with self.assertRaises(TypeError):
g.add_edges("a", "b")
def test_reaching_recursion_depth(self):
g = Graph()
for i in range(2000):
g.add_vertex(i)
for i in range(1, 2000):
g.add_edge(i-1, i)
with self.assertRaises(RecursionError):
for i in range(100):
g.is_cyclic(True)
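# Conventional entry point so the suite can be run directly (for example with
# "python test_graph.py"; the file name is an assumption). Equivalent to
# running it through "python -m unittest".
if __name__ == '__main__':
    unittest.main()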
|
#!/usr/bin/env python3
# partial implementation of EDLIN
# (c) 2019 <christian.tschudin@unibas.ch>
import re
def editor(lines):
# expects an array of lines, returns an array of lines if modified else None
modif = False
curr = 0
while True:
cmd = input('*')
if len(cmd) == 0:
if curr >= len(lines): print("no line to edit")
else:
print(f"replace line {curr+1} (type <enter> to keep the line as is):")
print(lines[curr])
ln = input()
if ln != '':
lines[curr] = ln
modif = True
continue
orig = cmd
cmd = cmd.lower()
if cmd in ['?', 'h']:
print('''EDLIN help:
h this help text
q quit (any modification is lost)
e exit (modifications are saved)
<num> make line <num> the current line
d delete current line
i insert text before current line
l list from current line to end
p like 'l' but make last line the current line
s<text> search for <text>
The last group of commands can be prefixed with a range, which
is either a line number <num>, or a line number pair <from>,<to>''')
continue
if cmd.isnumeric():
n = int(cmd)
if n < 1 or n > len(lines): print("out of range")
else:
curr = n-1
print(f"{n}: {lines[curr]}")
continue
if cmd == 'q':
if modif:
cmd = input("there are changes: really quit? y/n [N]:")
if cmd.lower() != 'y':
continue
return None
if cmd == 'e': return lines if modif else None
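        # Parse an optional range prefix: either "<num><cmd>" (e.g. "5d") or
        # "<from>,<to><cmd>" (e.g. "3,7l"); "." may be used for the current
        # line in either position. The trailing command character is pulled
        # out of the match and the range is converted to 0-based indices.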
rng = re.match(r'([0-9.]+)([^0-9,.])|([0-9.]+),([0-9.]+)([^0-9.])', cmd)
if rng:
if rng.group(2):
cmd = rng.group(2)
if rng.group(1) == '.':
rng = (curr, curr)
else:
rng = ( int(rng.group(1))-1, int(rng.group(1))-1 )
else:
cmd = rng.group(5)
a = curr if rng.group(3) == '.' else int(rng.group(3))-1
b = curr if rng.group(4) == '.' else int(rng.group(4))-1
rng = ( a, b )
if rng[0] < 0 or rng[1] < 0 or rng[0] > rng[1]:
print("invalid range")
continue
if cmd == 'd':
if rng:
if rng[0] >= len(lines) or rng[1] >= len(lines):
print("invalid range")
continue
else:
rng = (curr, curr)
del lines[rng[0]:rng[1]+1]
curr = rng[0]
if curr == len(lines) and curr > 0: curr -= 1
modif = True
continue
if cmd == 'i':
if rng:
if rng[0] != rng[1] or rng[0] > len(lines):
print("invalid range")
continue
else:
rng = (curr, curr)
new = []
print("enter text, terminate with a single '.' on a line")
while True:
ln = input()
if ln == '.': break
new.append(ln)
lines = lines[:rng[0]] + new + lines[rng[0]:]
curr = rng[0] + len(new)
if curr == len(lines) and curr > 0: curr -= 1
print(f"{len(new)} line(s) inserted")
if len(new) > 0: modif = True
continue
if cmd in ['l', 'p']:
if not rng: rng = (curr, len(lines)-1)
for i in range(rng[0], rng[1]+1):
print(f"{i+1}: {lines[i]}")
if cmd == 'p': curr = rng[1]
continue
if cmd[0] == 's':
orig = orig[orig.index('s')+1:]
if not rng: rng = (0, len(lines)-1)
for i in range(rng[0], rng[1]+1):
if orig in lines[i]:
print(f"{i+1}: {lines[i]}")
cmd = input("correct entry? y/n [Y]:")
if len(cmd) == 0 or cmd in ['y', 'Y']:
curr = i
break
else:
print(f"'{orig}' not found")
continue
print(f"unknown command {cmd}")
# ---------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print(f"useage: {sys.argv[0]} <filename>")
else:
fn = sys.argv[1]
with open(fn, 'r') as f: buf = f.read()
if len(buf) > 0 and buf[-1] == '\n': buf = buf[:-1]
new = editor(buf.split('\n'))
        if new is not None:
with open(fn, 'w') as f: f.write('\n'.join(new) + '\n')
print(f"{len(new)} line(s) written to {fn}")
# eof
|
from functools import partial
from multipledispatch import dispatch
from webpub_manifest_parser.core.ast import (
Collection,
CollectionList,
CompactCollection,
Link,
LinkList,
Manifestlike,
Metadata,
)
from webpub_manifest_parser.core.semantic import (
ManifestSemanticError,
SemanticAnalyzer,
SemanticAnalyzerError,
)
from webpub_manifest_parser.odl.ast import ODLFeed, ODLLicense, ODLPublication
from webpub_manifest_parser.odl.registry import ODLMediaTypesRegistry
from webpub_manifest_parser.opds2.ast import OPDS2FeedMetadata
from webpub_manifest_parser.opds2.registry import OPDS2LinkRelationsRegistry
from webpub_manifest_parser.utils import encode, first_or_default
class ODLPublicationSemanticError(SemanticAnalyzerError):
"""Base class for semantic errors related to ODL 2.x feeds."""
def _format_message(
self, node, node_property=None, message=None, inner_exception=None
):
"""Format the error message.
:param node: ODL 2.x publication
:type node: webpub_manifest_parser.odl.ast.ODLPublication
:param node_property: AST node's property associated with the error
:type node_property: Optional[webpub_manifest_parser.core.properties.Property]
        :param message: Parameterized string describing the error, with a
            placeholder for the publication's title or identifier
:type message: Optional[str]
:param inner_exception: (Optional) Inner exception
:type inner_exception: Optional[Exception]
"""
if not isinstance(node, ODLPublication):
raise ValueError(
"Argument 'node' must be an instance of {0}".format(ODLPublication)
)
if node.metadata:
if node.metadata.title:
message = message.format(node.metadata.title)
elif node.metadata.identifier:
message = message.format(node.metadata.identifier)
return message
class ODLLicenseSemanticError(SemanticAnalyzerError):
"""Base class for semantic errors related to ODL 2.x feeds."""
def _format_message(
self, node, node_property=None, message=None, inner_exception=None
):
"""Format the error message.
:param node: ODL 2.x license
:type node: webpub_manifest_parser.odl.ast.ODLLicense
:param node_property: AST node's property associated with the error
:type node_property: Optional[webpub_manifest_parser.core.properties.Property]
        :param message: Parameterized string describing the error, with a
            placeholder for the license's identifier
:type message: Optional[str]
:param inner_exception: (Optional) Inner exception
:type inner_exception: Optional[Exception]
"""
if not isinstance(node, ODLLicense):
raise ValueError(
"Argument 'node' must be an instance of {0} class".format(ODLLicense)
)
if node.metadata:
message = message.format(node.metadata.identifier)
return message
ODL_FEED_MISSING_PUBLICATIONS_SUBCOLLECTION_ERROR = partial(
ManifestSemanticError,
message="ODL feed '{0}' does not contain required 'publications' subcollection",
)
ODL_FEED_CONTAINS_REDUNDANT_GROUPS_SUBCOLLECTIONS_ERROR = partial(
ManifestSemanticError,
message="ODL feed '{0}' contains redundant 'groups' subcollections",
)
ODL_FEED_CONTAINS_REDUNDANT_FACETS_SUBCOLLECTIONS_ERROR = partial(
ManifestSemanticError,
message="ODL feed '{0}' contains redundant 'facets' subcollections",
)
ODL_FEED_CONTAINS_REDUNDANT_NAVIGATION_SUBCOLLECTION_ERROR = partial(
ManifestSemanticError,
message="ODL feed '{0}' contains redundant 'navigation' subcollection",
)
ODL_PUBLICATION_MUST_CONTAIN_EITHER_LICENSES_OR_OA_ACQUISITION_LINK_ERROR = partial(
ODLPublicationSemanticError,
message="ODL publication '{0}' contains neither 'licenses' subcollection nor "
"an Open-Access Acquisition Link (http://opds-spec.org/acquisition/open-access)",
)
ODL_LICENSE_MUST_CONTAIN_SELF_LINK_TO_LICENSE_INFO_DOCUMENT_ERROR = partial(
ODLLicenseSemanticError,
message="ODL license '{0}' does not contain a 'self' link to the License Info Document",
)
ODL_LICENSE_MUST_CONTAIN_CHECKOUT_LINK_TO_LICENSE_STATUS_DOCUMENT_ERROR = partial(
ODLLicenseSemanticError,
message="ODL license '{0}' does not contain a 'checkout' link to the License Status Document",
)
class ODLSemanticAnalyzer(SemanticAnalyzer):
"""ODL semantic analyzer."""
@dispatch(Manifestlike)
def visit(self, node):
"""Perform semantic analysis of the manifest node.
:param node: Manifest-like node
:type node: Manifestlike
"""
super(ODLSemanticAnalyzer, self).visit(node)
if not node.publications:
with self._record_errors():
raise ODL_FEED_MISSING_PUBLICATIONS_SUBCOLLECTION_ERROR(
node=node, node_property=ODLFeed.publications
)
if node.groups:
with self._record_errors():
raise ODL_FEED_CONTAINS_REDUNDANT_GROUPS_SUBCOLLECTIONS_ERROR(
node=node, node_property=ODLFeed.groups
)
if node.facets:
with self._record_errors():
raise ODL_FEED_CONTAINS_REDUNDANT_FACETS_SUBCOLLECTIONS_ERROR(
node=node, node_property=ODLFeed.facets
)
if node.navigation:
with self._record_errors():
raise ODL_FEED_CONTAINS_REDUNDANT_NAVIGATION_SUBCOLLECTION_ERROR(
node=node, node_property=ODLFeed.navigation
)
if node.links is not None:
with self._record_errors():
node.links.accept(self)
if node.publications is not None:
with self._record_errors():
node.publications.accept(self)
@dispatch(OPDS2FeedMetadata) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the feed's metadata.
:param node: Feed's metadata
:type node: OPDS2FeedMetadata
"""
# super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(Metadata) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the feed's metadata.
:param node: Feed's metadata
:type node: Metadata
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(ODLPublication) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the OPDS 2.0 publication.
:param node: ODL 2.0 publication
:type node: ODLPublication
"""
self._logger.debug(u"Started processing {0}".format(encode(node)))
if (not node.licenses or len(node.licenses) == 0) and (
            (not node.links or len(node.links) == 0)
or not node.links.get_by_rel(OPDS2LinkRelationsRegistry.OPEN_ACCESS.key)
):
with self._record_errors():
raise ODL_PUBLICATION_MUST_CONTAIN_EITHER_LICENSES_OR_OA_ACQUISITION_LINK_ERROR(
node=node, node_property=None
)
elif node.licenses:
node.licenses.accept(self)
self._logger.debug(u"Finished processing {0}".format(encode(node)))
@dispatch(LinkList) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the list of links.
        :param node: List of links
:type node: LinkList
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(Link) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the link node.
:param node: Link node
:type node: Link
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(CollectionList) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the list of sub-collections.
:param node: CollectionList node
:type node: CollectionList
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(CompactCollection) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the compact collection node.
:param node: Collection node
:type node: CompactCollection
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(Collection) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the collection node.
:param node: Collection node
:type node: Collection
"""
super(ODLSemanticAnalyzer, self).visit(node)
@dispatch(ODLLicense) # noqa: F811
def visit(self, node): # pylint: disable=E0102
"""Perform semantic analysis of the ODL license node.
:param node: ODLLicense node
:type node: ODLLicense
"""
self_link = (
first_or_default(node.links.get_by_rel(OPDS2LinkRelationsRegistry.SELF.key))
if node.links
else None
)
if (
not self_link
or self_link.type != ODLMediaTypesRegistry.ODL_LICENSE_INFO_DOCUMENT.key
):
with self._record_errors():
raise ODL_LICENSE_MUST_CONTAIN_SELF_LINK_TO_LICENSE_INFO_DOCUMENT_ERROR(
node=node, node_property=None
)
borrow_link = (
first_or_default(
node.links.get_by_rel(OPDS2LinkRelationsRegistry.BORROW.key)
)
if node.links
else None
)
if (
not borrow_link
or borrow_link.type != ODLMediaTypesRegistry.ODL_LICENSE_STATUS_DOCUMENT.key
):
with self._record_errors():
raise ODL_LICENSE_MUST_CONTAIN_CHECKOUT_LINK_TO_LICENSE_STATUS_DOCUMENT_ERROR(
node=node, node_property=None
)
|
import os
import json
import random
import numpy
import csv
import traceback
import subprocess
import concurrent.futures
import collections
import operator
import math
def convert_latency_nanos_to_millis(latencies):
return list(map(lambda x: x / 1e6, latencies))
def get_region(config, server):
for region, servers in config["server_regions"].items():
if server in servers:
return region
raise ValueError("{} not in any region".format(server))
def get_regions(config):
return set([get_region(config, s) for s in config["clients"] + config["server_names"]])
def get_num_regions(config):
return len(set([get_region(config, s) for s in config["clients"] + config["server_names"]]))
def calculate_statistics(config, local_out_directory):
runs = []
op_latencies = {}
op_times = {}
op_latency_counts = {}
op_tputs = {}
client_op_latencies = []
client_op_times = []
for i in range(config['num_experiment_runs']):
client_op_latencies.append({})
client_op_times.append({})
stats, run_op_latencies, run_op_times, run_op_latency_counts, run_op_tputs, run_client_op_latencies, run_client_op_times = calculate_statistics_for_run(
config, local_out_directory, i)
runs.append(stats)
for k, vv in run_op_latency_counts.items():
for j in range(len(run_op_latencies[k])):
if k in op_latencies:
if len(op_latencies[k]) <= j:
op_latencies[k].append(run_op_latencies[k][j])
else:
op_latencies[k][j] += run_op_latencies[k][j]
else:
op_latencies[k] = [run_op_latencies[k][j]]
if k in op_latency_counts:
op_latency_counts[k] += vv
else:
op_latency_counts[k] = vv
for k, v in run_op_times.items():
for j in range(len(v)):
if k in op_times:
if len(op_times[k]) <= j:
op_times[k].append(v[j])
else:
op_times[k][j] += v[j]
else:
op_times[k] = [v[j]]
        for k, v in run_op_tputs.items():
            if k in op_tputs:
                op_tputs[k] += v
            else:
                op_tputs[k] = v
for cid, opl in run_client_op_latencies.items():
client_op_latencies[i][cid] = opl
for cid, opt in run_client_op_times.items():
client_op_times[i][cid] = opt
stats = {}
stats['aggregate'] = {}
norm_op_latencies, norm_op_times = calculate_all_op_statistics(
config, stats['aggregate'], op_latencies, op_times, op_latency_counts, op_tputs)
for k, v in norm_op_latencies.items():
op_latencies['%s_norm' % k] = v
for k, v in norm_op_times.items():
op_times['%s_norm' % k] = v
stats['runs'] = runs
stats['run_stats'] = {}
ignored = {'cdf': 1, 'cdf_log': 1, 'time': 1}
for cat in runs[0]: # we assume there is at least one run
if not ('region-' in cat) and type(runs[0][cat]) is dict:
stats['run_stats'][cat] = {}
for s in runs[0][cat]:
if s in ignored:
continue
data = []
for run in runs:
data.append(run[cat][s])
stats['run_stats'][cat][s] = calculate_statistics_for_data(
data, cdf=False)
if 'region-' in cat:
stats['run_stats'][cat] = {}
for cat2 in runs[0][cat]: # we assume there is at least one run
if type(runs[0][cat][cat2]) is dict:
stats['run_stats'][cat][cat2] = {}
for s in runs[0][cat][cat2]:
if s in ignored:
continue
data = []
for run in runs:
data.append(run[cat][cat2][s])
stats['run_stats'][cat][cat2][s] = calculate_statistics_for_data(
data, cdf=False)
stats_file = STATS_FILE if 'stats_file_name' not in config else config['stats_file_name']
with open(os.path.join(local_out_directory, stats_file), 'w') as f:
json.dump(stats, f, indent=2, sort_keys=True)
return stats, op_latencies, op_times, client_op_latencies, client_op_times
def calculate_statistics_for_run(config, local_out_directory, run):
region_op_latencies = {}
region_op_times = {}
region_op_latency_counts = {}
region_op_tputs = {}
region_client_op_latencies = {}
region_client_op_times = {}
stats = {}
regions = get_regions(config)
for region in regions:
r_op_latencies = {}
r_op_latency_counts = {}
r_op_times = {}
r_op_tputs = {}
op_latencies = {}
op_latency_counts = {}
op_tputs = {}
op_times = {}
for client in config["clients"]:
if get_region(config, client) != region:
continue
client_dir = client
for k in range(config["client_processes_per_client_node"]):
client_out_file = os.path.join(local_out_directory,
client_dir,
'%s-%d-stdout-%d.log' % (client, k, run))
start_time_sec = -1
start_time_usec = -1
end_time_sec = {}
end_time_usec = {}
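                # Each stdout line is parsed as comma-separated fields. Lines
                # beginning with '#' are markers ("#start sec usec" and
                # "#end sec usec [client_id]"); other entries appear to be
                # (label, latency) pairs, optionally followed by an end time
                # and a client id. Latencies are assumed to be in nanoseconds
                # unless input/output scale factors are configured below.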
with open(client_out_file) as f:
ops = f.readlines()
foundEnd = False
for op in ops:
foundEnd = False
opCols = op.strip().split(',')
for x in range(0, len(opCols), 2):
if opCols[x].isdigit():
break
if len(opCols[x]) > 0 and opCols[x][0] == '#':
# special line, not an operation
if opCols[x] == '#start':
start_time_sec = float(opCols[x+1])
start_time_usec = float(
opCols[x+2])
break
elif opCols[x] == '#end':
cid = 0
if x + 3 < len(opCols):
cid = int(opCols[x+3])
end_time_sec[cid] = float(
opCols[x+1])
end_time_usec[cid] = float(
opCols[x+2])
foundEnd = True
break
if not opCols[x] in config['client_stats_blacklist']:
if 'input_latency_scale' in config:
opLat = float(
opCols[x+1]) / config['input_latency_scale']
else:
opLat = float(opCols[x+1]) / 1e9
if 'output_latency_scale' in config:
opLat = opLat * \
config['output_latency_scale']
else:
opLat = opLat * 1e3
opTime = 0.0
if x + 2 < len(opCols):
if 'input_latency_scale' in config:
opTime = float(
opCols[x+2]) / config['input_latency_scale']
else:
opTime = float(
opCols[x+2]) / 1e9
if 'output_latency_scale' in config:
opTime = opTime * \
config['output_latency_scale']
else:
opTime = opTime * 1e3
cid = 0
if x + 3 < len(opCols):
cid = int(opCols[x + 3])
if cid not in op_latencies:
op_latencies[cid] = {}
if cid not in op_times:
op_times[cid] = {}
if cid not in op_latency_counts:
op_latency_counts[cid] = {}
if opCols[x] in op_latencies[cid]:
op_latencies[cid][opCols[x]].append(
opLat)
else:
op_latencies[cid][opCols[x]] = [
opLat]
if opCols[x] in op_times[cid]:
op_times[cid][opCols[x]].append(
opTime)
else:
op_times[cid][opCols[x]] = [opTime]
if not opCols[x] in config['client_combine_stats_blacklist']:
if 'combined' in op_latencies[cid]:
op_latencies[cid]['combined'].append(
opLat)
else:
op_latencies[cid]['combined'] = [
opLat]
if 'combined' in op_times[cid]:
op_times[cid]['combined'].append(
opTime)
else:
op_times[cid]['combined'] = [
opTime]
if 'combined' in op_latency_counts[cid]:
op_latency_counts[cid]['combined'] += 1
else:
op_latency_counts[cid]['combined'] = 1
if 'client_combine_ro_ops' in config:
if opCols[x] in config['client_combine_ro_ops']:
rorw = 'ro'
else:
rorw = 'rw'
if rorw in op_latencies[cid]:
op_latencies[cid][rorw].append(
opLat)
else:
op_latencies[cid][rorw] = [
opLat]
if rorw in op_times[cid]:
op_times[cid][rorw].append(
opTime)
else:
op_times[cid][rorw] = [
opTime]
if rorw in op_latency_counts[cid]:
op_latency_counts[cid][rorw] += 1
else:
op_latency_counts[cid][rorw] = 1
if opCols[x] in op_latency_counts[cid]:
op_latency_counts[cid][opCols[x]] += 1
else:
op_latency_counts[cid][opCols[x]] = 1
if foundEnd:
run_time_sec = end_time_sec[cid]
run_time_sec += end_time_usec[cid] / 1e6
if cid in op_latency_counts:
for k1, v in op_latency_counts[cid].items():
print('Client %s-%d %d tput %s is %f (%d / %f)' % (
client, k, cid, k1, v / run_time_sec, v, run_time_sec))
if k1 in op_tputs:
op_tputs[k1] += v / \
run_time_sec
else:
op_tputs[k1] = v / run_time_sec
client_stats_file = os.path.join(local_out_directory,
client_dir,
'%s-%d-stats-%d.json' % (client, k, run))
try:
with open(client_stats_file) as f:
client_stats = json.load(f)
for k1, v in client_stats.items():
if (not 'stats_merge_lists' in config) or (not k1 in config['stats_merge_lists']):
if k1 not in stats:
stats[k1] = v
else:
stats[k1] += v
else:
if k1 not in stats:
stats[k1] = v
else:
if len(stats[k1]) < len(v):
for uu in range(len(stats[k1]), len(v)):
stats[k1].append(0)
for uu in range(len(v)):
stats[k1][uu] += v[uu]
except FileNotFoundError:
print('No stats file %s.' % client_stats_file)
except json.decoder.JSONDecodeError:
print('Invalid JSON file %s.' % client_stats_file)
for instance_idx in range(config["num_instances"]):
for shard_idx in range(len(config["shards"])):
shard = config["shards"][shard_idx]
for replica_idx in range(len(shard)):
replica = shard[replica_idx]
if get_region(config, replica) != region:
continue
server_stats_file = os.path.join(local_out_directory, 'server-%d-%d' % (instance_idx, shard_idx),
'server-%d-%d-%d-stats-%d.json' % (instance_idx, shard_idx, replica_idx, run))
print(server_stats_file)
try:
with open(server_stats_file) as f:
server_stats = json.load(f)
for k, v in server_stats.items():
if not type(v) is dict:
if (not 'stats_merge_lists' in config) or (not k in config['stats_merge_lists']):
if k not in stats:
stats[k] = v
else:
stats[k] += v
else:
if k not in stats:
stats[k] = v
else:
if len(stats[k]) < len(v):
for uu in range(len(stats[k]), len(v)):
stats[k].append(0)
for uu in range(len(v)):
stats[k][uu] += v[uu]
except FileNotFoundError:
print('No stats file %s.' % server_stats_file)
except json.decoder.JSONDecodeError:
print('Invalid JSON file %s.' % server_stats_file)
for cid, opl in op_latencies.items():
for k, v in opl.items():
if k in r_op_latencies:
r_op_latencies[k].extend(v)
else:
r_op_latencies[k] = v.copy()
region_client_op_latencies[cid] = opl
for cid, opt in op_times.items():
for k, v in opt.items():
if k in r_op_times:
r_op_times[k].extend(v)
else:
r_op_times[k] = v.copy()
region_client_op_times[cid] = opt
for cid, oplc in op_latency_counts.items():
for k, v in oplc.items():
if k in r_op_latency_counts:
r_op_latency_counts[k] += v
else:
r_op_latency_counts[k] = v
for k, v in op_tputs.items():
if k in r_op_tputs:
r_op_tputs[k] += v
else:
r_op_tputs[k] = v
# print('Region %d had op counts: w=%d, r=%d, rmw=%d.' % (i, writes, reads, rmws))
# normalize by server region to account for latency differences
for k, v in r_op_latencies.items():
if k in region_op_latencies:
region_op_latencies[k].append(v)
else:
region_op_latencies[k] = [v]
for k, v in r_op_times.items():
if k in region_op_times:
region_op_times[k].append(v)
else:
region_op_times[k] = [v]
for k, v in r_op_latency_counts.items():
if k in region_op_latency_counts:
region_op_latency_counts[k] = min(
region_op_latency_counts[k], v)
else:
region_op_latency_counts[k] = v
for k, v in r_op_tputs.items():
if k in region_op_tputs:
region_op_tputs[k].append(v)
else:
region_op_tputs[k] = [v]
# TODO: remove this hack
if 'fast_writes_0' in stats or 'slow_writes_0' in stats or 'fast_reads_0' in stats or 'slow_reads_0' in stats:
fw0 = stats['fast_writes_0'] if 'fast_writes_0' in stats else 0
sw0 = stats['slow_writes_0'] if 'slow_writes_0' in stats else 0
if fw0 + sw0 > 0:
stats['fast_write_ratio'] = fw0 / (fw0 + sw0)
stats['slow_write_ratio'] = sw0 / (fw0 + sw0)
fr0 = stats['fast_reads_0'] if 'fast_reads_0' in stats else 0
sr0 = stats['slow_reads_0'] if 'slow_reads_0' in stats else 0
if fr0 + sr0 > 0:
stats['fast_read_ratio'] = fr0 / (fr0 + sr0)
stats['slow_read_ratio'] = sr0 / (fr0 + sr0)
# TODO: decide if this is a hack that needs to be removed?
total_committed = 0
total_attempts = 0
stats_new = {}
for k, v in stats.items():
if k.endswith('_committed'):
k_prefix = k[:-len('_committed')]
k_attempts = k_prefix + '_attempts'
k_commit_rate = k_prefix + '_commit_rate'
k_abort_rate = k_prefix + '_abort_rate'
total_committed += stats[k]
total_attempts += stats[k_attempts]
stats_new[k_commit_rate] = stats[k] / stats[k_attempts]
stats_new[k_abort_rate] = 1 - stats_new[k_commit_rate]
for k, v in stats_new.items():
stats[k] = v
if total_attempts > 0:
stats['committed'] = total_committed
stats['attempts'] = total_attempts
stats['commit_rate'] = total_committed / total_attempts
stats['abort_rate'] = 1 - stats['commit_rate']
norm_op_latencies, norm_op_times = calculate_all_op_statistics(
config, stats, region_op_latencies, region_op_times, region_op_latency_counts, region_op_tputs)
for k, v in norm_op_latencies.items():
region_op_latencies['%s_norm' % k] = v
for k, v in norm_op_times.items():
region_op_times['%s_norm' % k] = v
return stats, region_op_latencies, region_op_times, region_op_latency_counts, region_op_tputs, region_client_op_latencies, region_client_op_times
def calculate_op_statistics(config, stats, total_recorded_time, op_type, latencies, norm_latencies, tput):
if len(latencies) > 0:
stats[op_type] = calculate_statistics_for_data(latencies)
stats[op_type]['ops'] = len(latencies)
if tput == -1:
stats[op_type]['tput'] = len(latencies) / total_recorded_time
else:
stats[op_type]['old_tput'] = len(latencies) / total_recorded_time
stats[op_type]['tput'] = tput
if op_type == 'combined':
stats['combined']['ops'] = len(latencies)
stats['combined']['time'] = total_recorded_time
# TODO: fix
# if (not 'server_emulate_wan' in config or config['server_emulate_wan']) and len(norm_latencies) > 0:
# stats['%s_norm' % op_type] = calculate_statistics_for_data(
# norm_latencies)
# stats['%s_norm' % op_type]['samples'] = len(norm_latencies)
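# The "_norm" series built below are region-normalized: for every operation
# type, each region's latency samples are randomly downsampled to the smallest
# per-region sample count (region_op_latency_counts), so that a region with
# more clients or lower latency does not dominate the aggregate distribution.
# This reading is inferred from the code itself rather than from any
# accompanying documentation.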
def calculate_all_op_statistics(config, stats, region_op_latencies, region_op_times, region_op_latency_counts, region_op_tputs):
total_recorded_time = float(
config['client_experiment_length'] - config['client_ramp_up'] - config['client_ramp_down'])
norm_op_latencies = {}
norm_op_times = {}
for k, v in region_op_latencies.items():
latencies = [lat for region_lats in v for lat in region_lats]
tput = -1 if len(v) == 0 else 0
if k in region_op_tputs:
for region_tput in region_op_tputs[k]:
tput += region_tput
for i in range(len(v)):
sample_idxs = random.sample(
range(len(v[i])), region_op_latency_counts[k])
            if k in norm_op_latencies:
norm_op_latencies[k].extend([v[i][idx] for idx in sample_idxs])
norm_op_times[k].extend(
[region_op_times[k][i][idx] for idx in sample_idxs])
else:
norm_op_latencies[k] = [v[i][idx] for idx in sample_idxs]
norm_op_times[k] = [region_op_times[k][i][idx]
for idx in sample_idxs]
if not 'server_emulate_wan' in config or config['server_emulate_wan']:
for i in range(len(v)):
region_key = 'region-%d' % i
if region_key not in stats:
stats[region_key] = {}
op_tput = -1
if k in region_op_tputs and len(region_op_tputs[k]) > i:
op_tput = region_op_tputs[k][i]
calculate_op_statistics(
config, stats[region_key], total_recorded_time, k, v[i], [], op_tput)
calculate_op_statistics(
config, stats, total_recorded_time, k, latencies, norm_op_latencies[k], tput)
return norm_op_latencies, norm_op_times
def calculate_cdf_for_npdata(npdata):
ptiles = []
for i in range(1, 100): # compute percentiles [1, 100)
ptiles.append([i, numpy.percentile(npdata, i, interpolation='higher')])
return ptiles
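# calculate_cdf_log_for_npdata approximates the CDF on a log-scaled tail: each
# pass of the outer loop covers one more "nine" of the distribution with 90
# points. For example, with precision=3 the requested percentiles are
# 1, 2, ..., 89, then 90.0, 90.1, ..., 98.9, then 99.00, 99.01, ..., 99.89;
# the default precision of 4 adds a fourth pass from 99.9 up to 99.989.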
def calculate_cdf_log_for_npdata(npdata, precision):
ptiles = []
base = 0
scale = 1
for i in range(0, precision):
for j in range(0, 90):
if i == 0 and j == 0:
continue
ptiles.append([base + j / scale, numpy.percentile(npdata,
base + j / scale, interpolation='higher')])
base += 90 / scale
scale = scale * 10
return ptiles
def calculate_statistics_for_data(data, cdf=True, cdf_log_precision=4):
npdata = numpy.asarray(data)
s = {
'p50': numpy.percentile(npdata, 50).item(),
'p75': numpy.percentile(npdata, 75).item(),
'p90': numpy.percentile(npdata, 90).item(),
'p95': numpy.percentile(npdata, 95).item(),
'p99': numpy.percentile(npdata, 99).item(),
'p99.9': numpy.percentile(npdata, 99.9).item(),
'max': numpy.amax(npdata).item(),
'min': numpy.amin(npdata).item(),
'mean': numpy.mean(npdata).item(),
'stddev': numpy.std(npdata).item(),
'var': numpy.var(npdata).item(),
}
if cdf:
s['cdf'] = calculate_cdf_for_npdata(npdata)
s['cdf_log'] = calculate_cdf_log_for_npdata(npdata, cdf_log_precision)
return s
def generate_gnuplot_script_cdf_log_agg_new(script_file, out_file, x_label,
y_label, width, height, font, series, title):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set title \"%s\"\n" % title)
f.write("set key bottom right\n")
f.write("set ytics (0,0.9,0.99,0.999,0.9999,1.0)\n")
f.write("set xlabel '%s'\n" % x_label)
f.write("set ylabel '%s'\n" % y_label)
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" % (
width, height, font))
f.write('set output \'%s\'\n' % out_file)
write_line_styles(f)
f.write('plot ')
for i in range(len(series)):
if i == 0:
labels = ':yticlabels(3)'
else:
labels = ''
f.write("'%s' using 1:(-log10(1-$2))%s title \"%s\" ls %d with lines" % (
series[i][1], labels, series[i][0].replace('_', '\\\\\\_'), i + 1))
if i != len(series) - 1:
f.write(', \\\n')
def generate_gnuplot_script_lot_plot_stacked(script_file, out_file, x_label, y_label, width, height, font, series, title):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set title \"%s\"\n" % title)
f.write("set key top left\n")
f.write("set xlabel '%s'\n" % x_label)
f.write("set ylabel '%s'\n" % y_label)
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(width, height, font))
f.write('set output \'%s\'\n' % out_file)
write_line_styles(f)
f.write('plot ')
for i in range(len(series)):
f.write("'%s' title \"%s\" ls %d with filledcurves x1" % (series[i][1],
series[i][0].replace('_', '\\\\\\_'), i + 1))
if i != len(series) - 1:
f.write(', \\\n')
def generate_gnuplot_script_cdf_agg_new(script_file, out_file, x_label, y_label, width, height, font, series, title):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set title \"%s\"\n" % title)
f.write("set key bottom right\n")
f.write("set xlabel '%s'\n" % x_label)
f.write("set ylabel '%s'\n" % y_label)
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(width, height, font))
f.write('set output \'%s\'\n' % out_file)
write_line_styles(f)
f.write('plot ')
for i in range(len(series)):
f.write("'%s' title \"%s\" ls %d with lines" % (series[i][1],
series[i][0].replace('_', '\\\\\\_'), i + 1))
if i != len(series) - 1:
f.write(', \\\n')
def generate_csv_for_plot(plot_csv_file, x_vars, y_vars):
with open(plot_csv_file, 'w') as f:
csvwriter = csv.writer(f)
for i in range(len(x_vars)):
csvwriter.writerow([x_vars[i], y_vars[i]])
def generate_gnuplot_script(plot, plot_script_file, plot_csv_file, plot_out_file):
with open(plot_script_file, 'w') as f:
write_gpi_header(f)
f.write("set key top left\n")
f.write("set xlabel '%s'\n" % plot['x_label'])
f.write("set ylabel '%s'\n" % plot['y_label'])
f.write("set terminal pngcairo size %d,%d enhanced font '%s'\n" %
(plot['width'], plot['height'], plot['font']))
f.write('set output \'%s\'\n' % plot_out_file)
write_line_styles(f)
f.write("plot '%s' title '%s' with linespoint\n" %
(plot_csv_file, 'series-1'))
def generate_plot(plot, plots_directory, x_vars, y_vars):
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot['name'])
generate_csv_for_plot(plot_csv_file, x_vars, y_vars)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot['name'])
plot_out_file = os.path.join(plots_directory, '%s.png' % plot['name'])
generate_gnuplot_script(plot, plot_script_file,
plot_csv_file, plot_out_file)
subprocess.call(['gnuplot', plot_script_file])
def generate_gnuplot_script_agg(plot, plot_script_file, plot_out_file, series):
with open(plot_script_file, 'w') as f:
write_gpi_header(f)
f.write("set key top left\n")
f.write("set xlabel '%s'\n" % plot['x_label'])
f.write("set ylabel '%s'\n" % plot['y_label'])
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(plot['width'], plot['height'], plot['font']))
f.write('set output \'%s\'\n' % plot_out_file)
write_line_styles(f)
f.write('plot ')
for i in range(len(series)):
f.write("'%s' title '%s' ls %d with linespoint" % (
series[i], plot['series_titles'][i].replace('_', '\\_'), i + 1))
if i != len(series) - 1:
f.write(', \\\n')
def generate_plots(config, base_out_directory, out_dirs):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
csv_classes = set()
csv_files = []
subprocesses = []
###
# Generate aggregate cdf plots
for i in range(len(out_dirs[0])):
# for each series i
csv_files.append({})
collecting = collections.deque([out_dirs[0][i]])
while len(collecting) != 0:
# bfs flattening of independent vars
if type(collecting[0]) is str:
sub_plot_directory = os.path.join(
collecting[0], config['plot_directory_name'])
for f in os.listdir(sub_plot_directory):
if f.endswith('.csv') and (f.startswith('aggregate-') or f.startswith('lot-')):
csv_class = os.path.splitext(os.path.basename(f))[0]
csv_classes.add(csv_class)
if not csv_class in csv_files[i]:
csv_files[i][csv_class] = []
csv_files[i][csv_class].append(
os.path.join(sub_plot_directory, f))
else:
for od in collecting[0]:
collecting.append(od)
collecting.popleft()
for csv_class in csv_classes:
idx = -1
for j in range(len(csv_files)):
if csv_class in csv_files[j]:
idx = j
break
if idx == -1:
continue
for j in range(len(csv_files[idx][csv_class])):
title = ''
for k in range(len(config['experiment_independent_vars']) - 1 - len(config['experiment_independent_vars_unused']), -1, -1):
title += '%s=%s' % (config['experiment_independent_vars'][k][0].replace('_', '\\\\\\_'),
str(config[config['experiment_independent_vars'][k][0]]))
if k > 0:
title += '\\n'
x = j
for k in range(len(config['experiment_independent_vars_unused']) - 1, 0, -1):
if k == len(config['experiment_independent_vars_unused']) - 1:
title += '\\n'
n = len(
config[config['experiment_independent_vars_unused'][k][0]])
title += '%s=%s' % (config['experiment_independent_vars_unused'][k][0].replace('_', '\\\\\\_'),
str(config[config['experiment_independent_vars_unused'][k][0]][x % n]))
x = x // n
if k > 0:
title += '\\n'
plot_script_file = os.path.join(
plots_directory, '%s-%d.gpi' % (csv_class, j))
plot_out_file = os.path.join(
plots_directory, '%s-%d.png' % (csv_class, j))
series = []
for i in range(len(csv_files)):
if csv_class in csv_files[i] and len(csv_files[i][csv_class]) > j:
series.append(('%s=%s' % (config['experiment_independent_vars_unused'][0][0],
config[config['experiment_independent_vars_unused'][0][0]][i]), csv_files[i][csv_class][j]))
if 'lot-' in csv_class:
if not 'lot_plots' in config:
config['lot_plots'] = {
'x_label': 'Time',
'y_label': 'Latency',
'width': config['cdf_plots']['width'],
'height': config['cdf_plots']['height'],
'font': config['cdf_plots']['font']
}
generate_gnuplot_script_cdf_agg_new(plot_script_file,
plot_out_file, config['lot_plots']['x_label'],
config['lot_plots']['y_label'],
config['lot_plots']['width'],
config['lot_plots']['height'],
config['lot_plots']['font'], series, title)
elif 'log' in csv_class:
generate_gnuplot_script_cdf_log_agg_new(plot_script_file,
plot_out_file, config['cdf_plots']['x_label'],
config['cdf_plots']['y_label'],
config['cdf_plots']['width'],
config['cdf_plots']['height'],
config['cdf_plots']['font'], series, title)
else:
generate_gnuplot_script_cdf_agg_new(plot_script_file,
plot_out_file, config['cdf_plots']['x_label'],
config['cdf_plots']['y_label'],
config['cdf_plots']['width'],
config['cdf_plots']['height'],
config['cdf_plots']['font'], series, title)
print(plot_script_file)
# subprocesses.append(subprocess.Popen(['gnuplot', plot_script_file]))
subprocess.call(['gnuplot', plot_script_file])
# End generate all aggregate cdf plots
###
###
# Generate specific plots
# for now we only support configurable plot generation with 1 indep var
for plot in config['plots']:
if len(config['experiment_independent_vars']) - len(config['experiment_independent_vars_unused']) == 1:
# generate csvs and single series plots
x_vars = []
y_vars = []
for i in range(len(out_dirs[0])):
assert type(out_dirs[0][i]) is str
# for each value of the independent variable
stats_file = os.path.join(
out_dirs[0][i], config['stats_file_name'])
print(stats_file)
with open(stats_file) as f:
stats = json.load(f)
if plot['x_var_is_config']:
x_var = config
for k in plot['x_var']:
if type(x_var) is dict:
x_var = x_var[k]
elif type(x_var) is list:
x_var = x_var[i]
if type(x_var) is list:
x_var = x_var[i]
else:
x_var = stats
for k in plot['x_var']:
if k in x_var:
x_var = x_var[k]
else:
x_var = 0
break
x_vars.append(x_var)
y_var = stats
for k in plot['y_var']:
if k in y_var or (isinstance(y_var, list) and isinstance(k, int) and k < len(y_var)):
y_var = y_var[k]
else:
y_var = 0
break
y_vars.append(y_var)
print(plots_directory)
generate_plot(plot, plots_directory, x_vars, y_vars)
elif len(config['experiment_independent_vars']) == len(config['experiment_independent_vars_unused']):
csv_files = []
for i in range(len(out_dirs[-1])):
# for series i
sub_plot_directory = os.path.join(
out_dirs[-1][i], config['plot_directory_name'])
csv_files.append(os.path.join(
sub_plot_directory, '%s.csv' % plot['name']))
plot_script_file = os.path.join(
plots_directory, '%s.gpi' % plot['name'])
plot_out_file = os.path.join(
plots_directory, '%s.png' % plot['name'])
generate_gnuplot_script_agg(
plot, plot_script_file, plot_out_file, csv_files)
subprocess.call(['gnuplot', plot_script_file])
# subprocesses.append(subprocess.Popen(['gnuplot', plot_script_file]))
# End generate specific plots
###
# for subprocess in subprocesses:
# subprocess.wait()
def run_gnuplot(data_files, out_file, script_file):
# print(script_file)
args = ['gnuplot', '-e', "outfile='%s'" % out_file]
for i in range(len(data_files)):
args += ['-e', "datafile%d='%s'" % (i, data_files[i])]
args.append(script_file)
subprocess.call(args)
def generate_csv_for_cdf_plot(csv_file, cdf_data, log=False):
with open(csv_file, 'w') as f:
csvwriter = csv.writer(f)
k = 1
for i in range(len(cdf_data)):
data = [cdf_data[i][1], cdf_data[i][0] / 100]
if log and abs(cdf_data[i][0] / 100 - (1 - 10**-k)) < 0.000001:
data.append(1 - 10**-k)
k += 1
csvwriter.writerow(data)
def generate_csv_for_lot_plot(csv_file, lot_data, lot_times=None, use_idxs=False):
with open(csv_file, 'w') as f:
csvwriter = csv.writer(f)
        if lot_times is None:
agg = 0.0
for i in range(len(lot_data)):
agg += lot_data[i]
if use_idxs:
data = [i, lot_data[i]]
else:
data = [agg, lot_data[i]]
csvwriter.writerow(data)
else:
aggregate_data = []
for i in range(len(lot_data)):
aggregate_data.append([lot_times[i], lot_data[i]])
aggregate_data = sorted(aggregate_data, key=operator.itemgetter(0))
for row in aggregate_data:
csvwriter.writerow(row)
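# generate_csv_for_tot_plot buckets the (time, latency) pairs into fixed
# 500 ms windows ("interval = 5e2", in the same millisecond unit as the
# timestamps) and writes one (timestamp, ops_per_second) row per window,
# producing a throughput-over-time series. The millisecond assumption follows
# from the "* 1e3 / interval" conversion below.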
def generate_csv_for_tot_plot(csv_file, lot_data, lot_times):
with open(csv_file, 'w') as f:
csvwriter = csv.writer(f)
aggregate_data = []
for i in range(len(lot_data)):
aggregate_data.append([lot_times[i], lot_data[i]])
aggregate_data = sorted(aggregate_data, key=operator.itemgetter(0))
tot_data = []
if len(aggregate_data) > 0:
ops_in_interval = 1
start_interval = aggregate_data[0][0]
end_interval = aggregate_data[0][0]
interval = 5e2
for i in range(1, len(aggregate_data)):
if aggregate_data[i][0] < start_interval + interval:
ops_in_interval += 1
else:
tot_data.append(
[end_interval, ops_in_interval * 1e3 / interval])
                    ops_in_interval = 1  # count the op that starts the new window
start_interval = aggregate_data[i][0]
end_interval = aggregate_data[i][0]
tot_data.append([end_interval, ops_in_interval * 1e3 / interval])
for row in tot_data:
csvwriter.writerow(row)
def generate_cdf_plot(config, plots_directory, plot_name, cdf_data):
plot_name = plot_name.replace('_', '-')
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot_name)
generate_csv_for_cdf_plot(plot_csv_file, cdf_data)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot_name)
generate_gnuplot_script_cdf(config, plot_script_file)
run_gnuplot([plot_csv_file], os.path.join(plots_directory, '%s.png' % plot_name),
plot_script_file)
def generate_gnuplot_script_lot(config, script_file, line_type='points'):
with open(script_file, 'w') as f:
f.write("set datafile separator ','\n")
f.write("set key bottom right\n")
f.write("set yrange [0:]\n")
if 'plot_lot_x_label' in config:
f.write("set xlabel '%s'\n" % config['plot_lot_x_label'])
if 'plot_lot_y_label' in config:
f.write("set ylabel '%s'\n" % config['plot_lot_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
f.write("plot datafile0 title '%s' with %s\n" % (config['plot_cdf_series_title'].replace('_', '\\_'),
line_type))
def generate_lot_plot(config, plots_directory, plot_name, lot_data, lot_times):
plot_name = plot_name.replace('_', '-')
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot_name)
generate_csv_for_lot_plot(plot_csv_file, lot_data, lot_times)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot_name)
generate_gnuplot_script_lot(config, plot_script_file)
run_gnuplot([plot_csv_file], os.path.join(plots_directory, '%s.png' % plot_name),
plot_script_file)
def generate_tot_plot(config, plots_directory, plot_name, lot_data, lot_times):
plot_name = plot_name.replace('_', '-')
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot_name)
generate_csv_for_tot_plot(plot_csv_file, lot_data, lot_times)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot_name)
generate_gnuplot_script_lot(config, plot_script_file, 'linespoints')
run_gnuplot([plot_csv_file], os.path.join(plots_directory, '%s.png' % plot_name),
plot_script_file)
def generate_cdf_log_plot(config, plots_directory, plot_name, cdf_data):
plot_name = plot_name.replace('_', '-')
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot_name)
generate_csv_for_cdf_plot(plot_csv_file, cdf_data, log=True)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot_name)
generate_gnuplot_script_cdf_log(config, plot_script_file)
run_gnuplot([plot_csv_file], os.path.join(plots_directory, '%s.png' % plot_name),
plot_script_file)
CDF_PLOTS = ['txn', 'w', 'r', 'rmw', 'max', 'maxr', 'maxw', 'maxrmw', 'combined', 'txn_norm',
'w_norm', 'r_norm', 'rmw_norm', 'combined_norm', 'max_norm', 'maxr_norm', 'maxw_norm', 'maxrmw_norm']
def generate_cdf_plots(config, local_out_directory, stats, executor):
futures = []
plots_directory = os.path.join(
local_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
for op_type in stats['aggregate']:
if not op_type in config['client_cdf_plot_blacklist'] and not 'region-' in op_type:
cdf_plot_name = 'aggregate-%s' % op_type
futures.append(executor.submit(generate_cdf_plot, config, plots_directory, cdf_plot_name,
stats['aggregate'][op_type]['cdf']))
cdf_log_plot_name = 'aggregate-%s-log' % op_type
futures.append(executor.submit(generate_cdf_log_plot, config, plots_directory, cdf_log_plot_name,
stats['aggregate'][op_type]['cdf_log']))
elif 'region-' in op_type:
for op_type2 in stats['aggregate'][op_type]:
if not op_type2 in config['client_cdf_plot_blacklist']:
cdf_plot_name = 'aggregate-%s-%s' % (op_type, op_type2)
futures.append(executor.submit(generate_cdf_plot, config, plots_directory, cdf_plot_name,
stats['aggregate'][op_type][op_type2]['cdf']))
cdf_log_plot_name = 'aggregate-%s-%s-log' % (
op_type, op_type2)
futures.append(executor.submit(generate_cdf_log_plot, config, plots_directory, cdf_log_plot_name,
stats['aggregate'][op_type][op_type2]['cdf_log']))
for i in range(len(stats['runs'])):
for op_type in stats['runs'][i]:
if not op_type in config['client_cdf_plot_blacklist'] and not 'region-' in op_type:
if type(stats['runs'][i][op_type]) is dict:
cdf_plot_name = 'run-%d-%s' % (i, op_type)
futures.append(executor.submit(generate_cdf_plot, config, plots_directory, cdf_plot_name,
stats['runs'][i][op_type]['cdf']))
cdf_log_plot_name = 'run-%d-%s-log' % (i, op_type)
futures.append(executor.submit(generate_cdf_log_plot, config, plots_directory, cdf_log_plot_name,
                                   stats['runs'][i][op_type]['cdf_log']))
elif 'region-' in op_type:
for op_type2 in stats['runs'][i][op_type]:
if not op_type2 in config['client_cdf_plot_blacklist']:
if type(stats['runs'][i][op_type]) is dict:
cdf_plot_name = 'run-%d-%s-%s' % (
i, op_type, op_type2)
futures.append(executor.submit(generate_cdf_plot, config, plots_directory, cdf_plot_name,
stats['runs'][i][op_type][op_type2]['cdf']))
cdf_log_plot_name = 'run-%d-%s-%s-log' % (
i, op_type, op_type2)
futures.append(executor.submit(generate_cdf_log_plot, config, plots_directory, cdf_log_plot_name,
                                       stats['runs'][i][op_type][op_type2]['cdf_log']))
concurrent.futures.wait(futures)
def generate_ot_plots(config, local_out_directory, stats, op_latencies, op_times, client_op_latencies, client_op_times, executor):
futures = []
plots_directory = os.path.join(
local_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
for i in range(len(stats['runs'])):
ops = []
for op_type in stats['runs'][i]:
if config['client_total'] == 1 and op_type.startswith('op'):
ops.append(op_type)
if not op_type in config['client_cdf_plot_blacklist'] and not 'region-' in op_type:
if type(stats['runs'][i][op_type]) is dict:
plot_name = 'run-%d-%s' % (i, op_type)
futures.append(executor.submit(generate_lot_plot, config,
plots_directory, 'lot-' + plot_name,
op_latencies[op_type][i], op_times[op_type][i]))
futures.append(executor.submit(generate_tot_plot, config,
plots_directory, 'tot-' + plot_name,
op_latencies[op_type][i], op_times[op_type][i]))
for cid, opl in client_op_latencies[i].items():
if not op_type in client_op_times[i][cid] or not op_type in opl:
continue
client_plot_name = plot_name + ('-client-%d' % cid)
futures.append(executor.submit(generate_lot_plot, config,
plots_directory, 'lot-' + client_plot_name,
opl[op_type], client_op_times[i][cid][op_type]))
futures.append(executor.submit(generate_tot_plot, config,
plots_directory, 'tot-' + client_plot_name,
opl[op_type], client_op_times[i][cid][op_type]))
elif 'region-' in op_type:
for op_type2 in stats['runs'][i][op_type]:
if not op_type2 in config['client_cdf_plot_blacklist']:
if type(stats['runs'][i][op_type]) is dict:
lot_plot_name = 'lot-run-%d-%s-%s' % (
i, op_type, op_type2)
futures.append(executor.submit(generate_lot_plot,
config, plots_directory, lot_plot_name,
op_latencies[op_type2][i], op_times[op_type2][i]))
if len(ops) > 0:
ops.sort()
series = [convert_latency_nanos_to_millis(
stats['runs'][i][ops[0]])]
series_csvs = []
plot_csv_file = os.path.join(plots_directory, '%s.csv' % ops[0])
series_csvs.append((ops[0], plot_csv_file))
            generate_csv_for_lot_plot(
                plot_csv_file, series[0], use_idxs=True)
for k in range(1, len(ops)):
series.append(convert_latency_nanos_to_millis(
stats['runs'][i][ops[k]]))
for j in range(len(series[-2])):
series[-1][j] += series[-2][j]
plot_csv_file = os.path.join(
plots_directory, '%s.csv' % ops[k])
series_csvs.append((ops[k], plot_csv_file))
                generate_csv_for_lot_plot(
                    plot_csv_file, series[-1], use_idxs=True)
series.append(convert_latency_nanos_to_millis(
stats['runs'][i]['commit']))
for j in range(len(series[-2])):
series[-1][j] += series[-2][j]
plot_csv_file = os.path.join(plots_directory, 'commit.csv')
series_csvs.append(('commit', plot_csv_file))
            generate_csv_for_lot_plot(
                plot_csv_file, series[-1], use_idxs=True)
plot_name = 'breakdown'
plot_script_file = os.path.join(
plots_directory, '%s.gpi' % plot_name)
plot_out_file = os.path.join(plots_directory, '%s.png' % plot_name)
series_csvs.reverse()
generate_gnuplot_script_lot_plot_stacked(plot_script_file, plot_out_file,
'Transaction #', config['lot_plots']['y_label'],
1600, 600,
config['lot_plots']['font'], series_csvs, 'Breakdown')
subprocess.call(['gnuplot', plot_script_file])
for fut in concurrent.futures.as_completed(futures):
fut.result()
def generate_csv_for_tput_lat_plot(plot_csv_file, tputs, lats):
with open(plot_csv_file, 'w') as f:
csvwriter = csv.writer(f)
for i in range(len(tputs)):
csvwriter.writerow([tputs[i], lats[i]])
def generate_tput_lat_plot(config, plots_directory, plot_name, tputs, lats):
plot_csv_file = os.path.join(plots_directory, '%s.csv' % plot_name)
generate_csv_for_tput_lat_plot(plot_csv_file, tputs, lats)
plot_script_file = os.path.join(plots_directory, '%s.gpi' % plot_name)
generate_gnuplot_script_tput_lat(config, plot_script_file)
run_gnuplot([plot_csv_file], os.path.join(plots_directory,
'%s.png' % plot_name), plot_script_file)
STATS_FILE = 'stats.json'
def generate_tput_lat_plots(config, base_out_directory, exp_out_directories):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
tputs = []
lats = {}
for i in range(len(exp_out_directories)):
stats_file = os.path.join(exp_out_directories[i], STATS_FILE)
print(stats_file)
with open(stats_file) as f:
stats = json.load(f)
if 'combined' in stats['run_stats']:
combined_run_stats = stats['run_stats']['combined']
tputs.append(combined_run_stats['tput']['p50'])
ignore = {'stddev': 1, 'var': 1, 'tput': 1, 'ops': 1}
for lat_stat, lat in combined_run_stats.items():
if lat_stat in ignore:
continue
if lat_stat not in lats:
lats[lat_stat] = []
lats[lat_stat].append(lat['p50']) # median of runs
for lat_stat, lat in lats.items():
plot_name = 'tput-%s-lat' % lat_stat
print(plots_directory)
generate_tput_lat_plot(config, plots_directory, plot_name, tputs, lat)
def generate_agg_cdf_plots(config, base_out_directory, sub_out_directories):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
csv_files = {}
for i in range(len(sub_out_directories)):
# for replication protocol i
for j in range(len(sub_out_directories[i])):
# for client configuration j
sub_plot_directory = os.path.join(
sub_out_directories[i][j], config['plot_directory_name'])
for f in os.listdir(sub_plot_directory):
if f.endswith('.csv') and f.startswith('aggregate'):
csv_class = os.path.splitext(os.path.basename(f))[0]
if csv_class not in csv_files:
csv_files[csv_class] = []
if len(csv_files[csv_class]) == j:
csv_files[csv_class].append([])
csv_files[csv_class][j].append(
os.path.join(sub_plot_directory, f))
for csv_class, file_lists in csv_files.items():
for j in range(len(file_lists)):
plot_script_file = os.path.join(
plots_directory, '%s-%d.gpi' % (csv_class, j))
if 'log' in csv_class:
generate_gnuplot_script_cdf_log_agg(config, plot_script_file)
else:
generate_gnuplot_script_cdf_agg(config, plot_script_file)
run_gnuplot(file_lists[j], os.path.join(plots_directory,
'%s-%d.png' % (csv_class, j)), plot_script_file)
def generate_agg_tput_lat_plots(config, base_out_directory, out_directories):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
csv_files = {}
for i in range(len(out_directories)):
# for replication protocol i
sub_plot_directory = os.path.join(
out_directories[i], config['plot_directory_name'])
for f in os.listdir(sub_plot_directory):
if f.endswith('.csv'):
csv_class = os.path.splitext(os.path.basename(f))[0]
if csv_class not in csv_files:
csv_files[csv_class] = []
csv_files[csv_class].append(
os.path.join(sub_plot_directory, f))
for csv_class, files in csv_files.items():
plot_script_file = os.path.join(plots_directory, '%s.gpi' % csv_class)
generate_gnuplot_script_tput_lat_agg(config, plot_script_file)
run_gnuplot(files, os.path.join(plots_directory, '%s.png' %
csv_class), plot_script_file)
def generate_gnuplot_script_tail_at_scale(config, script_file):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set key top left\n")
f.write("set xlabel '# of subrequests'\n")
f.write("set ylabel 'Median Latency (ms)'\n")
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
write_line_styles(f)
f.write('plot ')
for i in range(len(config['replication_protocol'])):
f.write("datafile%d title '%s' ls %d with lines" % (
i, config['plot_cdf_series_title'][i].replace('_', '\\_'), i + 1))
if i != len(config['replication_protocol']) - 1:
f.write(', \\\n')
def generate_tail_at_scale_plots(config, base_out_directory, sub_out_directories):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
csv_files = [[[] for i in range(len(config['server_names'])+1)]
for j in range(len(config['client_nodes_per_server']))]
for k in range(len(config['client_tail_at_scale'])):
# for tail-at-scale request size k
for i in range(len(sub_out_directories[k])):
# for replication protocol i
for j in range(len(sub_out_directories[k][i])):
# for client configuration j
with open(os.path.join(sub_out_directories[k][i][j], STATS_FILE)) as f:
stats = json.load(f)
csvfile = os.path.join(
plots_directory, '%s-%d-%d-overall.csv' % (config['replication_protocol'][i], i, j))
csv_files[j][0].append(csvfile)
with open(csvfile, 'a') as csvf:
csvwriter = csv.writer(csvf)
csvwriter.writerow(
[config['client_tail_at_scale'][k], stats['aggregate']['max_norm']['p50']])
for r in range(len(config['server_names'])):
csvfilereg = os.path.join(
plots_directory, '%s-%d-%d-region-%d.csv' % (config['replication_protocol'][i], i, j, r))
csv_files[j][r+1].append(csvfilereg)
with open(csvfilereg, 'a') as csvfreg:
csvwriterreg = csv.writer(csvfreg)
csvwriterreg.writerow(
[config['client_tail_at_scale'][k], stats['aggregate']['region-%d' % r]['max']['p50']])
for j in range(len(config['client_nodes_per_server'])):
plot_script_prefix = 'tail-at-scale-overall-%d' % j
plot_script_file = os.path.join(
plots_directory, '%s.gpi' % plot_script_prefix)
generate_gnuplot_script_tail_at_scale(config, plot_script_file)
run_gnuplot(csv_files[j][0], os.path.join(
plots_directory, '%s.png' % plot_script_prefix), plot_script_file)
print(csv_files)
num_regions = get_num_regions(config)
for r in range(num_regions):
plot_script_file = os.path.join(
plots_directory, 'tail-at-scale-%d-region-%d.gpi' % (j, r))
generate_gnuplot_script_tail_at_scale(config, plot_script_file)
run_gnuplot(csv_files[j][r+1], os.path.join(plots_directory,
'tail-at-scale-%d-region-%d.png' % (j, r)), plot_script_file)
def generate_gnuplot_script_cdf(config, script_file):
with open(script_file, 'w') as f:
f.write("set datafile separator ','\n")
f.write("set key bottom right\n")
f.write("set xlabel '%s'\n" % config['plot_cdf_x_label'])
f.write("set ylabel '%s'\n" % config['plot_cdf_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
f.write("plot datafile0 title '%s' with lines\n" %
config['plot_cdf_series_title'].replace('_', '\\_'))
def generate_gnuplot_script_cdf_log(config, script_file):
with open(script_file, 'w') as f:
f.write("set datafile separator ','\n")
f.write("set key bottom right\n")
f.write("set xlabel '%s'\n" % config['plot_cdf_x_label'])
f.write("set ylabel '%s'\n" % config['plot_cdf_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
f.write("plot datafile0 using 1:(-log10(1-$2)):yticlabels(3) title '%s' with lines\n" %
config['plot_cdf_series_title'].replace('_', '\\_'))
def generate_gnuplot_script_tput_lat(config, plot_script_file):
with open(plot_script_file, 'w') as f:
f.write("set datafile separator ','\n")
f.write("set key top left\n")
f.write("set xlabel '%s'\n" % config['plot_tput_lat_x_label'])
f.write("set ylabel '%s'\n" % config['plot_tput_lat_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced font '%s'\n" %
(config['plot_tput_lat_png_width'],
config['plot_tput_lat_png_height'],
config['plot_tput_lat_png_font']))
f.write('set output outfile\n')
f.write("plot datafile0 title '%s' with linespoint\n" %
config['plot_tput_lat_series_title'].replace('_', '\\_'))
def write_gpi_header(f):
f.write("set datafile separator ','\n")
def write_line_styles(f):
f.write('set style line 1 linetype 1 linewidth 2\n')
f.write('set style line 2 linetype 1 linecolor "green" linewidth 2\n')
f.write('set style line 3 linetype 1 linecolor "blue" linewidth 2\n')
f.write('set style line 4 linetype 4 linewidth 2\n')
f.write('set style line 5 linetype 5 linewidth 2\n')
f.write('set style line 6 linetype 8 linewidth 2\n')
def generate_gnuplot_script_cdf_log_agg(config, script_file):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set key bottom right\n")
f.write("set ytics (0,0.9,0.99,0.999,0.9999,1.0)\n")
f.write("set xlabel '%s'\n" % config['plot_cdf_x_label'])
f.write("set ylabel '%s'\n" % config['plot_cdf_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
write_line_styles(f)
f.write('plot ')
for i in range(len(config['replication_protocol'])):
if i == 0:
labels = ':yticlabels(3)'
else:
labels = ''
f.write("datafile%d using 1:(-log10(1-$2))%s title '%s' ls %d with lines" %
(i, labels, config['plot_cdf_series_title'][i].replace('_', '\\_'), i + 1))
if i != len(config['replication_protocol']) - 1:
f.write(', \\\n')
def generate_gnuplot_script_cdf_agg(config, script_file):
with open(script_file, 'w') as f:
write_gpi_header(f)
f.write("set key bottom right\n")
f.write("set xlabel '%s'\n" % config['plot_cdf_x_label'])
f.write("set ylabel '%s'\n" % config['plot_cdf_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(config['plot_cdf_png_width'], config['plot_cdf_png_height'],
config['plot_cdf_png_font']))
f.write('set output outfile\n')
write_line_styles(f)
f.write('plot ')
for i in range(len(config['replication_protocol'])):
f.write("datafile%d title '%s' ls %d with lines" % (
i, config['plot_cdf_series_title'][i].replace('_', '\\_'), i + 1))
if i != len(config['replication_protocol']) - 1:
f.write(', \\\n')
def generate_gnuplot_script_tput_lat_agg(config, plot_script_file):
with open(plot_script_file, 'w') as f:
write_gpi_header(f)
f.write("set key top left\n")
f.write("set xlabel '%s'\n" % config['plot_tput_lat_x_label'])
f.write("set ylabel '%s'\n" % config['plot_tput_lat_y_label'])
f.write("set terminal pngcairo size %d,%d enhanced dashed font '%s'\n" %
(config['plot_tput_lat_png_width'],
config['plot_tput_lat_png_height'],
config['plot_tput_lat_png_font']))
f.write('set output outfile\n')
write_line_styles(f)
f.write('plot ')
for i in range(len(config['replication_protocol'])):
f.write("datafile%d title '%s' ls %d with linespoint" % (
i, config['plot_tput_lat_series_title'][i].replace('_', '\\_'), i + 1))
if i != len(config['replication_protocol']) - 1:
f.write(', \\\n')
def regenerate_plots(config_file, exp_dir, executor, calc_stats=True):
with open(config_file) as f:
config = json.load(f)
    if 'client_stats_blacklist' not in config:
        config['client_stats_blacklist'] = []
    if 'client_combine_stats_blacklist' not in config:
        config['client_combine_stats_blacklist'] = []
    if 'client_cdf_plot_blacklist' not in config:
        config['client_cdf_plot_blacklist'] = []
out_directories = sorted(next(os.walk(exp_dir))[1])
if 'plots' in out_directories:
out_directories.remove('plots')
out_directories = [os.path.join(exp_dir, d) for d in out_directories]
out_directories = out_directories[:len(config['replication_protocol'])]
sub_out_directories = []
for i in range(len(out_directories)):
out_dir = out_directories[i]
dirs = sorted(next(os.walk(out_dir))[1])
if 'plots' in dirs:
dirs.remove('plots')
dirs = [os.path.join(
out_dir, d, config['out_directory_name']) for d in dirs]
config_new = config.copy()
config_new['base_local_exp_directory'] = exp_dir
server_replication_protocol = config['replication_protocol'][i]
config_new['replication_protocol'] = server_replication_protocol
config_new['plot_cdf_series_title'] = config['plot_cdf_series_title'][i]
config_new['plot_tput_lat_series_title'] = config['plot_tput_lat_series_title'][i]
config_new['replication_protocol_settings'] = config['replication_protocol_settings'][i]
sub_out_directories.append(dirs)
for j in range(len(dirs)):
sub_out_dir = dirs[j]
config_new_new = config_new.copy()
config_new_new['base_local_exp_directory'] = exp_dir
n = config_new['client_nodes_per_server'][j]
m = config_new['client_processes_per_client_node'][j]
config_new_new['client_nodes_per_server'] = n
config_new_new['client_processes_per_client_node'] = m
if calc_stats:
stats = calculate_statistics(config_new_new, sub_out_dir)
else:
with open(os.path.join(sub_out_dir, STATS_FILE)) as f:
stats = json.load(f)
generate_cdf_plots(
config_new_new, sub_out_dir, stats, executor)
generate_tput_lat_plots(config_new, out_dir, dirs)
generate_agg_cdf_plots(config, exp_dir, sub_out_directories)
generate_agg_tput_lat_plots(config, exp_dir, out_directories)
def generate_agg_write_percentage_csv(config, base_out_directory, sub_out_directories):
plots_directory = os.path.join(
base_out_directory, config['plot_directory_name'])
os.makedirs(plots_directory, exist_ok=True)
print(config['client_write_percentage'])
write_percentage = config['client_write_percentage'] / (
config['client_write_percentage'] + config['client_read_percentage'] + config['client_rmw_percentage'])
for i in range(len(sub_out_directories)):
# for replication protocol i
for j in range(len(sub_out_directories[i])):
# for client configuration j
with open(os.path.join(sub_out_directories[i][j], STATS_FILE)) as f:
stats = json.load(f)
for p in ['p50', 'p75', 'p90', 'p95', 'p99']:
for t in ['w_norm', 'r_norm']:
with open(os.path.join(base_out_directory, '%s-%d-%d-%s-%s.csv' % (config['replication_protocol'][i], i, j, t, p)), 'w') as f:
csvwriter = csv.writer(f)
csvwriter.writerow(
[write_percentage, stats['aggregate'][t][p]])
def generate_varying_write_csvs(config_file, exp_dir, calc_stats=True):
with open(config_file) as f:
config = json.load(f)
out_directories = sorted(next(os.walk(exp_dir))[1])
if 'plots' in out_directories:
out_directories.remove('plots')
out_directories = [os.path.join(exp_dir, d) for d in out_directories]
out_directories = out_directories[:len(config['replication_protocol'])]
sub_out_directories = []
for i in range(len(out_directories)):
out_dir = out_directories[i]
dirs = sorted(next(os.walk(out_dir))[1])
if 'plots' in dirs:
dirs.remove('plots')
dirs = [os.path.join(
out_dir, d, config['out_directory_name']) for d in dirs]
config_new = config.copy()
config_new['base_local_exp_directory'] = exp_dir
server_replication_protocol = config['replication_protocol'][i]
config_new['replication_protocol'] = server_replication_protocol
config_new['plot_cdf_series_title'] = config['plot_cdf_series_title'][i]
config_new['plot_tput_lat_series_title'] = config['plot_tput_lat_series_title'][i]
config_new['replication_protocol_settings'] = config['replication_protocol_settings'][i]
sub_out_directories.append(dirs)
for j in range(len(dirs)):
sub_out_dir = dirs[j]
config_new_new = config_new.copy()
config_new_new['base_local_exp_directory'] = exp_dir
n = config_new['client_nodes_per_server'][j]
m = config_new['client_processes_per_client_node'][j]
config_new_new['client_nodes_per_server'] = n
config_new_new['client_processes_per_client_node'] = m
generate_agg_write_percentage_csv(config, exp_dir, sub_out_directories)
def regenerate_tail_at_scale_plots(config_file, exp_dir):
with open(config_file) as f:
config = json.load(f)
directories = sorted(next(os.walk(exp_dir))[1])
if 'plots' in directories:
directories.remove('plots')
directories = [os.path.join(exp_dir, d) for d in directories]
directories = directories[:len(config['client_tail_at_scale'])]
sub_sub_out_directories = []
for k in range(len(directories)):
out_directories = sorted(next(os.walk(directories[k]))[1])
if 'plots' in out_directories:
out_directories.remove('plots')
out_directories = [os.path.join(
directories[k], d) for d in out_directories]
out_directories = out_directories[:len(
config['replication_protocol'])]
sub_out_directories = []
for i in range(len(out_directories)):
out_dir = out_directories[i]
dirs = sorted(next(os.walk(out_dir))[1])
if 'plots' in dirs:
dirs.remove('plots')
dirs = [os.path.join(
out_dir, d, config['out_directory_name']) for d in dirs]
config_new = config.copy()
config_new['client_tail_at_scale'] = config['client_tail_at_scale'][k]
config_new['base_local_exp_directory'] = exp_dir
server_replication_protocol = config['replication_protocol'][i]
config_new['replication_protocol'] = server_replication_protocol
config_new['plot_cdf_series_title'] = config['plot_cdf_series_title'][i]
config_new['plot_tput_lat_series_title'] = config['plot_tput_lat_series_title'][i]
config_new['replication_protocol_settings'] = config['replication_protocol_settings'][i]
sub_out_directories.append(dirs)
sub_sub_out_directories.append(sub_out_directories)
generate_tail_at_scale_plots(config, exp_dir, sub_sub_out_directories)
|
from django.urls import path
from .views import ListDonor, DetailDonor
urlpatterns = [
path('', ListDonor.as_view()),
path('<int:pk>/', DetailDonor.as_view()),
]
|
# Generated by Django 2.2.13 on 2020-08-26 20:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0062_auto_20200825_2231'),
('pipeline', '0062_auto_20200826_1735'),
]
operations = [
]
|
from flask import Response, send_file,request, make_response
from flask_restful import Resource
import json
import pandas as pd
import pymongo
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db_list = client.list_database_names()
db = client.data
class upload(Resource):
def post(self, data_type):
        # Parse the uploaded CSV with pandas and insert one document per row.
        data_df = pd.read_csv(request.files['file'])
        records = json.loads(data_df.to_json(orient='records'))
        db[data_type].insert_many(records)
return {
            'msg': 'Upload successful'
}
class download(Resource):
def get(self):
        file_type = request.args.get('type')
        response = make_response(send_file('./static/files/{}.csv'.format(file_type)))
        response.headers["Content-Disposition"] = "attachment; filename={}.csv".format(
            file_type)
return response
|
nome = str(input('Enter your full name: ')).strip().title().split()
tamanho = len(nome)
print('Hello, {} {}, welcome!!'.format(nome[0], nome[tamanho - 1]))
|
from urllib.request import urlretrieve
import os
import pandas as pd
import numpy as np
from datetime import datetime
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../data'))
UCI_DATASETS = []
def _UCI(C):
UCI_DATASETS.append(C)
return C
class Dataset:
ndim_x = None
ndim_y = None
target_columns = []
feature_columns = []
data_file_name = ''
download_url = ''
@property
def data_file_path(self):
return os.path.join(DATA_DIR, self.data_file_name)
@property
def needs_download(self):
return not os.path.isfile(self.data_file_path)
def download_dataset(self):
print("Downloading data file from %s" % self.download_url)
urlretrieve(self.download_url, self.data_file_path)
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_csv(self.data_file_path)
return self._process_df(df)
def get_target_feature_split(self):
df = self.get_df()
X = np.array(df[self.feature_columns])
Y = np.array(df[self.target_columns])
assert X.ndim == Y.ndim == 2
assert X.shape[0] == Y.shape[0]
assert X.shape[1] == self.ndim_x and Y.shape[1] == self.ndim_y
return X, Y
def get_train_valid_splits(self, valid_portion, n_splits, shuffle=False, random_state=None):
X, Y = self.get_target_feature_split()
n_instances = X.shape[0]
valid_size = int(valid_portion * n_instances)
assert valid_size * n_splits <= n_instances
idx = np.arange(n_instances)
if shuffle:
if random_state is not None:
random_state.shuffle(idx)
else:
np.random.shuffle(idx)
X_trains, Y_trains, X_valids, Y_valids = [], [], [], []
for i in reversed(range(n_splits)):
idx_start = (n_instances // n_splits) * i
idx_end = idx_start + valid_size
idx_train, idx_valid = np.concatenate([idx[:idx_start], idx[idx_end:]]), idx[idx_start:idx_end]
assert len(set(idx_train) | set(idx_valid)) == n_instances
X_trains.append(X[idx_train, :])
Y_trains.append(Y[idx_train, :])
X_valids.append(X[idx_valid, :])
Y_valids.append(Y[idx_valid, :])
return X_trains, Y_trains, X_valids, Y_valids
def _process_df(self, df):
return df
def __str__(self):
return "%s (ndim_x = %i, ndim_y = %i)"%(str(self.__class__.__name__), self.ndim_x, self.ndim_y)
class EuroStoxx50(Dataset):
ndim_x = 14
ndim_y = 1
target_columns = ['log_ret_1']
feature_columns = ['log_ret_last_period', 'log_risk_free_1d',
'RealizedVariation', 'bakshiSkew', 'bakshiKurt', 'SVIX', 'Mkt-RF',
'SMB', 'HML', 'WML', 'WML_risk_10d', 'Mkt-RF_risk_10d', 'SMB_risk_10d',
'HML_risk_10d']
data_file_name = 'eurostoxx50.csv'
def get_train_valid_splits(self, valid_portion, n_splits, shift_size=100, shuffle=False, random_state=None):
# needs extra treatment since it's time-series data --> shifts train and valid set by shift_size each split
# --> ensures that the valid data is always in the future of the train data
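        # Illustrative sketch (not from the original code): with n_instances = 1000,
        # valid_portion = 0.1, n_splits = 3 and shift_size = 100, valid_size = 100 and
        # training_size = 1000 - 100 - 3*100 = 600, so the rolling windows are
        #   i = 0: train idx [0, 600),   valid idx [600, 700)
        #   i = 1: train idx [100, 700), valid idx [700, 800)
        #   i = 2: train idx [200, 800), valid idx [800, 900)
        # with each validation window lying strictly after its training window.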
assert shuffle is False
X, Y = self.get_target_feature_split()
n_instances = X.shape[0]
valid_size = int(valid_portion * n_instances)
training_size = n_instances - valid_size - n_splits*shift_size
assert valid_size * n_splits <= n_instances
idx = np.arange(n_instances)
X_trains, Y_trains, X_valids, Y_valids = [], [], [], []
for i in reversed(range(n_splits)):
idx_train_start = int(i * shift_size)
idx_valid_start = idx_train_start + training_size
idx_valid_end = idx_valid_start + valid_size
idx_train, idx_valid = idx[idx_train_start:idx_valid_start], idx[idx_valid_start:idx_valid_end]
X_trains.append(X[idx_train, :])
Y_trains.append(Y[idx_train, :])
X_valids.append(X[idx_valid, :])
Y_valids.append(Y[idx_valid, :])
return X_trains, Y_trains, X_valids, Y_valids
def download_dataset(self):
        raise AssertionError("Sorry, the EuroStoxx 50 data is proprietary and won't be open-sourced")
class NCYTaxiDropoffPredict(Dataset):
ndim_x = 6
ndim_y = 2
data_file_name = 'yellow_tripdata_2016-01.csv'
data_file_name_processed = 'yellow_tipdata_2016-01_processed.csv'
download_url = 'https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2016-01.csv'
target_columns = ['dropoff_loc_lat', 'dropoff_loc_lon']
feature_columns = ['pickup_loc_lat', 'pickup_loc_lon', 'pickup_time_day_of_week_sin', 'pickup_time_day_of_week_cos',
'pickup_time_of_day_sin', 'pickup_time_of_day_cos']
x_bounds = [-74.04, -73.75]
y_bounds = [40.62, 40.86]
too_close_radius = 0.00001
min_duration = 30
max_duration = 3 * 3600
def __init__(self, n_samples=10**4, seed=22):
self.n_samples = n_samples
self.random_state = np.random.RandomState(seed)
def get_df(self):
data_file_path_processed = os.path.join(DATA_DIR, self.data_file_name_processed)
if not os.path.isfile(data_file_path_processed):
df = super(NCYTaxiDropoffPredict, self).get_df().dropna()
print("save processed NYC data as csv to %s" % data_file_path_processed)
df.to_csv(data_file_path_processed)
print("loading %s" % data_file_path_processed)
df = pd.read_csv(data_file_path_processed)
return df.sample(n=self.n_samples, random_state=self.random_state)
def _process_df(self, df): # does some data cleaning
data = df.values
pickup_loc = np.array((data[:, 5], data[:, 6])).T
dropoff_loc = np.array((data[:, 9], data[:, 10])).T
ind = np.ones(len(data)).astype(bool)
ind[data[:, 5] < self.x_bounds[0]] = False
ind[data[:, 5] > self.x_bounds[1]] = False
ind[data[:, 6] < self.y_bounds[0]] = False
ind[data[:, 6] > self.y_bounds[1]] = False
ind[data[:, 9] < self.x_bounds[0]] = False
ind[data[:, 9] > self.x_bounds[1]] = False
ind[data[:, 10] < self.y_bounds[0]] = False
ind[data[:, 10] > self.y_bounds[1]] = False
print('discarding {} out of bounds {} {}'.format(np.sum(np.invert(ind).astype(int)), self.x_bounds, self.y_bounds))
early_stop = ((data[:, 5] - data[:, 9]) ** 2 + (data[:, 6] - data[:, 10]) ** 2 < self.too_close_radius)
ind[early_stop] = False
print('discarding {} trip less than {} gp dist'.format(np.sum(early_stop.astype(int)), self.too_close_radius ** 0.5))
times = np.array([_process_time(d_pickup, d_dropoff) for (d_pickup, d_dropoff) in data[:, 1:3]])
pickup_time = times[:, :2]
dropoff_time = times[:, 2:4]
duration = times[:, 4]
short_journeys = (duration < self.min_duration)
ind[short_journeys] = False
print('discarding {} less than {}s journeys'.format(np.sum(short_journeys.astype(int)), self.min_duration))
long_journeys = (duration > self.max_duration)
ind[long_journeys] = False
print('discarding {} more than {}h journeys'.format(np.sum(long_journeys.astype(int)), self.max_duration / 3600.))
pickup_loc_lat = pickup_loc[ind, 0]
pickup_loc_lon = pickup_loc[ind, 1]
dropoff_loc_lat = dropoff_loc[ind, 0]
dropoff_loc_lon = dropoff_loc[ind, 1]
pickup_time_day_of_week = pickup_time[ind, 0]
pickup_time_of_day = pickup_time[ind, 1]
dropoff_time_day_of_week = dropoff_time[ind, 0]
dropoff_time_of_day = dropoff_time[ind, 1]
duration = duration[ind]
print('{} total rejected journeys'.format(np.sum(np.invert(ind).astype(int))))
        df_processed = pd.DataFrame(
            {"pickup_loc_lat": pickup_loc_lat,
             "pickup_loc_lon": pickup_loc_lon,
             "dropoff_loc_lat": dropoff_loc_lat,
             "dropoff_loc_lon": dropoff_loc_lon,
             "pickup_time_day_of_week": pickup_time_day_of_week.astype(int),
             "pickup_time_day_of_week_sin": np.sin(pickup_time_day_of_week),
             "pickup_time_day_of_week_cos": np.cos(pickup_time_day_of_week),
             "pickup_time_of_day": pickup_time_of_day,
             "pickup_time_of_day_sin": np.sin(pickup_time_of_day),
             "pickup_time_of_day_cos": np.cos(pickup_time_of_day),
             "dropoff_time_day_of_week": dropoff_time_day_of_week.astype(int),
             "dropoff_time_of_day": dropoff_time_of_day, "duration": duration})
return df_processed
def __str__(self):
return "%s (n_samples = %i, ndim_x = %i, ndim_y = %i)" % (str(self.__class__.__name__), self.n_samples, self.ndim_x, self.ndim_y)
class UCI_Dataset(Dataset):
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
uci_data_path = ''
@property
def download_url(self):
return os.path.join(self.uci_base_url, self.uci_data_path)
@property
def target_columns(self):
return [self.get_df().columns[-1]]
@property
def feature_columns(self):
return list(self.get_df().columns[:-1])
@_UCI
class BostonHousing(UCI_Dataset):
uci_data_path = 'housing/housing.data'
data_file_name = 'housing.data'
ndim_x = 13
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_fwf(self.data_file_path, header=None)
return df
@_UCI
class Conrete(UCI_Dataset):
uci_data_path = 'concrete/compressive/Concrete_Data.xls'
data_file_name = 'concrete.xls'
ndim_y = 1
ndim_x = 8
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_excel(self.data_file_path).dropna()
return df
@_UCI
class Energy(UCI_Dataset):
uci_data_path ='00242/ENB2012_data.xlsx'
data_file_name = 'energy.xlsx'
ndim_x = 9
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_excel(self.data_file_path).dropna()
return df
@_UCI
class Power(UCI_Dataset):
download_url = 'https://www.dropbox.com/s/w7qkzjtuynwxjke/power.csv?dl=1'
data_file_name = 'power.csv'
ndim_x = 4
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_csv(self.data_file_path).dropna()
return df
@_UCI
class Protein(UCI_Dataset):
uci_data_path = '00265/CASP.csv'
data_file_name = 'protein.csv'
ndim_x = 9
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_csv(self.data_file_path).dropna()
return df
@_UCI
class WineRed(UCI_Dataset):
uci_data_path = 'wine-quality/winequality-red.csv'
data_file_name = 'wine_red.csv'
ndim_x = 11
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_csv(self.data_file_path, delimiter=';').dropna()
return df
@_UCI
class WineWhite(UCI_Dataset):
uci_data_path = 'wine-quality/winequality-white.csv'
data_file_name = 'wine_white.csv'
ndim_x = 11
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_csv(self.data_file_path, delimiter=';').dropna()
return df
@_UCI
class Yacht(UCI_Dataset):
uci_data_path = '00243/yacht_hydrodynamics.data'
data_file_name = 'yacht.data'
ndim_x = 6
ndim_y = 1
def get_df(self):
if self.needs_download:
self.download_dataset()
df = pd.read_fwf(self.data_file_path, header=None).dropna()
return df
""" helper methods """
def _convert_to_day_minute(d):
rescale = lambda x, a, b: b[0] + (b[1] - b[0]) * x / (a[1] - a[0])
day_of_week = rescale(float(d.weekday()), [0, 6], [0, 2 * np.pi])
time_of_day = rescale(d.time().hour * 60 + d.time().minute, [0, 24 * 60], [0, 2 * np.pi])
return day_of_week, time_of_day
def _process_time(pickup_datetime, dropoff_datetime):
d_pickup = datetime.strptime(pickup_datetime, "%Y-%m-%d %H:%M:%S")
d_dropoff = datetime.strptime(dropoff_datetime, "%Y-%m-%d %H:%M:%S")
duration = (d_dropoff - d_pickup).total_seconds()
pickup_day_of_week, pickup_time_of_day = _convert_to_day_minute(d_pickup)
dropoff_day_of_week, dropoff_time_of_day = _convert_to_day_minute(d_dropoff)
return [pickup_day_of_week, pickup_time_of_day, dropoff_day_of_week, dropoff_time_of_day, duration]
if __name__ == "__main__":
for dataset_class in [EuroStoxx50, NCYTaxiDropoffPredict] + UCI_DATASETS:
dataset = dataset_class()
_, Y = dataset.get_target_feature_split()
n_samples = Y.shape[0]
print("%s: n_samples = %i, ndim_x = %i, ndim_y = %i"%(str(dataset.__class__.__name__), n_samples, dataset.ndim_x, dataset.ndim_y))
|
import os
import sys
import unittest
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import boto_utils
from constants import TABLE_NAME
class TestDynamoProvision(unittest.TestCase):
def setUp(self) -> None:
self.dynamodb = boto_utils.dynamodb
def test_migration(self):
created_table = boto_utils.migration()
self.assertEqual(created_table.name, TABLE_NAME)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(pistonyang@gmail.com)
import math
from torch import nn
from torch.nn.init import xavier_normal_, xavier_uniform_, \
kaiming_normal_, kaiming_uniform_, zeros_
class XavierInitializer(object):
    """Initialize a model's parameters with Xavier initialization.
    Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010).
    Apply it to a model via ``model.apply(XavierInitializer(...))``.
    Args:
        random_type (string): either 'uniform' or 'normal'.
        gain (float): an optional scaling factor, default is sqrt(2.0)
    """
def __init__(self, random_type='uniform', gain=math.sqrt(2.0)):
assert random_type in ('uniform', 'normal')
self.initializer = xavier_uniform_ if random_type == 'uniform' else xavier_normal_
self.gain = gain
def __call__(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
self.initializer(module.weight.data, gain=self.gain)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
if module.weight is not None:
module.weight.data.fill_(1)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
self.initializer(module.weight.data, gain=self.gain)
if module.bias is not None:
module.bias.data.zero_()
class KaimingInitializer(object):
def __init__(
self,
slope=0,
mode='fan_out',
nonlinearity='relu',
random_type='normal'):
assert random_type in ('uniform', 'normal')
self.slope = slope
self.mode = mode
self.nonlinearity = nonlinearity
self.initializer = kaiming_uniform_ if random_type == 'uniform' else kaiming_normal_
def __call__(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
self.initializer(
module.weight.data,
self.slope,
self.mode,
self.nonlinearity)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
if module.weight is not None:
module.weight.data.fill_(1)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
self.initializer(
module.weight.data,
self.slope,
self.mode,
self.nonlinearity)
if module.bias is not None:
module.bias.data.zero_()
class ZeroLastGamma(object):
    """Zero-initialize the weight (gamma) of the last BatchNorm in each residual block.
    Note that this must be applied after the other initializers, which would otherwise
    overwrite the zeroed weights.
    """
def __init__(self, block_name='Bottleneck', bn_name='bn3'):
self.block_name = block_name
self.bn_name = bn_name
def __call__(self, module):
if module.__class__.__name__ == self.block_name:
target_bn = module.__getattr__(self.bn_name)
zeros_(target_bn.weight)
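

# Illustrative usage sketch (not part of this module): the initializers above are
# plain callables over single modules, so they compose with ``nn.Module.apply``.
# The torchvision ResNet-50 below is only an example model choice; ZeroLastGamma's
# defaults ('Bottleneck', 'bn3') happen to match torchvision's ResNet blocks, and
# XavierInitializer can be applied in exactly the same way.
if __name__ == '__main__':
    import torchvision

    model = torchvision.models.resnet50()
    model.apply(KaimingInitializer(random_type='normal'))
    # Must come last so the zeroed gammas are not overwritten by other initializers.
    model.apply(ZeroLastGamma(block_name='Bottleneck', bn_name='bn3'))
    print('initialized', sum(p.numel() for p in model.parameters()), 'parameters')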
|
# File executed when user click on `Run` button
# Must not run any test, just run the user
if __name__ == "__main__":
import template
template.score()
|
#! /usr/bin/env python
"""
Authors: Henning O. Sorensen
Center for Fundamental Research: Metal Structures in Four Dimensions
Risoe National Laboratory for Sustainable Energy
Technical University of Denmark
Frederiksborgvej 399
DK-4000 Roskilde
email:henning.sorensen@risoe.dk
This function largely follows the algorithm described in:
"A Fast Voxel Traversal Algorithm for Ray Tracing" by John Amanatides and Andrew Woo,
Proc. Eurographics '87, Amsterdam, The Netherlands, August 1987, pp 1-10.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as n
#import copy
def pixel_trace(corners):
# Initialize variables
    p_start = n.array([corners[0], corners[1]], float) + 0.5
    p_end = n.array([corners[2], corners[3]], float) + 0.5
zero = 1e-09
final_out = False
t_total = 0
nr_voxels = 0
    nextpix = n.zeros(2, int)
    delta = n.ones(2, int)
    t = n.zeros(2, float)
    t_one = n.zeros(2, float)
#voxel=zeros((product(gridsize),3))
voxel = []
# the ray is defined r0 + t *r
r0 = p_start
r = p_end-r0
    t_max = n.sqrt(n.sum(r*r)) # Maximum ray path length in normalized coordinate system
r = r/t_max
startpix = n.floor(r0) #The pixel where the ray originates
# Set step size and direction in x,y,z
# find first intersection with voxel border
for i in range(2):
if r[i] == 0:
t_one[i] = n.inf # Determine paths for stepping 1 in x,y,z respectively.
t[i] = n.inf # Maybe add a check for r(i) = 0 not to divide by zero
else:
t_one[i] = n.abs(1/r[i]) # Determine paths for stepping 1 in x,y,z respectively.
if r[i] > 0:
t[i] = (n.floor(r0[i])+1-r0[i])/r[i]
else:
delta[i] = -1
t[i] = (n.floor(r0[i])-r0[i])/r[i]
# Find which voxel border is intersected next
    while t_total < t_max - zero: # make sure an extra step is not taken if t_total essentially equals t_max
        t_old = t
if t[0] < t[1]:
#print "%i : x<y, " %nr_voxels
pix = nextpix.copy()
nextpix[0] = nextpix[0] + delta[0]
t_voxel = t[0] - t_total
t_total = t[0]
t[0] = t[0] + t_one[0]
else:
#print "%i : y<x" %nr_voxels
pix = nextpix.copy()
nextpix[1] = nextpix[1] + delta[1]
t_voxel = t[1] - t_total
t_total = t[1]
t[1] = t[1] + t_one[1]
# Do not output if t_voxel is zero
if t_voxel > zero:
pix = pix + startpix
nr_voxels = nr_voxels + 1
voxel.append([pix[0],pix[1],t_voxel])
# Correct t_voxel of the last voxel if overshot
if final_out == False: voxel[nr_voxels-1][2] = voxel[nr_voxels-1][2]-(t_total-t_max)
voxel = n.array(voxel)
# Integrate intensity along ray
return voxel
if __name__ == '__main__':
    start = [3.6, 2]
    end = [11, 12]
    # pixel_trace expects a single sequence [x_start, y_start, x_end, y_end]
    pixlist = pixel_trace(start + end)
    print(pixlist)
|
from typing import Tuple, Any
from sty import fg, FgRegister, rs
class Palette:
# Default palette colors
BLUE: Tuple = (5, 156, 205)
GREEN: Tuple = (51, 222, 136)
RED: Tuple = (240, 70, 87)
YELLOW: Tuple = (249, 149, 72)
ORANGE: Tuple = (232, 149, 39)
BL_HX: str = '#%02x%02x%02x' % BLUE
GR_HX: str = '#%02x%02x%02x' % GREEN
RD_HX: str = '#%02x%02x%02x' % RED
YL_HX: str = '#%02x%02x%02x' % YELLOW
OG_HX: str = '#%02x%02x%02x' % ORANGE
def __init__(self, blue=BLUE, green=GREEN, red=RED, yellow=YELLOW, orange=ORANGE):
if isinstance(blue, Tuple):
fg.blue = fg(*blue)
fg.green = fg(*green)
fg.red = fg(*red)
fg.yellow = fg(*yellow)
fg.orange = fg(*orange)
# Prompt Toolkit HEX colors
self.bl: str = '#%02x%02x%02x' % blue
self.gr: str = '#%02x%02x%02x' % green
self.rd: str = '#%02x%02x%02x' % red
self.yl: str = '#%02x%02x%02x' % yellow
self.og: str = '#%02x%02x%02x' % orange
else:
fg.blue = fg(blue)
fg.green = fg(green)
fg.red = fg(red)
fg.yellow = fg(yellow)
fg.orange = fg(orange)
# Prompt Toolkit HEX colors
self.bl: str = '#%02x%02x%02x' % Palette.BLUE
self.gr: str = '#%02x%02x%02x' % Palette.GREEN
self.rd: str = '#%02x%02x%02x' % Palette.RED
self.yl: str = '#%02x%02x%02x' % Palette.YELLOW
self.og: str = '#%02x%02x%02x' % Palette.ORANGE
self.fg_bl: FgRegister = fg.blue
self.fg_gr: FgRegister = fg.green
self.fg_rd: FgRegister = fg.red
self.fg_yl: FgRegister = fg.yellow
self.fg_og: FgRegister = fg.orange
self.rs = rs.all
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks that the main console and subconsole configs are consistent."""
import collections
import difflib
import os
import sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_ROOT = os.path.join(THIS_DIR, '..', '..')
sys.path.insert(1, os.path.join(
SRC_ROOT, "third_party", "protobuf", "python"))
import google.protobuf.text_format
import project_pb2
def compare_builders(name, main_builders, sub_builders):
# Checks that the builders on a subwaterfall on the main waterfall
# are consistent with the builders on that subwaterfall's main page.
# For example, checks that the builders on the "chromium.win" section
# are the same as on the dedicated standalone chromium.win waterfall.
def to_list(builders, category_prefix=''):
desc_list = []
for builder in builders:
desc_list.append('name: ' + ', '.join(builder.name))
# A bot with "chromium.win|foo|bar" on the main waterfall should have
# a category of "foo|bar" on the "chromium.win" subwaterfall.
category = builder.category
if category_prefix:
if category:
category = category_prefix + '|' + category
else:
category = category_prefix
desc_list.append('category: ' + category)
desc_list.append('short_name: ' + builder.short_name)
return desc_list
main_desc = to_list(main_builders)
sub_desc = to_list(sub_builders, name)
if main_desc != sub_desc:
    print ('bot lists differ between main waterfall ' +
           'and stand-alone %s waterfall:' % name)
print '\n'.join(difflib.unified_diff(main_desc, sub_desc,
fromfile='main', tofile=name,
lineterm=''))
print
return False
return True
def main():
project = project_pb2.Project()
with open(os.path.join(THIS_DIR, 'generated', 'luci-milo.cfg'), 'rb') as f:
google.protobuf.text_format.Parse(f.read(), project)
# Maps subwaterfall name to list of builders on that subwaterfall
# on the main waterfall.
subwaterfalls = collections.defaultdict(list)
for console in project.consoles:
if console.id == 'main':
# Chromium main waterfall console.
for builder in console.builders:
subwaterfall = builder.category.split('|', 1)[0]
subwaterfalls[subwaterfall].append(builder)
# subwaterfalls contains the waterfalls referenced by the main console
# Check that every referenced subwaterfall has its own console, unless it's
# explicitly excluded below.
excluded_names = [
# This is the chrome console in src-internal.
'chrome',
]
all_console_names = [console.id for console in project.consoles]
referenced_names = set(subwaterfalls.keys())
missing_names = referenced_names - set(all_console_names + excluded_names)
if missing_names:
print 'Missing subwaterfall console for', missing_names
return 1
# Check that the bots on a subwaterfall match the corresponding bots on the
# main waterfall
all_good = True
for console in project.consoles:
if console.id in subwaterfalls:
if not compare_builders(console.id, subwaterfalls[console.id],
console.builders):
all_good = False
return 0 if all_good else 1
if __name__ == '__main__':
sys.exit(main())
|
"""
Exercise 5
Use the Subway class below to help solve these problems.
class Subway:
fare = 2.4
def __init__(self):
self.stops = ["Alewife", "Davis", "Porter", "Harvard", "Central", "Kendall"]
self.current_stop= "Alewife"
self.direction = "south"
self.passengers = 0
self.total_fares = 0
Create the following methods for the Subway class:
board - Accepts an integer that represents the number of passengers boarding the subway.
disembark - Accepts an integer that represents the number of passengers exiting the subway.
There cannot be a negative number of passengers on a subway. The fewest number of passengers
on a subway is 0.
advance - Moves the subway to the next stop. If self.direction is "south" the subway moves from
Alewife to Kendall. If self.direction is "north" the subway moves from Kendall to Alewife.
When the subway has reached its final stop, self.direction should change.
distance - Accepts a string that represents a stop and returns the number of stops between the subway and the
desired stop. The distance should be a positive number.
change_fare - Accepts a float and changes the fare for all instances of the Subway class.
calculate_fares - Calculates the fare for each passenger boarding the subway and adds it to total_fares.
Expected Output
Use the examples below to test if your program is working as expected.
Boarding the Subway
If self.passengers is 220 and 45 people board the subway, then self.passengers would be 265.
Total Fares
If 100 passengers, in total, have boarded the train, then self.total_fares would be 240.
Exiting the Subway
If self.passengers is 113 and 23 people exit the subway, then self.passengers would be 90.
Advancing the Subway
If the subway is currently at Kendall and is traveling South, advancing the subway would
change self.current_stop to "Central" and self.direction would become "north".
If the subway is currently at Porter and is traveling south, advancing the subway would change
self.current_stop to "Harvard" and self.direction would remain "south".
Calculating Distance
If the subway is currently at Davis and the desired stop is Central, the distance between them is 3 stops.
Changing the Fare
If the subway fare increased to $2.75, then fare should be 2.75 for all instances of the Subway class.
"""
class Subway:
fare = 2.4
def __init__(self):
self.stops = ["Alewife", "Davis", "Porter", "Harvard", "Central", "Kendall"]
self.current_stop = "Davis"
self.direction = "south"
self.passengers = 0
self.total_fares = 0
def board(self, new_passengers):
"""
Adds the number of people boarding the subway.
Also adds to total_fares for the new passengers
"""
self.passengers += new_passengers
self.total_fares += new_passengers * Subway.fare
def disembark(self, passengers_leaving):
"""Subtracts the number of people exiting the subway"""
if passengers_leaving > self.passengers:
self.passengers = 0
else:
self.passengers -= passengers_leaving
def advance(self):
"""Advances the subway to the next stop"""
current_index = self.stops.index(self.current_stop)
if self.direction == "south":
if self.current_stop == "Kendall":
self.current_stop = "Central"
self.direction = "north"
else:
self.current_stop = self.stops[current_index + 1]
else:
if self.current_stop == "Alewife":
self.current_stop = "Davis"
self.direction = "south"
else:
self.current_stop = self.stops[current_index - 1]
def distance(self, desired_stop):
"""
Returns the number of stops between the
current location and the desired stop
"""
current_index = self.stops.index(self.current_stop)
desired_index = self.stops.index(desired_stop)
return abs(current_index - desired_index)
@classmethod
def change_fare(cls, new_fare):
"""Change fare for all instances of Subway class"""
Subway.fare = new_fare
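

# A minimal self-check sketch (not part of the original exercise) that walks through
# the expected-output examples from the docstring above; note that this implementation
# starts at "Davis" rather than "Alewife", so the advance/distance checks are adapted
# to that starting stop.
if __name__ == "__main__":
    subway = Subway()
    subway.board(100)
    assert subway.passengers == 100
    assert abs(subway.total_fares - 240.0) < 1e-9   # 100 boardings at the 2.40 fare
    subway.disembark(23)
    assert subway.passengers == 77
    subway.advance()                                # Davis -> Porter, still heading south
    assert subway.current_stop == "Porter" and subway.direction == "south"
    assert subway.distance("Central") == 2          # Porter and Central are 2 stops apart
    Subway.change_fare(2.75)
    assert Subway.fare == 2.75
    print("All Subway checks passed.")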
|
# -*- coding: utf-8 -*-
#
# BitcoinLib - Python Cryptocurrency Library
# Cache DataBase - SqlAlchemy database definitions for caching
# © 2020 February - 1200 Web Development <http://1200wd.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, BigInteger, String, Boolean, ForeignKey, DateTime, Enum, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, close_all_sessions
# try:
# import mysql.connector
# from parameterized import parameterized_class
# import psycopg2
# from psycopg2 import sql
# from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# except ImportError as e:
# print("Could not import all modules. Error: %s" % e)
# # from psycopg2cffi import compat # Use for PyPy support
# # compat.register()
# pass # Only necessary when mysql or postgres is used
from urllib.parse import urlparse
from bitcoinlib.main import *
_logger = logging.getLogger(__name__)
_logger.info("Default Cache Database %s" % DEFAULT_DATABASE_CACHE)
Base = declarative_base()
class WitnessTypeTransactions(enum.Enum):
legacy = "legacy"
segwit = "segwit"
class DbCache:
"""
Cache Database object. Initialize database and open session when creating database object.
    Create a new database if it doesn't exist yet
"""
def __init__(self, db_uri=None):
self.engine = None
self.session = None
if db_uri is None:
db_uri = DEFAULT_DATABASE_CACHE
elif not db_uri:
return
self.o = urlparse(db_uri)
# if self.o.scheme == 'mysql':
# raise Warning("Could not connect to cache database. MySQL databases not supported at the moment, "
# "because bytes strings are not supported as primary keys")
if not self.o.scheme or len(self.o.scheme) < 2:
db_uri = 'sqlite:///%s' % db_uri
if db_uri.startswith("sqlite://") and ALLOW_DATABASE_THREADS:
db_uri += "&" if "?" in db_uri else "?"
db_uri += "check_same_thread=False"
if self.o.scheme == 'mysql':
db_uri += "&" if "?" in db_uri else "?"
db_uri += 'binary_prefix=true'
self.engine = create_engine(db_uri, isolation_level='READ UNCOMMITTED')
Session = sessionmaker(bind=self.engine)
Base.metadata.create_all(self.engine)
self.db_uri = db_uri
_logger.info("Using cache database: %s://%s:%s/%s" % (self.o.scheme or '', self.o.hostname or '',
self.o.port or '', self.o.path or ''))
self.session = Session()
def drop_db(self):
self.session.commit()
self.session.close_all()
close_all_sessions()
Base.metadata.drop_all(self.engine)
class DbCacheTransactionNode(Base):
"""
Link table for cache transactions and addresses
"""
__tablename__ = 'cache_transactions_node'
txid = Column(LargeBinary(32), ForeignKey('cache_transactions.txid'), primary_key=True)
transaction = relationship("DbCacheTransaction", back_populates='nodes', doc="Related transaction object")
index_n = Column(Integer, primary_key=True, doc="Order of input/output in this transaction")
value = Column(BigInteger, default=0, doc="Value of transaction input")
address = Column(String(255), index=True, doc="Address string base32 or base58 encoded")
script = Column(LargeBinary, doc="Locking or unlocking script")
witnesses = Column(LargeBinary, doc="Witnesses (signatures) used in Segwit transaction inputs")
sequence = Column(BigInteger, default=0xffffffff,
doc="Transaction sequence number. Used for timelock transaction inputs")
is_input = Column(Boolean, primary_key=True, doc="True if input, False if output")
spent = Column(Boolean, default=None, doc="Is output spent?")
ref_txid = Column(LargeBinary(32), index=True, doc="Transaction hash of input which spends this output")
ref_index_n = Column(BigInteger, doc="Index number of transaction input which spends this output")
def prev_txid(self):
if self.is_input:
return self.ref_txid
def output_n(self):
if self.is_input:
return self.ref_index_n
def spending_txid(self):
if not self.is_input:
return self.ref_txid
def spending_index_n(self):
if not self.is_input:
return self.ref_index_n
class DbCacheTransaction(Base):
"""
Transaction Cache Table
Database which stores transactions received from service providers as cache
"""
__tablename__ = 'cache_transactions'
    txid = Column(LargeBinary(32), primary_key=True, doc="Transaction hash or transaction ID as raw bytes")
date = Column(DateTime, doc="Date when transaction was confirmed and included in a block")
    version = Column(BigInteger, default=1,
                     doc="Transaction version. Default is 1 but some wallets use another version number")
    locktime = Column(BigInteger, default=0,
                      doc="Transaction level locktime. Locks the transaction until a specified block height "
                          "(values below 500 million) or until a certain time (a Unix timestamp in seconds "
                          "after 1-jan-1970 for values of 500 million or more). "
                          "Default value is 0 for transactions without locktime")
confirmations = Column(Integer, default=0,
doc="Number of confirmation when this transaction is included in a block. "
"Default is 0: unconfirmed")
block_height = Column(Integer, index=True, doc="Height of block this transaction is included in")
network_name = Column(String(20), doc="Blockchain network name of this transaction")
fee = Column(BigInteger, doc="Transaction fee")
nodes = relationship("DbCacheTransactionNode", cascade="all,delete",
doc="List of all inputs and outputs as DbCacheTransactionNode objects")
order_n = Column(Integer, doc="Order of transaction in block")
witness_type = Column(Enum(WitnessTypeTransactions), default=WitnessTypeTransactions.legacy,
doc="Transaction type enum: legacy or segwit")
class DbCacheAddress(Base):
"""
Address Cache Table
Stores transactions and unspent outputs (UTXO's) per address
"""
__tablename__ = 'cache_address'
address = Column(String(255), primary_key=True, doc="Address string base32 or base58 encoded")
network_name = Column(String(20), doc="Blockchain network name of this transaction")
balance = Column(BigInteger, default=0, doc="Total balance of UTXO's linked to this key")
last_block = Column(Integer, doc="Number of last updated block")
last_txid = Column(LargeBinary(32), doc="Transaction ID of latest transaction in cache")
n_utxos = Column(Integer, doc="Total number of UTXO's for this address")
n_txs = Column(Integer, doc="Total number of transactions for this address")
class DbCacheBlock(Base):
"""
Block Cache Table
Stores block headers
"""
__tablename__ = 'cache_blocks'
height = Column(Integer, primary_key=True, doc="Height or sequence number for this block")
block_hash = Column(LargeBinary(32), index=True, doc="Hash of this block")
network_name = Column(String(20), doc="Blockchain network name")
version = Column(BigInteger, doc="Block version to specify which features are used (hex)")
prev_block = Column(LargeBinary(32), doc="Block hash of previous block")
merkle_root = Column(LargeBinary(32), doc="Merkle root used to validate transaction in block")
time = Column(BigInteger, doc="Timestamp to indicated when block was created")
bits = Column(BigInteger, doc="Encoding for proof-of-work, used to determine target and difficulty")
nonce = Column(BigInteger, doc="Nonce (number used only once or n-once) is used to create different block hashes")
tx_count = Column(Integer, doc="Number of transactions included in this block")
class DbCacheVars(Base):
"""
Table to store various blockchain related variables
"""
__tablename__ = 'cache_variables'
varname = Column(String(50), primary_key=True, doc="Variable unique name")
network_name = Column(String(20), primary_key=True, doc="Blockchain network name of this transaction")
value = Column(String(255), doc="Value of variable")
type = Column(String(20), doc="Type of variable: int, string or float")
expires = Column(DateTime, doc="Datetime value when variable expires")
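

# Illustrative usage sketch (not part of the library): with the classes defined above,
# a cache database can be opened and queried through the plain SQLAlchemy session it
# wraps. The URI and address below are placeholders, not real values.
#
#   cache = DbCache('sqlite:///bitcoinlib_cache_example.sqlite')
#   unspent = (cache.session.query(DbCacheTransactionNode)
#              .filter_by(address='bc1q-example-address', is_input=False, spent=False)
#              .all())
#   for utxo in unspent:
#       print(utxo.txid.hex(), utxo.index_n, utxo.value)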
|
from __future__ import absolute_import
from pygments.style import Style
from pygments.token import *
COLOR_1 = '#e6dccc'
COLOR_2 = '#1e214f'
COLOR_3 = '#b5242e'
COLOR_4 = '#1e4f34'
COLOR_5 = '#537b99'
class Attest(Style):
default_style = ""
styles = {
Comment: 'italic ' + COLOR_5,
Keyword: 'bold ' + COLOR_2,
Operator: 'bold ' + COLOR_4,
Punctuation: '#777',
Number: COLOR_4,
Name: '#000',
Name.Decorator: 'bold ' + COLOR_2,
Name.Builtin: COLOR_2,
Name.Exception: 'bold ' + COLOR_3,
Generic.Error: 'bold ' + COLOR_3,
String: COLOR_3
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: __init__.py
#
# Copyright 2021 Costas Tyfoxylos, Jenda Brands, Theodoor Scholte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
awsenergylabelerlib package.
Import all parts from awsenergylabelerlib here
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
from ._version import __version__
from .awsenergylabelerlib import EnergyLabeler, LandingZone, SecurityHub
from .awsenergylabelerlibexceptions import (InvalidFrameworks,
InvalidOrNoCredentials,
InvalidAccountListProvided,
InvalidRegionListProvided,
MutuallyExclusiveArguments,
NoAccess,
NoRegion,
AccountsNotPartOfLandingZone,
UnableToRetrieveSecurityHubRegions,
InvalidRegion)
from .configuration import (ALL_LANDING_ZONE_EXPORT_TYPES,
ALL_ACCOUNT_EXPORT_TYPES,
DATA_EXPORT_TYPES,
ACCOUNT_METRIC_EXPORT_TYPES,
LANDING_ZONE_METRIC_EXPORT_TYPES,
SECURITY_HUB_ACTIVE_REGIONS,
ACCOUNT_THRESHOLDS,
LANDING_ZONE_THRESHOLDS,
DEFAULT_SECURITY_HUB_FILTER,
DEFAULT_SECURITY_HUB_FRAMEWORKS)
from .entities import DataExporter, AwsAccount
from .validations import (is_valid_account_id,
are_valid_account_ids,
validate_account_ids,
validate_allowed_denied_account_ids,
is_valid_region,
get_invalid_regions,
validate_regions,
validate_allowed_denied_regions,
DestinationPath)
__author__ = 'Costas Tyfoxylos <ctyfoxylos@schubergphilis.com>'
__docformat__ = '''google'''
__date__ = '''09-11-2021'''
__copyright__ = '''Copyright 2021, Costas Tyfoxylos, Jenda Brands, Theodoor Scholte'''
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<ctyfoxylos@schubergphilis.com>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
# This is to 'use' the module(s), so lint doesn't complain
assert __version__
assert EnergyLabeler
assert LandingZone
assert SecurityHub
assert InvalidFrameworks
assert InvalidOrNoCredentials
assert InvalidAccountListProvided
assert InvalidRegionListProvided
assert MutuallyExclusiveArguments
assert NoAccess
assert NoRegion
assert AccountsNotPartOfLandingZone
assert UnableToRetrieveSecurityHubRegions
assert InvalidRegion
assert ALL_LANDING_ZONE_EXPORT_TYPES
assert ALL_ACCOUNT_EXPORT_TYPES
assert DATA_EXPORT_TYPES
assert LANDING_ZONE_METRIC_EXPORT_TYPES
assert ACCOUNT_METRIC_EXPORT_TYPES
assert SECURITY_HUB_ACTIVE_REGIONS
assert ACCOUNT_THRESHOLDS
assert LANDING_ZONE_THRESHOLDS
assert DEFAULT_SECURITY_HUB_FILTER
assert DEFAULT_SECURITY_HUB_FRAMEWORKS
assert DataExporter
assert AwsAccount
assert is_valid_account_id
assert are_valid_account_ids
assert validate_account_ids
assert validate_allowed_denied_account_ids
assert is_valid_region
assert get_invalid_regions
assert validate_regions
assert validate_allowed_denied_regions
assert DestinationPath
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for both in-memory and in-disk data structures.
"""
import abc
from BaseClasses import BaseEntity
from utils import utils, InputData, InputTypes
class DataObjectsCollection(InputData.ParameterInput):
"""
Class for reading in a collection of data objects.
"""
DataObjectsCollection.createClass("DataObjects")
#
#
#
#
class DataObject(utils.metaclass_insert(abc.ABCMeta, BaseEntity)):
"""
Base class. Data objects are RAVEN's method for storing data internally and passing it from one
RAVEN entity to another. Fundamentally, they consist of a collection of realizations, each of
which contains inputs, outputs, and pointwise metadata. In addition, the data object has global
metadata. The pointwise inputs and outputs could be floats, time-dependent, or ND-dependent variables.
    This base class is used to enforce a consistent API across all data containers.
"""
### INPUT SPECIFICATION ###
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for class "cls".
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for specifying the input of cls.
"""
inputSpecification = super(DataObject,cls).getInputSpecification()
inputSpecification.addParam('hierarchical', InputTypes.BoolType)
inputInput = InputData.parameterInputFactory('Input',contentType=InputTypes.StringType) #TODO list
inputSpecification.addSub(inputInput)
outputInput = InputData.parameterInputFactory('Output', contentType=InputTypes.StringType) #TODO list
inputSpecification.addSub(outputInput)
# TODO this should be specific to ND set
indexInput = InputData.parameterInputFactory('Index',contentType=InputTypes.StringType) #TODO list
indexInput.addParam('var',InputTypes.StringType,True)
inputSpecification.addSub(indexInput)
optionsInput = InputData.parameterInputFactory("options")
for option in ['operator','pivotParameter']:
optionSubInput = InputData.parameterInputFactory(option, contentType=InputTypes.StringType)
optionsInput.addSub(optionSubInput)
for option in ['inputRow','outputRow']:
optionSubInput = InputData.parameterInputFactory(option, contentType=InputTypes.IntegerType)
optionsInput.addSub(optionSubInput)
for option in ['outputPivotValue','inputPivotValue']:
optionSubInput = InputData.parameterInputFactory(option, contentType=InputTypes.FloatType)
optionsInput.addSub(optionSubInput)
inputSpecification.addSub(optionsInput)
#inputSpecification.addParam('type', param_type = InputTypes.StringType, required = False)
#inputSpecification.addSub(InputData.parameterInputFactory('Input',contentType=InputTypes.StringType))
#inputSpecification.addSub(InputData.parameterInputFactory('Output',contentType=InputTypes.StringType))
#inputSpecification.addSub(InputData.parameterInputFactory('options',contentType=InputTypes.StringType))
return inputSpecification
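# Illustrative XML input matching the specification built above (a sketch only;
# the outer tag name and the variable names x, y, out, time are made up, since
# the real tag comes from the concrete DataObject subclass):
#   <SomeDataObject name="samples" hierarchical="False">
#     <Input>x, y</Input>
#     <Output>out</Output>
#     <Index var="time">out</Index>
#     <options>
#       <pivotParameter>time</pivotParameter>
#       <outputRow>-1</outputRow>
#     </options>
#   </SomeDataObject>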
def __init__(self):
"""
Constructor.
@ In, None
@ Out, None
"""
super().__init__()
self.name = 'DataObject'
self.printTag = self.name
self._sampleTag = 'RAVEN_sample_ID' # column name to track samples
self.protectedTags = ['RAVEN_parentID','RAVEN_isEnding'] # list(str) protected RAVEN variable names,
# should not be available to user as var names
self._inputs = [] # list(str) of input variables
self._outputs = [] # list(str) of output variables
self._metavars = [] # list(str) of POINTWISE metadata variables
self._orderedVars = [] # list(str) of vars IN ORDER of their index
self._meta = {} # dictionary to collect meta until data is collapsed
self._selectInput = None # if not None, describes how to collect input data from history
self._selectOutput = None # if not None, describes how to collect output data from history
self._pivotParams = {} # independent dimensions as keys, values are the vars that depend on them
self._fromVarToIndex = {} # mapping between variables and indexes ({var:index}).
# "index" here refers to dimensional variables (e.g. time, x, y, z etc)
self._aliases = {} # variable aliases
self._data = None # underlying data structure
self._collector = None # object used to collect samples
self._inputKDTree = None # for finding outputs given inputs (pointset only?)
self._scaleFactors = None # scaling factors inputs as {var:(mean,scale)}
self.hierarchical = False # this flag controls the printing/plotting of the dataobject
# in case it is a hierarchical one.
# If True, all the branches are going to be printed/plotted independently,
# otherwise they are going to be reconstructed
@property
def sampleTag(self):
"""
Getter property for _sampleTag, the tag that identifies the realization label for RAVEN
@ In, None
@ Out, sampleTag, string, variable name
"""
return self._sampleTag
def _readMoreXML(self,xmlNode):
"""
Initializes data object based on XML input
@ In, xmlNode, xml.etree.ElementTree.Element or InputData.ParameterInput specification, input information
@ Out, None
"""
if isinstance(xmlNode,InputData.ParameterInput):
inp = xmlNode
else:
inp = DataObject.getInputSpecification()()
inp.parseNode(xmlNode)
# get hierarchical strategy
self.hierarchical = inp.parameterValues.get("hierarchical", False)
pivotParam = None # single pivot parameter given in the input
for child in inp.subparts:
# TODO check for repeats, "notAllowedInputs", names in both input and output space
if child.getName() == 'Input':
self._inputs.extend(list(x.strip() for x in child.value.split(',') if x.strip()!=''))
elif child.getName() == 'Output':
self._outputs.extend(list(x.strip() for x in child.value.split(',') if x.strip()!=''))
elif child.getName() == 'Index':
depends = list(d.strip() for d in child.value.split(','))
var = child.parameterValues['var']
self._pivotParams[var] = depends
# options node
elif child.getName() == 'options':
duplicateInp = False # if True, then multiple specification options were used for input
duplicateOut = False # if True, then multiple specification options were used for output
for cchild in child.subparts:
# pivot
if cchild.getName() == 'pivotParameter':
# TODO not applicable to ND, only to HistSet, but read it here
# TODO add checks somewhere if both "index" and "pivotParameter" are provided
self._tempPivotParam = cchild.value.strip()
# input pickers
elif cchild.getName() in ['inputRow','inputPivotValue']:
if self._selectInput is not None:
duplicateInp = True
self.setSelectiveInput(cchild.getName(),cchild.value)
# output pickers
elif cchild.getName() in ['outputRow','outputPivotValue','operator']:
if self._selectOutput is not None:
duplicateOut = True
self._selectOutput = (cchild.getName(),cchild.value)
# TODO check this in the input checker instead of here?
if duplicateInp:
self.raiseAWarning('Multiple options were given to specify the input row to read! Using last entry:',self._selectInput)
if duplicateOut:
self.raiseAWarning('Multiple options were given to specify the output row to read! Using last entry:',self._selectOutput)
# end options node
# end input reading
# clear keywords InputPlaceHolder but NOT the OutputPlaceHolder, for legacy reasons
while 'InputPlaceHolder' in self._inputs:
self._inputs.remove('InputPlaceHolder')
#while 'OutputPlaceHolder' in self._outputs:
# self._outputs.remove('OutputPlaceHolder')
# set default pivot parameters, if needed
self._setDefaultPivotParams()
# remove index variables from input/output spaces, but silently, since we'll still have them available later
for index in self._pivotParams.keys():
try:
self._outputs.remove(index)
except ValueError:
pass #not requested as output anyway
try:
self._inputs.remove(index)
except ValueError:
pass #not requested as input anyway
# check inputs and outputs, if there were duplicates, error out
dups = set(self._inputs).intersection(self._outputs)
if dups:
self.raiseAnError(IOError, 'Variables: "', ','.join(dups), '" are specified in both "Input" and "Output" Node of DataObject "', self.name,'"')
self._orderedVars = self._inputs + self._outputs
# check if protected vars have been violated
if set(self.protectedTags).intersection(set(self._orderedVars)):
self.raiseAnError(IOError, 'Input, Output and Index variables can not be part of RAVEN protected tags: '+','.join(self.protectedTags))
def _setDefaultPivotParams(self):
"""
Allows setting default pivot parameters. In general, does nothing.
@ In, None
@ Out, None
"""
pass
def setPivotParams(self,params):
"""
Sets the pivot parameters for variables.
@ In, params, dict, var:[params] as str:list(str)
@ Out, None
"""
# TODO typechecking, assertions
coords = set().union(*params.values())
for coord in coords:
if coord not in self._pivotParams:
self._pivotParams[coord] = list(var for var in params.keys() if coord in params[var])
else:
self._pivotParams[coord] = list(set(list(var for var in params.keys() if
coord in params[var]) + self._pivotParams[coord]))
def setSelectiveInput(self,option,value):
"""
Sets the input selection method for retrieving subset data.
@ In, option, str, from [inputRow,inputPivotValue]
@ In, value, int or float, either the index (row number) or the pivot value (will be cast if other type)
@ Out, None
"""
assert(option in ['inputRow','inputPivotValue'])
if option == 'inputRow':
value = int(value)
elif option == 'inputPivotValue':
value = float(value)
self._selectInput = (option,value)
self.raiseADebug('Set selective input to',self._selectInput)
def setSelectiveOutput(self,option,value):
"""
Sets the output selection method for retrieving subset data.
@ In, option, str, from [outputRow,outputPivotValue,operator]
@ In, value, int or float or str, index or pivot value or operator name respectively
@ Out, None
"""
assert(option in ['outputRow','outputPivotValue','operator'])
if option == 'outputRow':
value = int(value)
elif option == 'outputPivotValue':
value = float(value)
elif option == 'operator':
value = value.strip().lower()
self._selectOutput = (option,value)
self.raiseADebug('Set selective output to',self._selectOutput)
######################
# DATA CONTAINER API #
######################
@abc.abstractmethod
def addExpectedMeta(self,keys, params={}):
"""
Registers meta to look for in realization
@ In, keys, set(str), keys to register
@ In, params, dict, optional, {key:[indexes]}, keys of the dictionary are the variable names,
values of the dictionary are lists of the corresponding indexes/coordinates of given variable
@ Out, None
"""
pass
@abc.abstractmethod
def addMeta(self,tag,xmlDict):
"""
Adds general (not pointwise) metadata to this data object. Can add several values at once, collected
as a dict keyed by target variables.
Data ends up being written as follows (see docstrings above for dict structure)
- A good default for 'target' is 'general' if there's not a specific target
<tag>
<target>
<scalarMetric>value</scalarMetric>
<scalarMetric>value</scalarMetric>
<vectorMetric>
<wrt>value</wrt>
<wrt>value</wrt>
</vectorMetric>
</target>
<target>
<scalarMetric>value</scalarMetric>
<vectorMetric>
<wrt>value</wrt>
</vectorMetric>
</target>
</tag>
@ In, tag, str, section to add metadata to, usually the data submitter (BasicStatistics, DataObject, etc)
@ In, xmlDict, dict, data to change, of the form {target:{scalarMetric:value,scalarMetric:value,vectorMetric:{wrt:value,wrt:value}}}
@ Out, None
"""
pass
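# Illustrative call (a sketch only; the metric and variable names are made up,
# 'BasicStatistics' is just the typical submitter named in the docstring):
#   self.addMeta('BasicStatistics',
#                {'x': {'mean': 0.5, 'sigma': 0.1,
#                       'sensitivity': {'y': 0.3, 'z': 0.7}}})
# which ends up written as <BasicStatistics><x>...</x></BasicStatistics>
# following the scalar/vector layout sketched in the docstring above.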
@abc.abstractmethod
def addRealization(self,rlz):
"""
Adds a "row" (or "sample") to this data object.
This is the method to add data to this data object.
Note that rlz can include many more variables than this data object actually wants.
Before actually adding the realization, data is formatted for this data object.
@ In, rlz, dict, {var:val} format where
"var" is the variable name as a string,
"val" is either a float or a np.ndarray of values.
@ Out, None
"""
pass
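# Illustrative realization dict (variable names are made up; values follow the
# {var: float or np.ndarray} convention described above):
#   rlz = {'x': 1.0, 'y': 2.5,
#          'time': np.array([0.0, 0.5, 1.0]),
#          'out': np.array([0.1, 0.4, 0.9])}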
@abc.abstractmethod
def addVariable(self,varName,values,classify='meta'):
"""
Adds a variable/column to the data. "values" needs to be as long as self.size.
@ In, varName, str, name of new variable
@ In, values, np.array, new values (floats/str for scalars, xr.DataArray for hists)
@ In, classify, str, optional, either 'input', 'output', or 'meta'
@ Out, None
"""
pass
@abc.abstractmethod
def asDataset(self):
"""
Casts this dataobject as an xr.Dataset.
Functionally, typically collects the data from self._collector and places it in self._data.
Efficiency note: this is the slowest part of typical data collection.
@ In, None
@ Out, xarray.Dataset, all the data from this data object.
"""
pass
@abc.abstractmethod
def constructNDSample(self,vals,dims,coords,name=None):
"""
Constructs a single realization instance (for one variable) from a realization entry.
@ In, vals, np.ndarray, should have shape of (len(coords[d]) for d in dims)
@ In, dims, list(str), names of dependent dimensions IN ORDER of appearance in vals, e.g. ['time','x','y']
@ In, coords, dict, {dimension:list(float)}, values for each dimension at which 'val' was obtained, e.g. {'time': [1.0, 2.0, 3.0]}
@ Out, obj, xr.DataArray, completed realization instance suitable for sending to "addRealization"
"""
pass
@abc.abstractmethod
def getDimensions(self,var):
"""
Provides the independent dimensions that this variable depends on.
To get all dimensions at once, use self.indexes property.
@ In, var, str, name of variable (if None, give all)
@ Out, dims, dict, {name:values} of independent dimensions
"""
pass
@abc.abstractmethod
def getMeta(self,keys=None,pointwise=False,general=False):
"""
Method to obtain entries in the metadata. If neither pointwise nor general, then returns an empty dict.
@ In, keys, list(str), optional, the keys (or main tag) to search for. If None, return all.
@ In, pointwise, bool, optional, if True then matches will be searched in the pointwise metadata
@ In, general, bool, optional, if True then matches will be searched in the general metadata
@ Out, meta, dict, key variables/xpaths to data object entries (column if pointwise, XML if general)
"""
pass
@abc.abstractmethod
def getVars(self,subset=None):
"""
Gives list of variables that are part of this dataset.
@ In, subset, str, optional, if given can return 'input','output','meta' subset types
@ Out, getVars, list(str), list of variable names requested
"""
pass
@abc.abstractmethod
def getVarValues(self,var):
"""
Returns the sampled values of "var"
@ In, var, str or list(str), name(s) of variable(s)
@ Out, res, xr.DataArray, samples (or dict of {var:xr.DataArray} if multiple variables requested)
"""
pass
@abc.abstractmethod
def realization(self, index=None, matchDict=None, noMatchDict=None, tol=1e-15, unpackXArray=False, asDataSet = False, options = None):
"""
Method to obtain a realization from the data, either by index or matching value.
Either "index" or one of ("matchDict", "noMatchDict") must be supplied.
If matchDict and no match is found, will return (len(self),None) after the pattern of numpy, scipy
@ In, index, int, optional, number of the row to retrieve (by index, not by "sample")
@ In, matchDict, dict, optional, {key:val} to search for matches
@ In, noMatchDict, dict, optional, {key:val} to search for antimatches (vars should NOT match vals within tolerance)
@ In, asDataSet, bool, optional, return realization from the data as a DataSet
@ In, tol, float, optional, tolerance to which match should be made
@ In, unpackXArray, bool, optional, True if the coordinates of the xarray variables must be exposed in the dict (e.g. if P(t) => {P:ndarray, t:ndarray}) (valid only for dataset)
@ In, options, dict, optional, options to be applied to the search
@ Out, index, int, optional, index where found (or len(self) if not found), only returned if matchDict
@ Out, rlz, dict, realization requested (None if not found)
"""
pass
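# Illustrative call patterns on a concrete data object (a sketch only; 'data'
# and the variable name 'x' are made up):
#   rlz = data.realization(index=3)                     # fetch by row index
#   idx, rlz = data.realization(matchDict={'x': 1.0})   # search by value;
#                                                       # idx == len(data) if no match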
@abc.abstractmethod
def load(self,fname,style='netCDF',**kwargs):
"""
Reads this dataset from disk based on the format.
@ In, fname, str, path and name of file to read
@ In, style, str, optional, options are enumerated below
@ In, kwargs, dict, optional, additional arguments to pass to reading function
@ Out, None
"""
pass
@abc.abstractmethod
def remove(self,realization=None,variable=None):
"""
Used to remove either a realization or a variable from this data object.
@ In, realization, dict or int, optional, (matching or index of) realization to remove
@ In, variable, str, optional, name of "column" to remove
@ Out, None
"""
pass
@abc.abstractmethod
def reset(self):
"""
Sets this object back to its initial state.
@ In, None
@ Out, None
"""
pass
@abc.abstractmethod
def sliceByIndex(self,axis):
"""
Returns list of realizations at "snapshots" along "axis"
@ In, axis, str, name of index along which to obtain slices
@ Out, slices, list, list of slices
"""
pass
@abc.abstractmethod
def write(self,fname,style='netCDF',**kwargs):
"""
Writes this dataset to disk based on the format.
@ In, fname, str, path and name of file to write
@ In, style, str, optional, options are enumerated below
@ In, kwargs, dict, optional, additional arguments to pass to writing function
@ Out, None
"""
pass
|
# coding=utf-8
"""
Convert a temperature in Fahrenheit to Celsius
F = 1.8C + 32
Version: 0.1
Author: huijz
Date: 2020-08-21
"""
f = float(input('Please enter a temperature in Fahrenheit: '))
c = (f - 32) / 1.8
print('%.2f Fahrenheit = %.2f Celsius' % (f, c))
|
import sys
import textwrap
import argparse
import numpy as np
import networkx as nx
import random
import json
from config import *
from max_circulation import *
import pickle
from functools import reduce
# generates the start and end nodes for a fixed set of topologies - hotnets/line/simple graph
def generate_workload_standard(filename, payment_graph_topo, workload_type, total_time, \
log_normal, kaggle_size, txn_size_mean, timeout_value, generate_json_also, circ_frac, std_workload=True):
# by default ASSUMES NO END HOSTS
dag_frac = round(1 - circ_frac,3)
demand_dict_dag = dict()
demand_dict_circ = dict()
# define start and end nodes and amounts
# edge a->b in payment graph appears in index i as start_nodes[i]=a, and end_nodes[i]=b
if payment_graph_topo == 'hotnets_topo':
if circ_frac == 1:
start_nodes = [0, 1, 2, 2, 3, 3, 4]
end_nodes = [1, 3, 1, 4, 2, 0, 2]
amt_relative = [1, 2, 1, 1, 1, 1, 1]
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
else:
start_nodes = [0,3,0,1,2,3,2,4]
end_nodes = [4,0,1,3,1,2,4,2]
amt_relative = [1,2,1,2,1,2,2,1]
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
graph = hotnets_topo_graph
elif payment_graph_topo == 'toy_dctcp':
start_nodes = [2, 4, 6, 8, 10, 3, 5, 7, 9, 11]
end_nodes = [3, 5, 7, 9, 11, 2, 4, 6, 8, 10]
amt_relative = [1] * 10
print("here generating topo")
amt_absolute = [SCALE_AMOUNT * MEAN_RATE * x for x in amt_relative]
graph = toy_dctcp_graph
elif payment_graph_topo == 'simple_deadlock':
start_nodes = [1,0,2]
end_nodes = [2,2,0]
amt_relative = [2,1,2]
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
graph = simple_deadlock_graph
elif payment_graph_topo == 'dag_example':
start_nodes = [0, 2, 1]
end_nodes = [2, 1, 2]
amt_relative = [10, 5, 5]
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
graph = dag_example_graph
elif payment_graph_topo == 'parallel_graph':
start_nodes = [0, 2, 1, 3]
end_nodes = [2, 0, 3, 1]
amt_relative = [1, 1, 1, 1]
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
graph = parallel_graph
elif payment_graph_topo == 'simple_line':
if "five" in filename:
num_nodes = 5
graph = five_line_graph
else:
num_nodes = 3
graph = simple_line_graph
print(num_nodes)
start_nodes = [0, num_nodes - 1]
end_nodes = [num_nodes - 1, 0]
amt_relative = [MEAN_RATE] * 2
'''start_nodes = [0, 2, 0, 1]
end_nodes = [2, 0, 1, 0]
amt_relative = [MEAN_RATE, MEAN_RATE, 2*MEAN_RATE, 2*MEAN_RATE]'''
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
demand_dict_circ[0,num_nodes - 1] = MEAN_RATE
demand_dict_circ[num_nodes - 1, 0] = MEAN_RATE
demand_dict_dag[0, num_nodes - 1] = MEAN_RATE
elif payment_graph_topo == 'hardcoded_circ':
start_nodes = [0, 1, 2, 3, 4]
end_nodes = [1, 2, 3, 4, 0]
amt_relative = [MEAN_RATE] * 5
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
graph = five_node_graph
# generate circulation instead if you need a circulation
if not std_workload:
start_nodes, end_nodes, amt_relative = [], [], []
num_nodes = graph.number_of_nodes()
""" generate circulation and dag demand """
if circ_frac > 0:
demand_dict_circ = circ_demand(list(graph), mean=MEAN_RATE, \
std_dev=CIRCULATION_STD_DEV)
if dag_frac > 0:
demand_dict_dag = dag_demand(list(graph), mean=MEAN_RATE, \
std_dev=CIRCULATION_STD_DEV)
demand_dict = { key: circ_frac * demand_dict_circ.get(key, 0) +
dag_frac * demand_dict_dag.get(key, 0) \
for key in set(demand_dict_circ) | set(demand_dict_dag) }
print(demand_dict)
for i, j in list(demand_dict.keys()):
start_nodes.append(i)
end_nodes.append(j)
amt_relative.append(demand_dict[i, j])
if payment_graph_topo != 'hardcoded_circ':
amt_absolute = [SCALE_AMOUNT * MEAN_RATE * x for x in amt_relative]
print(amt_absolute)
if generate_json_also:
generate_json_files(filename + '.json', graph, graph, start_nodes, end_nodes, amt_absolute)
write_txns_to_file(filename + '_workload.txt', start_nodes, end_nodes, amt_absolute,\
workload_type, total_time, log_normal, kaggle_size, txn_size_mean, timeout_value)
# write the given set of txns denoted by start_node -> end_node with absolute_amts as passed in
# to a separate workload file
# workload file of form
# [amount] [timeSent] [sender] [receiver] [priorityClass] [timeout_value]
# write to file - assume no priority for now
# transaction sizes are either constant, log-normally distributed, or drawn from the Kaggle amount data
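# For illustration, one generated line might look like (values are made up):
#   10.0 0.25 3 7 0 5.0
# i.e. a 10.0-unit transaction sent at t=0.25s from node 3 to node 7, priority
# class 0, with a 5.0s timeout.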
def write_txns_to_file(filename, start_nodes, end_nodes, amt_absolute,\
workload_type, total_time, log_normal, kaggle_size, txn_size_mean, timeout_value, mode="w", start_time=0):
outfile = open(filename, mode)
if "newseed" in filename:
print("newseed")
np.random.seed(12493)
if distribution == 'uniform':
# constant transaction size generated at uniform intervals
for k in range(len(start_nodes)):
cur_time = 0
'''if (start_nodes[k] == 1 or end_nodes[k] == 1):
cur_time = 300'''
while cur_time < total_time:
rate = amt_absolute[k]
if log_normal:
txn_size = MIN_TXN_SIZE/10
while (txn_size < MIN_TXN_SIZE or txn_size > MAX_TXN_SIZE):
txn_power = np.random.normal(loc=LOG_NORMAL_MEAN, scale=LOG_NORMAL_SCALE)
txn_size = round(10 ** txn_power, 1)
else:
txn_size = txn_size_mean
outfile.write(str(txn_size) + " " + str(cur_time + start_time) + " " + str(start_nodes[k]) + \
" " + str(end_nodes[k]) + " 0 " + str(timeout_value) + "\n")
cur_time += (1.0 / rate)
elif distribution == 'poisson':
if kaggle_size:
print("generating from kaggle for size")
amt_dist = np.load(KAGGLE_AMT_DIST_FILENAME)
num_amts = amt_dist.item().get('p').size
# constant transaction size to be sent in a poisson fashion
for k in range(len(start_nodes)):
current_time = 0.0
rate = amt_absolute[k]*1.0
beta = (1.0) / (1.0 * rate)
# if the rate is higher, given pair will have more transactions in a single second
while current_time < total_time:
if log_normal:
txn_size = MIN_TXN_SIZE/10
while (txn_size < MIN_TXN_SIZE or txn_size > MAX_TXN_SIZE):
txn_power = np.random.normal(loc=LOG_NORMAL_MEAN, scale=LOG_NORMAL_SCALE)
txn_size = round(10 ** txn_power, 1)
elif kaggle_size:
# draw an index according to the amount pmf
txn_idx = np.random.choice(num_amts, 1, \
p=amt_dist.item().get('p'))[0]
# map the index to a tx amount
txn_size = int(round(amt_dist.item().get('bins')[txn_idx], 1))
else:
txn_size = txn_size_mean
outfile.write(str(txn_size) + " " + str(current_time + start_time) + " " + str(start_nodes[k]) \
+ " " + str(end_nodes[k]) + " 0 " + str(timeout_value) + "\n")
time_incr = np.random.exponential(beta)
current_time = current_time + time_incr
elif distribution == 'kaggle':
# load the data
amt_dist = np.load(KAGGLE_AMT_DIST_FILENAME)
time_dist = np.load(KAGGLE_TIME_DIST_FILENAME)
num_amts = amt_dist.item().get('p').size
num_times = time_dist.item().get('p').size
# transaction sizes drawn from kaggle data, as is inter-transaction time
for k in range(len(start_nodes)):
current_time = 0.0
while current_time < total_time:
# draw an index according to the amount pmf
txn_idx = np.random.choice(num_amts, 1, \
p=amt_dist.item().get('p'))[0]
# map the index to a tx amount
txn_size = amt_dist.item().get('bins')[txn_idx]
outfile.write(str(txn_size) + " " + str(current_time + start_time) + " " + str(start_nodes[k]) + \
" " + str(end_nodes[k]) + " 0 " + str(timeout_value) + "\n")
# draw an index according to the time pmf
time_idx = np.random.choice(num_times, 1, \
p=time_dist.item().get('p'))[0]
# map index to an inter-tx time
time_incr = time_dist.item().get('bins')[time_idx]
current_time = current_time + time_incr
outfile.close()
np.random.seed(SEED_LIST[args.run_num])
# generates the json file necessary for the distributed testbed to be used to test
# the lnd implementation
def generate_json_files(filename, graph, inside_graph, start_nodes, end_nodes, amt_absolute):
for balance in balance_list:
json_string = {}
# create btcd connections
# routers connected to each other and end hosts connected to respective router
btcd_connections = []
for i in range(graph.number_of_nodes() - 1):
connection = {"src": str(i) + "r", "dst" : str(i + 1) + "r"}
btcd_connections.append(connection)
connection = {"src": str(i) + "e", "dst" : str(i) + "r"}
btcd_connections.append(connection)
connection = {"src": str(graph.number_of_nodes() - 1) + "e", "dst" : str(graph.number_of_nodes() - 1) + "r"}
btcd_connections.append(connection)
json_string["btcd_connections"] = btcd_connections
# miner node
json_string["miner"] = "0r"
# create nodes for end hosts and router nodes and assign them distinct ips
nodes = []
for n in graph.nodes():
node = {"name": str(n) + "r", "ip" : "10.0.1." + str(100 + n)}
nodes.append(node)
node = {"name": str(n) + "e", "ip" : "10.0.2." + str(100 + n)}
nodes.append(node)
json_string["nodes"] = nodes
# creates all the lnd channels
edges = []
for (u,v) in graph.edges():
if u == v:
cap = ENDHOST_LND_ONE_WAY_CAPACITY
node_type = "e"
else:
cap = balance * 400 / 2
node_type = "r"
if u <= v:
edge = {"src": str(u) + "r", "dst": str(v) + node_type, "capacity" : cap}
edges.append(edge)
json_string["lnd_channels"] = edges
# creates the string for the demands
demands = []
for s, e, a in zip(start_nodes, end_nodes, amt_absolute):
demand_entry = {"src": str(s) + "e", "dst": str(e) + "e",\
"rate": a}
demands.append(demand_entry)
json_string["demands"] = demands
with open(filename + '_' + str(balance) + '.json', 'w') as outfile:
json.dump(json_string, outfile, indent=8)
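# Illustrative shape of one emitted JSON file (the concrete values are made up):
#   {
#     "btcd_connections": [{"src": "0r", "dst": "1r"}, {"src": "0e", "dst": "0r"}, ...],
#     "miner": "0r",
#     "nodes": [{"name": "0r", "ip": "10.0.1.100"}, {"name": "0e", "ip": "10.0.2.100"}, ...],
#     "lnd_channels": [{"src": "0r", "dst": "1r", "capacity": 20000}, ...],
#     "demands": [{"src": "0e", "dst": "2e", "rate": 5}, ...]
#   }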
# generate workload for arbitrary topology by uniformly sampling
# the set of nodes for sender-receiver pairs
# size of transaction is determined when writing to the file to
# either be log-normally distributed, drawn from Kaggle data, or constant size
def generate_workload_for_provided_topology(filename, inside_graph, whole_graph, end_host_map, \
workload_type, total_time, \
log_normal, kaggle_size, txn_size_mean, timeout_value, generate_json_also, circ_frac):
num_nodes = inside_graph.number_of_nodes()
start_nodes, end_nodes, amt_relative = [], [], []
""" generate circulation and dag demand """
circ_frac = round(circ_frac, 3)
dag_frac = round(1 - circ_frac, 3)
demand_dict_dag = dict()
demand_dict_circ = dict()
if circ_frac > 0:
demand_dict_circ = circ_demand(list(inside_graph), mean=MEAN_RATE, \
std_dev=CIRCULATION_STD_DEV)
if dag_frac > 0:
demand_dict_dag = dag_demand(list(inside_graph), mean=MEAN_RATE, \
std_dev=CIRCULATION_STD_DEV, skew_param=dag_frac*10, gen_method="src_skew")
circ_total = reduce(lambda x, value: x + value, iter(demand_dict_circ.values()), 0)
dag_total = reduce(lambda x, value: x + value, iter(demand_dict_dag.values()), 0)
if "weird" not in filename or dag_frac == 0.20 or dag_frac == 0.45:
demand_dict = { key: circ_frac * demand_dict_circ.get(key, 0) + dag_frac * demand_dict_dag.get(key, 0) \
for key in set(demand_dict_circ) | set(demand_dict_dag) }
else:
# just add dag and don't weigh
demand_dict = { key: demand_dict_circ.get(key, 0) + dag_frac * demand_dict_dag.get(key, 0) \
for key in set(demand_dict_circ) | set(demand_dict_dag) }
total = reduce(lambda x, value: x + value, iter(demand_dict.values()), 0)
print("Circulation", circ_total)
print("Dag", dag_total)
print("total", total)
print(circ_frac)
print(dag_frac)
'''
pkl_op = open(filename + '_demand.pkl', 'wb')
pickle.dump(demand_dict, pkl_op)
pkl_op.close()
'''
if "two_node_imbalance" in filename:
demand_dict = dict()
demand_dict[0, 1] = MEAN_RATE
demand_dict[1, 0] = 5 * MEAN_RATE
print(demand_dict)
elif "two_node_capacity" in filename:
demand_dict = dict()
demand_dict[0, 1] = 2 * MEAN_RATE
demand_dict[1, 0] = 5 * MEAN_RATE
print(demand_dict)
if "three_node" in filename:
demand_dict = dict()
demand_dict[0, 2] = MEAN_RATE
demand_dict[1, 2] = MEAN_RATE
demand_dict[2, 1] = MEAN_RATE
demand_dict[1, 0] = MEAN_RATE
for i, j in list(demand_dict.keys()):
start_nodes.append(end_host_map[i])
end_nodes.append(end_host_map[j])
amt_relative.append(demand_dict[i, j])
amt_absolute = [SCALE_AMOUNT * x for x in amt_relative]
print("generated workload")
max_circ = max_circulation(demand_dict)
if total != 0:
print("ALERT!", "maximum circulation: ", max_circ, " or ", float(max_circ)/total)
if generate_json_also:
generate_json_files(filename, whole_graph, inside_graph, start_nodes, end_nodes, amt_absolute)
if "weird" not in filename:
print("generting txns here")
write_txns_to_file(filename + '_workload.txt', start_nodes, end_nodes, amt_absolute,\
workload_type, total_time, log_normal, kaggle_size, txn_size_mean, timeout_value)
else:
kaggle_size = False
start_nodes_circ, end_nodes_circ, amt_relative_circ = [], [], []
for i, j in list(demand_dict_circ.keys()):
start_nodes_circ.append(end_host_map[i])
end_nodes_circ.append(end_host_map[j])
amt_relative_circ.append(demand_dict_circ[i, j])
amt_absolute_circ = [SCALE_AMOUNT * x for x in amt_relative_circ]
# circ for 1000s
if dag_frac == 0.20 or dag_frac == 0.45 or dag_frac == 0.8:
# dag plus circ for 2000s
write_txns_to_file(filename + '_workload.txt', start_nodes, end_nodes, amt_absolute,\
workload_type, 2000, log_normal, kaggle_size, txn_size_mean, timeout_value)
# circ again for 1000s
write_txns_to_file(filename + '_workload.txt', start_nodes_circ, end_nodes_circ, amt_absolute_circ,\
workload_type, 1000, log_normal, kaggle_size, txn_size_mean, timeout_value, "a", 2000)
else:
write_txns_to_file(filename + '_workload.txt', start_nodes_circ, end_nodes_circ, amt_absolute_circ,\
workload_type, 1000, log_normal, kaggle_size, txn_size_mean, timeout_value)
# dag plus circ for 1000s
write_txns_to_file(filename + '_workload.txt', start_nodes, end_nodes, amt_absolute,\
workload_type, 1000, log_normal, kaggle_size, txn_size_mean, timeout_value, "a", 1000)
# circ again for 1000s
write_txns_to_file(filename + '_workload.txt', start_nodes_circ, end_nodes_circ, amt_absolute_circ,\
workload_type, 1000, log_normal, kaggle_size, txn_size_mean, timeout_value, "a", 2000)
# parse a given line of edge relationships from the topology file
# and return whether this is a router node and its identifier
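# For example (hypothetical lines), '0r 1r' connects router 0 to router 1 and
# '2e 2r' attaches end host 2 to its router 2; the trailing 'r'/'e' marks the
# node type and the leading integer is its identifier.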
def parse_node(node_name):
try:
val = int(node_name[:-1])
if node_name[-1] == 'r':
return True, val
if node_name[-1] == 'e':
return False, val
return -1
except:
return -1
# parse topology file to get graph structure
def parse_topo(topo_filename):
g = nx.Graph()
router_graph = nx.Graph()
end_host_map = dict()
line_num = 0
with open(topo_filename) as topo_file:
for line in topo_file:
line_num += 1
# landmark line
if line_num == 1:
continue
if line == '\n':
continue
n1 = parse_node(line.split()[0])
n2 = parse_node(line.split()[1])
if n1 == -1 or n2 == -1:
print("Bad line " + line)
continue
g.add_edge(n1[1], n2[1])
if n1[0] and n2[0]:
router_graph.add_edge(n1[1], n2[1])
elif n1[0]:
end_host_map[n1[1]] = n2[1]
elif n2[0]:
end_host_map[n2[1]] = n1[1]
return g, router_graph, end_host_map
# generate circulation demand for node ids mentioned in node_list,
# with average total demand at a node equal to 'mean', and a
# perturbation of 'std_dev'
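# Illustrative output (made-up numbers): for node_list=[0, 1, 2] and mean=2 one
# possible result is {(0, 1): 2, (1, 2): 2, (2, 0): 2}; every node sends as much
# as it receives, which is what makes the demand a circulation.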
def circ_demand(node_list, mean, std_dev):
print("MEAN DEMAND", mean)
assert type(mean) is int
assert type(std_dev) is int
demand_dict = {}
num_nodes = len(node_list)
""" sum of 'mean' number of random permutation matrices """
""" note any permutation matrix is a circulation demand """
""" matrix indices are shifted by number of nodes to account """
for i in range(mean):
perm = np.random.permutation(node_list)
for j, k in enumerate(perm):
if (j, k) in list(demand_dict.keys()):
demand_dict[j, k] += 1
else:
demand_dict[j, k] = 1
""" add 'std_dev' number of additional cycles to the demand """
for i in range(std_dev):
cycle_len = np.random.choice(list(range(1, num_nodes+1)))
cycle = np.random.choice(node_list, cycle_len)
cycle = set(cycle)
cycle = list(cycle)
cycle.append(cycle[0])
for j in range(len(cycle[:-1])):
if (cycle[j], cycle[j+1]) in list(demand_dict.keys()):
demand_dict[cycle[j], cycle[j+1]] += 1
else:
demand_dict[cycle[j], cycle[j+1]] = 1
""" remove diagonal entries of demand matrix """
for (i, j) in list(demand_dict.keys()):
if i == j:
del demand_dict[i, j]
return demand_dict
# generate dag for node ids mentioned in node_list,
# with average total demand out of a node equal to 'mean', and a
# perturbation of 'std_dev'
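# Illustrative output (made-up numbers): for node_list=[0, 1, 2] a DAG demand
# such as {(0, 1): 3, (0, 2): 2, (1, 2): 3} only sends "forward" along some
# ordering of the nodes, so, unlike a circulation, it cannot be routed without
# net balance changes at the endpoints.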
def dag_demand(node_list, mean, std_dev, skew_param=5,gen_method="topological_sort"):
print("DAG_DEMAND", mean)
assert type(mean) is int
assert type(std_dev) is int
demand_dict = {}
if gen_method == "src_skew":
""" sample receiver uniformly at random and source from exponential distribution """
for i in range(len(node_list) * mean):
sender = len(node_list)
while sender >= len(node_list):
sender = int(np.random.exponential(len(node_list)/skew_param))
receiver_list = np.random.permutation(node_list)
receiver_index = len(node_list)
while receiver_index >= len(node_list):
receiver_index = int(np.random.exponential(len(node_list)/skew_param))
receiver = receiver_list[receiver_index]
demand_dict[sender, receiver] = demand_dict.get((sender, receiver), 0) + 1
else:
perm = np.random.permutation(node_list)
print("root is ", perm[0])
""" use a random ordering of the nodes """
""" as the topological sort of the DAG demand to produce """
""" generate demand from a node to only nodes higher """
""" than it in the random ordering """
for i, node in enumerate(perm[:-1]):
receiver_node_list = perm[i + 1:]
total_demand_from_node = mean + np.random.choice([std_dev, -1*std_dev])
for j in range(total_demand_from_node):
receiver = np.random.choice(receiver_node_list)
demand_dict[node, receiver] = demand_dict.get((node, receiver), 0) + 1
""" remove diagonal entries of demand matrix """
for (i, j) in list(demand_dict.keys()):
if i == j:
del demand_dict[i, j]
return demand_dict
# parse arguments
parser = argparse.ArgumentParser(description="Create arbitrary txn workloads to run the omnet simulator on")
parser.add_argument('--graph-topo', \
choices=['hotnets_topo', 'simple_line', 'simple_deadlock', 'custom', 'hardcoded_circ', 'toy_dctcp', 'dag_example', 'parallel_graph'],\
help='type of graph (Small world or scale free or custom topology)', default='simple_line')
parser.add_argument('--payment-graph-dag-percentage', type=int,\
help='percentage of DAG demand to put in the payment graph (the rest is circulation)', default=0)
parser.add_argument('--topo-filename', dest='topo_filename', type=str, \
help='name of topology file to generate workload for')
parser.add_argument('output_file_prefix', type=str, help='name of the output workload file', \
default='simple_workload.txt')
parser.add_argument('interval_distribution', choices=['uniform', 'poisson','kaggle'],\
help='time between transactions is determined by this', default='poisson')
parser.add_argument('--experiment-time', dest='total_time', type=int, \
help='time to generate txns for', default=30)
parser.add_argument('--txn-size-mean', dest='txn_size_mean', type=int, \
help='mean_txn_size', default=1)
parser.add_argument('--log-normal', action='store_true', help='should txn sizes be drawn from a log-normal distribution')
parser.add_argument('--kaggle-size', action='store_true', help='should txn sizes be drawn from the Kaggle amount distribution')
parser.add_argument('--generate-json-also', action="store_true", help="do you need to generate json file also \
for the custom topology")
parser.add_argument('--balance-list', type=int, nargs='+', dest='balance_list', default=[100])
parser.add_argument('--timeout-value', type=float, help='generic time out for all transactions', default=5)
parser.add_argument('--scale-amount', type=int, help='how much to scale the mean demand by', default=5)
parser.add_argument('--run-num', type=int, help='influences the seed', default=1)
args = parser.parse_args()
output_prefix = args.output_file_prefix
circ_frac = (100 - args.payment_graph_dag_percentage) / 100.0
distribution = args.interval_distribution
total_time = args.total_time
txn_size_mean = args.txn_size_mean
log_normal = args.log_normal
kaggle_size = args.kaggle_size
topo_filename = args.topo_filename
generate_json_also = args.generate_json_also
graph_topo = args.graph_topo
balance_list = args.balance_list
timeout_value = args.timeout_value
SCALE_AMOUNT = args.scale_amount
if kaggle_size:
log_normal = False
# generate workloads
np.random.seed(SEED_LIST[args.run_num])
random.seed(SEED_LIST[args.run_num])
if graph_topo != 'custom':
generate_workload_standard(output_prefix, graph_topo, distribution, \
total_time, log_normal, kaggle_size, txn_size_mean, timeout_value, generate_json_also, circ_frac)
elif topo_filename is None:
raise Exception("Topology needed for custom file")
else:
whole_graph, inside_graph, end_host_map = parse_topo(topo_filename)
generate_workload_for_provided_topology(output_prefix, inside_graph, whole_graph, end_host_map,\
distribution, total_time, log_normal, kaggle_size,\
txn_size_mean, timeout_value, generate_json_also, circ_frac)
|
from pecan import make_app
import eventlet
from authdog import model
def setup_app(config):
model.init_model()
app_conf = dict(config.app)
eventlet_enabled = dict(config.EVENTLET).get("enabled")
if eventlet_enabled:
eventlet.monkey_patch(time=True, thread=True)
return make_app(
app_conf.pop('root'),
logging=getattr(config, 'logging', {}),
**app_conf
)
|
# Interface for a "set" of cases
class CaseSet:
def __init__(self, time):
self.time = time
def __len__(self):
raise NotImplementedError()
def iterator(self):
raise NotImplementedError()
def get_time(self):
return self.time
def set_time(self, time):
self.time = time
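# A minimal illustrative subclass (hypothetical, not part of the original
# interface) showing how the abstract methods are meant to be filled in:
class ListCaseSet(CaseSet):
    """Backs the case 'set' with a plain Python list."""

    def __init__(self, time, cases):
        CaseSet.__init__(self, time)
        self._cases = list(cases)

    def __len__(self):
        return len(self._cases)

    def iterator(self):
        return iter(self._cases)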
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur EdtEmplacement."""
from primaires.interpreteur.editeur.uniligne import Uniligne
class EdtEmplacement(Uniligne):
"""Classe définissant le contexte éditeur 'edt_emplacement'."""
def __init__(self, pere, objet, attribut):
Uniligne.__init__(self, pere, objet, attribut)
self.ajouter_option("e", self.opt_epaisseur)
self.ajouter_option("p", self.opt_positions)
def opt_epaisseur(self, arguments):
"""Option épaisseur.
Syntaxe :
/e <épaisseur>
"""
try:
epaisseur = int(arguments)
except ValueError:
self.pere << "|err|Epaisseur invalide, spécifiez un nombre.|ff|"
else:
self.objet.epaisseur = epaisseur
self.actualiser()
def opt_positions(self, arguments):
"""Option positions.
Syntaxe :
/p <position1> (, <position2>, (...))
"""
if not arguments.strip():
self.pere << "|err|Entrez au moins une position ou plusieurs " \
"séparées par des virgules.|ff|"
return
arguments = arguments.split(",")
positions = set()
for arg in arguments:
try:
arg = arg.strip()
position = int(arg)
except ValueError:
self.pere << "|err|Position invalide, spécifiez un " \
"nombre.|ff|"
return
else:
positions.add(position)
self.objet.positions = tuple(sorted(positions))
self.actualiser()
def accueil(self):
"""Retourne le message d'accueil."""
msg = Uniligne.accueil(self)
msg += "\nPositions actuelles : "
positions = self.objet.positions
if not positions:
msg += "|rg|aucune|ff|"
else:
msg += ", ".join([str(p) for p in positions])
return msg
|
# Simple basic assignment
def main():
x = 1
|
from django.shortcuts import render
from .models import *
# Create your views here.
def image(request):
images = Image.objects.all()
location = Location.objects.all()
return render(request, 'images.html',{"images":images,"location":location})
def locations(request,location_id):
loc = Location.objects.get(location=location_id)
location = Image.objects.filter(location=loc.id)
locs = Location.objects.all()
catego = Category.objects.all()
return render(request,'location.html',{"location":location,"catego":catego,"locs":locs})
# def imagealone(request,image_id):
# try:
# image = Image.objects.get(id=image_id)
# except DoesNotExist:
# raise Http404()
# return render(request,"imageOne.html",{"image":image})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'search.html', {"message": message, "images": searched_images})
else:
message = "You haven't searched for any category"
return render(request, 'search.html', {"message": message})
|
from advent_2021.helpers import get_input
if __name__ == "__main__":
positions = list(map(int, next(get_input()).split(",")))
print(
min(
sum(abs(position - target) for position in positions)
for target in range(min(positions), max(positions))
)
)
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from horarios.serializers import SessionSerializer, SubjectSimpleSerializer, SubjectSerializer, GroupSerializer, ProfessionSerializer
from horarios.models import Session, Subject, Group, Profession
from rest_framework import generics
from rest_framework.permissions import IsAdminUser, AllowAny
def home(request):
"""
this will render the home page
:param request:
:return: home page of the project
"""
return render(request, 'home.html')
@csrf_exempt
def do_deploy(request):
"""
deploys on the server after github sends a POST-Receive hook.
This is initiated by a push on the master branch on github.
:param request: JSON encoded payload sent by github.
"""
import json
import subprocess
from django.http import HttpResponse, Http404
from django.conf import settings
if request.method != 'POST':
raise Http404
if not 'payload' in request.POST.keys():
raise Http404
payload = json.loads(request.POST['payload'])
out_json = {'status':'failed'}
if payload['ref'] == 'refs/heads/master':
DEPLOY_SCRIPT = getattr(settings, "DEPLOY_SCRIPT", "pwd")
out = subprocess.check_output(DEPLOY_SCRIPT)
if not getattr(settings,"DEBUG",False):
out = ""
out_json = {'status' : 'success', 'output' : out }
return HttpResponse(json.dumps(out_json), content_type='application/json')
class SubjectProfessionAutocompleteView(APIView):
def get(self, request, *args , **kw):
import json
search_term = kw['search_term']
try:
profession = int(kw['profession'])
except:
profession = ""
try:
subject_type = json.loads(kw['subject_type'])
except:
subject_type = []
queryset = Subject.autocomplete(search_term,profession,subject_type)
print(queryset)
serializer = SubjectSimpleSerializer(queryset, many=True)
return Response(serializer.data)
class SessionList(generics.ListCreateAPIView):
queryset = Session.objects.all()
serializer_class = SessionSerializer
class SessionDetail(generics.RetrieveUpdateAPIView):
permission_classes = (AllowAny,)
queryset = Session.objects.all()
serializer_class = SessionSerializer
class SubjectList(generics.ListAPIView):
queryset = Subject.objects.all()
serializer_class = SubjectSerializer
permission_classes = (IsAdminUser,)
class SubjectDetail(generics.RetrieveAPIView):
queryset = Subject.objects.all()
serializer_class = SubjectSerializer
class ProfessionList(generics.ListAPIView):
queryset = Profession.objects.all()
serializer_class = ProfessionSerializer
|
# coding: utf-8
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
class JsTools:
message_status = False
dim_status = False
logger = None
page_wait = None
js_injector = """
var include_js = function(url, callback){
var script = document.createElement('script');
script.type = 'text/javascript';
script.src = url;
if (callback) {
script.onreadystatechange = callback;
script.onload = script.onreadystatechange;
}
document.getElementsByTagName('head')[0].appendChild(script);
console.log("Scrip loaded");
}
var include_css = function(url, callback){
var css = document.createElement('link');
css.type = 'text/css';
css.rel = 'stylesheet';
css.href = url;
document.getElementsByTagName('head')[0].appendChild(css);
console.log("CSS loaded");
}
var create_target = function(){
document.body.innerHTML += '<span id="kissenium"></span>';
}
"""
def __init__(self, message_status, dim_status, logger, page_wait):
self.message_status = message_status
self.dim_status = dim_status
self.logger = logger
self.page_wait = page_wait
def message(self, browser, message, message_timing=4, pause=2):
self.logger.info(
"[InjectMessage] message: Messaging status : %s | Message to send : %s " % (self.message_status, message))
if self.message_status == "True":
self.inject_dependencies(browser)
browser.execute_script("spop({ template: '%s', autoclose: %s });" % (message, str(message_timing * 1000)))
time.sleep(pause)
def dim_by_id(self, browser, element_id, timing=2):
self.logger.info(
"[InjectMessage] dim: Messaging status : %s" % self.dim_status)
if self.dim_status == "True":
self.inject_dependencies(browser)
browser.execute_script("$('#%s').dimBackground();" % element_id)
time.sleep(timing)
browser.execute_script("$('#%s').undim();" % element_id)
def inject_dependencies(self, browser):
if self.message_status == "True" or self.dim_status == "True":
try:
browser.find_element(By.__dict__.get('ID'), "kissenium")
except NoSuchElementException:
self.logger.info("[InjectMessage] inject_dependencies: no dependencies injected, injecting them...")
browser.execute_script(self.js_injector +
"""
include_css('https://www.adiuvo.fr/kissenium.min.css', function(){});
include_js('https://www.adiuvo.fr/kissenium.min.js',
function(){ create_target(); });
""")
WebDriverWait(browser, int(self.page_wait)).until(
ec.presence_of_element_located((By.ID, "kissenium"))
)
self.logger.info("[InjectMessage] inject_dependencies: Dependencies injected!")
|
'''
Use this file to store names for global permissions -- permissions
that don't belong in any particular module.
For module specific permissions, create an Enum within the module
'''
import enum
class Permissions(enum.IntEnum):
#Site admins -- always have permission to everything -- Use with caution
ADMIN = 1
|
from datetime import datetime
from SearchAlgorithms import AEstrela
from _8puzzle import puzzle
goal= [[1,2,3],[8,0,4],[7,6,5]]
def test_1():
board = [[8,1,3],[0,7,2],[6,5,4]]
state = puzzle(" ", board, 1, 0, goal)
beginning = datetime.now()
print(beginning)
algorithm = AEstrela()
result = algorithm.search(state)
ending = datetime.now()
print("Tempo de resolucao:", ending-beginning)
assert result.state.env() == str(goal)
def test_2():
board = [[7,8,6],[2,3,5],[1,4,0]]
state = puzzle(" ", board, 2, 2, goal)
beginning = datetime.now()
algorithm = AEstrela()
result = algorithm.search(state)
ending = datetime.now()
print("Tempo de resolucao:", ending-beginning)
assert result.state.env() == str(goal)
def test_3():
board = [[7,8,6],[2,3,5],[0,1,4]]
state = puzzle(" ", board, 2, 0, goal)
beginning = datetime.now()
algorithm = AEstrela()
result = algorithm.search(state)
ending = datetime.now()
print("Tempo de resolucao::", ending-beginning)
assert result.state.env() == str(goal)
def test_4():
board = [[8,3,6],[7,5,4],[2,1,0]]
state = puzzle(" ", board, 2, 2, goal)
beginning = datetime.now()
algorithm = AEstrela()
result = algorithm.search(state)
ending = datetime.now()
print("Tempo de resolucao::", ending-beginning)
assert result.state.env() == str(goal)
def test_5():
board = [[3,4,8],[1,2,5],[7,0,6]]
state = puzzle(" ", board, 2, 1, goal)
print("Teste impossível")
assert state.isSolvable() == False
|
# Example used along with arduino_example
import time
import serial
from watchedserial import WatchedReaderThread
PORT = "COM3"
class MyPacket(serial.threaded.FramedPacket):
def handle_packet(self, packet):
print(packet)
class MyWatchedReaderThread(WatchedReaderThread):
def handle_reconnect(self):
print("Reconnected")
def handle_disconnect(self, error):
print("Disconnected")
ser = serial.Serial(PORT, baudrate=115200)
with MyWatchedReaderThread(ser, MyPacket) as protocol:
while True:
time.sleep(1)
|
import cupy
import unittest
import warnings
import numpy
import pytest
try:
import scipy.sparse
import scipy.sparse.linalg
import scipy.stats
scipy_available = True
except ImportError:
scipy_available = False
from cupy import testing
from cupy.testing import condition
from cupyx.scipy import sparse
import cupyx.scipy.sparse.linalg # NOQA
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
}))
@testing.with_requires('scipy')
class TestLsqr(unittest.TestCase):
def setUp(self):
rvs = scipy.stats.randint(0, 15).rvs
self.A = scipy.sparse.random(50, 50, density=0.2, data_rvs=rvs)
self.b = numpy.random.randint(15, size=50)
def test_size(self):
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
A = sp.csr_matrix(self.A, dtype=self.dtype)
b = xp.array(numpy.append(self.b, [1]), dtype=self.dtype)
with pytest.raises(ValueError):
sp.linalg.lsqr(A, b)
def test_shape(self):
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
A = sp.csr_matrix(self.A, dtype=self.dtype)
b = xp.array(numpy.tile(self.b, (2, 1)), dtype=self.dtype)
with pytest.raises(ValueError):
sp.linalg.lsqr(A, b)
@condition.retry(10)
@testing.numpy_cupy_allclose(atol=1e-1, sp_name='sp')
def test_csrmatrix(self, xp, sp):
A = sp.csr_matrix(self.A, dtype=self.dtype)
b = xp.array(self.b, dtype=self.dtype)
x = sp.linalg.lsqr(A, b)
return x[0]
@condition.retry(10)
@testing.numpy_cupy_allclose(atol=1e-1, sp_name='sp')
def test_ndarray(self, xp, sp):
A = xp.array(self.A.A, dtype=self.dtype)
b = xp.array(self.b, dtype=self.dtype)
x = sp.linalg.lsqr(A, b)
return x[0]
@testing.parameterize(*testing.product({
'ord': [None, -numpy.Inf, -2, -1, 0, 1, 2, 3, numpy.Inf, 'fro'],
'dtype': [
numpy.float32,
numpy.float64,
numpy.complex64,
numpy.complex128
],
'axis': [None, (0, 1), (1, -2)],
}))
@testing.with_requires('scipy')
@testing.gpu
class TestMatrixNorm(unittest.TestCase):
@testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4, sp_name='sp',
accept_error=(ValueError,
NotImplementedError))
def test_matrix_norm(self, xp, sp):
a = xp.arange(9, dtype=self.dtype) - 4
b = a.reshape((3, 3))
b = sp.csr_matrix(b, dtype=self.dtype)
return sp.linalg.norm(b, ord=self.ord, axis=self.axis)
@testing.parameterize(*testing.product({
'ord': [None, -numpy.Inf, -2, -1, 0, 1, 2, numpy.Inf, 'fro'],
'dtype': [
numpy.float32,
numpy.float64,
numpy.complex64,
numpy.complex128
],
'transpose': [True, False],
'axis': [0, (1,), (-2,), -1],
})
)
@testing.with_requires('scipy')
@testing.gpu
class TestVectorNorm(unittest.TestCase):
@testing.numpy_cupy_allclose(rtol=1e-3, atol=1e-4, sp_name='sp',
accept_error=(ValueError,))
def test_vector_norm(self, xp, sp):
a = xp.arange(9, dtype=self.dtype) - 4
b = a.reshape((3, 3))
b = sp.csr_matrix(b, dtype=self.dtype)
if self.transpose:
b = b.T
return sp.linalg.norm(b, ord=self.ord, axis=self.axis)
# TODO : TestVsNumpyNorm
@testing.parameterize(*testing.product({
'which': ['LM', 'LA'],
'k': [3, 6, 12],
'return_eigenvectors': [True, False],
'use_linear_operator': [True, False],
}))
@testing.with_requires('scipy')
class TestEigsh:
n = 30
density = 0.33
tol = {numpy.float32: 1e-5, numpy.complex64: 1e-5, 'default': 1e-12}
res_tol = {'f': 1e-5, 'd': 1e-12}
def _make_matrix(self, dtype, xp):
shape = (self.n, self.n)
a = testing.shaped_random(shape, xp, dtype=dtype)
mask = testing.shaped_random(shape, xp, dtype='f', scale=1)
a[mask > self.density] = 0
a = a * a.conj().T
return a
def _test_eigsh(self, a, xp, sp):
ret = sp.linalg.eigsh(a, k=self.k, which=self.which,
return_eigenvectors=self.return_eigenvectors)
if self.return_eigenvectors:
w, x = ret
# Check the residuals to see if eigenvectors are correct.
ax_xw = a @ x - xp.multiply(x, w.reshape(1, self.k))
res = xp.linalg.norm(ax_xw) / xp.linalg.norm(w)
tol = self.res_tol[numpy.dtype(a.dtype).char.lower()]
assert(res < tol)
else:
w = ret
return xp.sort(w)
@pytest.mark.parametrize('format', ['csr', 'csc', 'coo'])
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=tol, atol=tol, sp_name='sp')
def test_sparse(self, format, dtype, xp, sp):
a = self._make_matrix(dtype, xp)
a = sp.coo_matrix(a).asformat(format)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
return self._test_eigsh(a, xp, sp)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=tol, atol=tol, sp_name='sp')
def test_dense(self, dtype, xp, sp):
a = self._make_matrix(dtype, xp)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
return self._test_eigsh(a, xp, sp)
def test_invalid(self):
if self.use_linear_operator is True:
raise unittest.SkipTest
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
a = xp.diag(xp.ones((self.n, ), dtype='f'))
with pytest.raises(ValueError):
sp.linalg.eigsh(xp.ones((2, 1), dtype='f'))
with pytest.raises(ValueError):
sp.linalg.eigsh(a, k=0)
xp, sp = cupy, sparse
a = xp.diag(xp.ones((self.n, ), dtype='f'))
with pytest.raises(ValueError):
sp.linalg.eigsh(xp.ones((1,), dtype='f'))
with pytest.raises(TypeError):
sp.linalg.eigsh(xp.ones((2, 2), dtype='i'))
with pytest.raises(ValueError):
sp.linalg.eigsh(a, k=self.n)
with pytest.raises(ValueError):
sp.linalg.eigsh(a, k=self.k, which='SM')
with pytest.raises(ValueError):
sp.linalg.eigsh(a, k=self.k, which='SA')
@testing.parameterize(*testing.product({
'shape': [(30, 29), (29, 29), (29, 30)],
'k': [3, 6, 12],
'return_vectors': [True, False],
'use_linear_operator': [True, False],
}))
@testing.with_requires('scipy')
class TestSvds:
density = 0.33
tol = {numpy.float32: 1e-4, numpy.complex64: 1e-4, 'default': 1e-12}
def _make_matrix(self, dtype, xp):
a = testing.shaped_random(self.shape, xp, dtype=dtype)
mask = testing.shaped_random(self.shape, xp, dtype='f', scale=1)
a[mask > self.density] = 0
return a
def _test_svds(self, a, xp, sp):
ret = sp.linalg.svds(a, k=self.k,
return_singular_vectors=self.return_vectors)
if self.return_vectors:
u, s, vt = ret
# Check the results with u @ s @ vt, as singular vectors don't
# necessarily match.
return u @ xp.diag(s) @ vt
else:
return xp.sort(ret)
@pytest.mark.parametrize('format', ['csr', 'csc', 'coo'])
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=tol, atol=tol, sp_name='sp')
def test_sparse(self, format, dtype, xp, sp):
a = self._make_matrix(dtype, xp)
a = sp.coo_matrix(a).asformat(format)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
return self._test_svds(a, xp, sp)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=tol, atol=tol, sp_name='sp')
def test_dense(self, dtype, xp, sp):
a = self._make_matrix(dtype, xp)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
return self._test_svds(a, xp, sp)
def test_invalid(self):
if self.use_linear_operator is True:
raise unittest.SkipTest
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
a = xp.diag(xp.ones(self.shape, dtype='f'))
with pytest.raises(ValueError):
sp.linalg.svds(a, k=0)
xp, sp = cupy, sparse
a = xp.diag(xp.ones(self.shape, dtype='f'))
with pytest.raises(ValueError):
sp.linalg.svds(xp.ones((1,), dtype='f'))
with pytest.raises(TypeError):
sp.linalg.svds(xp.ones((2, 2), dtype='i'))
with pytest.raises(ValueError):
sp.linalg.svds(a, k=min(self.shape))
with pytest.raises(ValueError):
sp.linalg.svds(a, k=self.k, which='SM')
@testing.parameterize(*testing.product({
'x0': [None, 'ones'],
'M': [None, 'jacobi'],
'atol': [None, 'select-by-dtype'],
'b_ndim': [1, 2],
'use_linear_operator': [False, True],
}))
@testing.with_requires('scipy')
@testing.gpu
class TestCg:
n = 30
density = 0.33
_atol = {'f': 1e-5, 'd': 1e-12}
def _make_matrix(self, dtype, xp):
dtype = numpy.dtype(dtype)
shape = (self.n, 10)
a = testing.shaped_random(shape, xp, dtype=dtype.char.lower(), scale=1)
if dtype.char in 'FD':
a = a + 1j * testing.shaped_random(
shape, xp, dtype=dtype.char.lower(), scale=1)
mask = testing.shaped_random(shape, xp, dtype='f', scale=1)
a[mask > self.density] = 0
a = a @ a.conj().T
a = a + xp.diag(xp.ones((self.n,), dtype=dtype.char.lower()))
M = None
if self.M == 'jacobi':
M = xp.diag(1.0 / xp.diag(a))
return a, M
def _make_normalized_vector(self, dtype, xp):
b = testing.shaped_random((self.n,), xp, dtype=dtype)
return b / xp.linalg.norm(b)
def _test_cg(self, dtype, xp, sp, a, M):
dtype = numpy.dtype(dtype)
b = self._make_normalized_vector(dtype, xp)
if self.b_ndim == 2:
b = b.reshape(self.n, 1)
x0 = None
if self.x0 == 'ones':
x0 = xp.ones((self.n,), dtype=dtype)
atol = None
if self.atol == 'select-by-dtype':
atol = self._atol[dtype.char.lower()]
if atol is None and xp == numpy:
# Note: If atol is None or not specified, Scipy (at least 1.5.3)
# raises DeprecationWarning
with pytest.deprecated_call():
return sp.linalg.cg(a, b, x0=x0, M=M, atol=atol)
else:
return sp.linalg.cg(a, b, x0=x0, M=M, atol=atol)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_dense(self, dtype, xp, sp):
a, M = self._make_matrix(dtype, xp)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
if M is not None:
M = sp.linalg.aslinearoperator(M)
return self._test_cg(dtype, xp, sp, a, M)
@pytest.mark.parametrize('format', ['csr', 'csc', 'coo'])
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_sparse(self, format, dtype, xp, sp):
a, M = self._make_matrix(dtype, xp)
a = sp.coo_matrix(a).asformat(format)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
if M is not None:
M = sp.coo_matrix(M).asformat(format)
if self.use_linear_operator:
M = sp.linalg.aslinearoperator(M)
return self._test_cg(dtype, xp, sp, a, M)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_empty(self, dtype, xp, sp):
if not (self.x0 is None and self.M is None and self.atol is None and
self.use_linear_operator is False):
raise unittest.SkipTest
a = xp.empty((0, 0), dtype=dtype)
b = xp.empty((0,), dtype=dtype)
if self.atol is None and xp == numpy:
# Note: If atol is None or not specified, Scipy (at least 1.5.3)
# raises DeprecationWarning
with pytest.deprecated_call():
return sp.linalg.cg(a, b)
else:
return sp.linalg.cg(a, b)
@testing.for_dtypes('fdFD')
def test_callback(self, dtype):
if not (self.x0 is None and self.M is None and self.atol is None and
self.use_linear_operator is False):
raise unittest.SkipTest
xp, sp = cupy, sparse
a, M = self._make_matrix(dtype, xp)
b = self._make_normalized_vector(dtype, xp)
is_called = False
def callback(x):
print(xp.linalg.norm(b - a @ x))
nonlocal is_called
is_called = True
sp.linalg.cg(a, b, callback=callback)
assert is_called
def test_invalid(self):
if not (self.x0 is None and self.M is None and self.atol is None and
self.use_linear_operator is False):
raise unittest.SkipTest
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
a, M = self._make_matrix('f', xp)
b = self._make_normalized_vector('f', xp)
ng_a = xp.ones((self.n, ), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(ng_a, b, atol=self.atol)
ng_a = xp.ones((self.n, self.n + 1), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(ng_a, b, atol=self.atol)
ng_a = xp.ones((self.n, self.n, 1), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(ng_a, b, atol=self.atol)
ng_b = xp.ones((self.n + 1,), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(a, ng_b, atol=self.atol)
ng_b = xp.ones((self.n, 2), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(a, ng_b, atol=self.atol)
ng_x0 = xp.ones((self.n + 1,), dtype='f')
with pytest.raises(ValueError):
sp.linalg.cg(a, b, x0=ng_x0, atol=self.atol)
ng_M = xp.diag(xp.ones((self.n + 1,), dtype='f'))
with pytest.raises(ValueError):
sp.linalg.cg(a, b, M=ng_M, atol=self.atol)
xp, sp = cupy, sparse
b = self._make_normalized_vector('f', xp)
ng_a = xp.ones((self.n, self.n), dtype='i')
with pytest.raises(TypeError):
sp.linalg.cg(ng_a, b, atol=self.atol)
@testing.parameterize(*testing.product({
'x0': [None, 'ones'],
'M': [None, 'jacobi'],
'atol': [None, 'select-by-dtype'],
'b_ndim': [1, 2],
'restart': [None, 10],
'use_linear_operator': [False, True],
}))
@testing.with_requires('scipy>=1.4')
@testing.gpu
class TestGmres:
n = 30
density = 0.2
_atol = {'f': 1e-5, 'd': 1e-12}
# TODO(kataoka): Fix the `lstsq` call in CuPy's `gmres`
@pytest.fixture(autouse=True)
def ignore_futurewarning(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', '`rcond` parameter will change', FutureWarning,
)
yield
def _make_matrix(self, dtype, xp):
dtype = numpy.dtype(dtype)
shape = (self.n, self.n)
a = testing.shaped_random(shape, xp, dtype=dtype, scale=1)
mask = testing.shaped_random(shape, xp, dtype='f', scale=1)
a[mask > self.density] = 0
diag = xp.diag(testing.shaped_random(
(self.n,), xp, dtype=dtype.char.lower(), scale=1) + 1)
a[diag > 0] = 0
a = a + diag
M = None
if self.M == 'jacobi':
M = xp.diag(1.0 / xp.diag(a))
return a, M
def _make_normalized_vector(self, dtype, xp):
b = testing.shaped_random((self.n,), xp, dtype=dtype, scale=1)
return b / xp.linalg.norm(b)
def _test_gmres(self, dtype, xp, sp, a, M):
dtype = numpy.dtype(dtype)
b = self._make_normalized_vector(dtype, xp)
if self.b_ndim == 2:
b = b.reshape(self.n, 1)
x0 = None
if self.x0 == 'ones':
x0 = xp.ones((self.n,), dtype=dtype)
atol = None
if self.atol == 'select-by-dtype':
atol = self._atol[dtype.char.lower()]
if atol is None and xp == numpy:
# Note: If atol is None or not specified, Scipy (at least 1.5.3)
# raises DeprecationWarning
with pytest.deprecated_call():
return sp.linalg.gmres(
a, b, x0=x0, restart=self.restart, M=M, atol=atol)
else:
return sp.linalg.gmres(
a, b, x0=x0, restart=self.restart, M=M, atol=atol)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_dense(self, dtype, xp, sp):
a, M = self._make_matrix(dtype, xp)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
if M is not None:
M = sp.linalg.aslinearoperator(M)
return self._test_gmres(dtype, xp, sp, a, M)
@pytest.mark.parametrize('format', ['csr', 'csc', 'coo'])
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_sparse(self, format, dtype, xp, sp):
a, M = self._make_matrix(dtype, xp)
a = sp.coo_matrix(a).asformat(format)
if self.use_linear_operator:
a = sp.linalg.aslinearoperator(a)
if M is not None:
M = sp.coo_matrix(M).asformat(format)
if self.use_linear_operator:
M = sp.linalg.aslinearoperator(M)
return self._test_gmres(dtype, xp, sp, a, M)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_empty(self, dtype, xp, sp):
if not (self.x0 is None and self.M is None and self.atol is None and
self.restart is None and self.use_linear_operator is False):
raise unittest.SkipTest
a = xp.empty((0, 0), dtype=dtype)
b = xp.empty((0,), dtype=dtype)
if self.atol is None and xp == numpy:
# Note: If atol is None or not specified, Scipy (at least 1.5.3)
# raises DeprecationWarning
with pytest.deprecated_call():
return sp.linalg.gmres(a, b)
else:
return sp.linalg.gmres(a, b)
@testing.for_dtypes('fdFD')
def test_callback(self, dtype):
if not (self.x0 is None and self.M is None and self.atol is None and
self.restart is None and self.use_linear_operator is False):
raise unittest.SkipTest
xp, sp = cupy, sparse
a, M = self._make_matrix(dtype, xp)
b = self._make_normalized_vector(dtype, xp)
is_called = False
def callback1(x):
print(xp.linalg.norm(b - a @ x))
nonlocal is_called
is_called = True
sp.linalg.gmres(a, b, callback=callback1, callback_type='x')
assert is_called
is_called = False
def callback2(pr_norm):
print(pr_norm)
nonlocal is_called
is_called = True
sp.linalg.gmres(a, b, callback=callback2, callback_type='pr_norm')
assert is_called
def test_invalid(self):
if not (self.x0 is None and self.M is None and self.atol is None and
self.restart is None and self.use_linear_operator is False):
raise unittest.SkipTest
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
a, M = self._make_matrix('f', xp)
b = self._make_normalized_vector('f', xp)
ng_a = xp.ones((self.n, ), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(ng_a, b)
ng_a = xp.ones((self.n, self.n + 1), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(ng_a, b)
ng_a = xp.ones((self.n, self.n, 1), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(ng_a, b)
ng_b = xp.ones((self.n + 1,), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(a, ng_b)
ng_b = xp.ones((self.n, 2), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(a, ng_b)
ng_x0 = xp.ones((self.n + 1,), dtype='f')
with pytest.raises(ValueError):
sp.linalg.gmres(a, b, x0=ng_x0)
ng_M = xp.diag(xp.ones((self.n + 1,), dtype='f'))
with pytest.raises(ValueError):
sp.linalg.gmres(a, b, M=ng_M)
ng_callback_type = '?'
with pytest.raises(ValueError):
sp.linalg.gmres(a, b, callback_type=ng_callback_type)
xp, sp = cupy, sparse
b = self._make_normalized_vector('f', xp)
ng_a = xp.ones((self.n, self.n), dtype='i')
with pytest.raises(TypeError):
sp.linalg.gmres(ng_a, b)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'outer_modification': [
'normal', 'transpose', 'hermitian'],
'inner_modification': [
'normal', 'sparse', 'linear_operator', 'class_matvec', 'class_matmat'],
'M': [1, 6],
'N': [1, 7],
}))
@testing.gpu
@testing.with_requires('scipy>=1.4')
class TestLinearOperator(unittest.TestCase):
    # Parametrized custom LinearOperator test cases, adapted from SciPy's analogous tests.
def _inner_cases(self, xp, sp, A):
# creating base-matrix-like class with default
# matrix-vector and adjoint-matrix-vector impl
def mv(x):
return A.dot(x)
def rmv(x):
return A.T.conj().dot(x)
# defining the user-defined classes
class BaseMatlike(sp.linalg.LinearOperator):
def __init__(self):
self.dtype = A.dtype
self.shape = A.shape
def _adjoint(self):
shape = self.shape[1], self.shape[0]
return sp.linalg.LinearOperator(
matvec=rmv, rmatvec=mv, dtype=self.dtype, shape=shape)
class HasMatvec(BaseMatlike):
def _matvec(self, x):
return mv(x)
class HasMatmat(BaseMatlike):
def _matmat(self, x):
return mv(x)
if self.inner_modification == 'normal':
return sp.linalg.aslinearoperator(A)
if self.inner_modification == 'sparse':
# TODO(asi1024): Fix to return contiguous matrix.
return sp.linalg.aslinearoperator(sp.csr_matrix(A))
if self.inner_modification == 'linear_operator':
return sp.linalg.LinearOperator(
matvec=mv, rmatvec=rmv, dtype=A.dtype, shape=A.shape)
if self.inner_modification == 'class_matvec':
return HasMatvec()
if self.inner_modification == 'class_matmat':
return HasMatmat()
assert False
def _generate_linear_operator(self, xp, sp):
A = testing.shaped_random((self.M, self.N), xp, self.dtype)
if self.outer_modification == 'normal':
return self._inner_cases(xp, sp, A)
if self.outer_modification == 'transpose':
# From SciPy 1.4 (scipy/scipy#9064)
return self._inner_cases(xp, sp, A.T).T
if self.outer_modification == 'hermitian':
return self._inner_cases(xp, sp, A.T.conj()).H
assert False
@testing.numpy_cupy_allclose(sp_name='sp', rtol=1e-6)
def test_matvec(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x_1dim = testing.shaped_random((self.N,), xp, self.dtype)
x_2dim = testing.shaped_random((self.N, 1), xp, self.dtype)
return linop.matvec(x_1dim), linop.matvec(x_2dim)
@testing.numpy_cupy_allclose(
sp_name='sp', rtol=1e-6, contiguous_check=False)
def test_matmat(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x = testing.shaped_random((self.N, 8), xp, self.dtype)
return linop.matmat(x)
@testing.numpy_cupy_allclose(sp_name='sp', rtol=1e-6)
def test_rmatvec(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x_1dim = testing.shaped_random((self.M,), xp, self.dtype)
x_2dim = testing.shaped_random((self.M, 1), xp, self.dtype)
return linop.rmatvec(x_1dim), linop.rmatvec(x_2dim)
@testing.numpy_cupy_allclose(
sp_name='sp', rtol=1e-6, contiguous_check=False)
def test_rmatmat(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x = testing.shaped_random((self.M, 8), xp, self.dtype)
return linop.rmatmat(x)
@testing.numpy_cupy_allclose(
sp_name='sp', rtol=1e-6, contiguous_check=False)
def test_dot(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x0 = testing.shaped_random((self.N,), xp, self.dtype)
x1 = testing.shaped_random((self.N, 1), xp, self.dtype)
x2 = testing.shaped_random((self.N, 8), xp, self.dtype)
return linop.dot(x0), linop.dot(x1), linop.dot(x2)
@testing.numpy_cupy_allclose(
sp_name='sp', rtol=1e-6, contiguous_check=False)
def test_mul(self, xp, sp):
linop = self._generate_linear_operator(xp, sp)
x0 = testing.shaped_random((self.N,), xp, self.dtype)
x1 = testing.shaped_random((self.N, 1), xp, self.dtype)
x2 = testing.shaped_random((self.N, 8), xp, self.dtype)
return linop * x0, linop * x1, linop * x2
@testing.parameterize(*testing.product({
'lower': [True, False],
'unit_diagonal': [True, False],
'nrhs': [None, 1, 4],
'order': ['C', 'F']
}))
@testing.with_requires('scipy>=1.4.0')
@testing.gpu
class TestSpsolveTriangular:
n = 10
density = 0.5
def _make_matrix(self, dtype, xp):
a_shape = (self.n, self.n)
a = testing.shaped_random(a_shape, xp, dtype=dtype, scale=1)
mask = testing.shaped_random(a_shape, xp, dtype='f', scale=1)
a[mask > self.density] = 0
diag = xp.diag(xp.ones((self.n,), dtype=dtype))
a = a + diag
if self.lower:
a = xp.tril(a)
else:
a = xp.triu(a)
b_shape = (self.n,) if self.nrhs is None else (self.n, self.nrhs)
b = testing.shaped_random(b_shape, xp, dtype=dtype, order=self.order)
return a, b
def _test_spsolve_triangular(self, sp, a, b):
return sp.linalg.spsolve_triangular(a, b, lower=self.lower,
unit_diagonal=self.unit_diagonal)
@pytest.mark.parametrize('format', ['csr', 'csc', 'coo'])
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_sparse(self, format, dtype, xp, sp):
a, b = self._make_matrix(dtype, xp)
a = sp.coo_matrix(a).asformat(format)
return self._test_spsolve_triangular(sp, a, b)
def test_invalid_cases(self):
dtype = 'float64'
if not (self.lower and self.unit_diagonal and self.nrhs == 4 and
self.order == 'C'):
raise unittest.SkipTest
for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):
a, b = self._make_matrix(dtype, xp)
a = sp.csr_matrix(a)
# a is not a square matrix
ng_a = sp.csr_matrix(xp.ones((self.n + 1, self.n), dtype=dtype))
with pytest.raises(ValueError):
self._test_spsolve_triangular(sp, ng_a, b)
# b is not a 1D/2D matrix
ng_b = xp.ones((1, self.n, self.nrhs), dtype=dtype)
with pytest.raises(ValueError):
self._test_spsolve_triangular(sp, a, ng_b)
# mismatched shape
ng_b = xp.ones((self.n + 1, self.nrhs), dtype=dtype)
with pytest.raises(ValueError):
self._test_spsolve_triangular(sp, a, ng_b)
xp, sp = cupy, sparse
a, b = self._make_matrix(dtype, xp)
a = sp.csr_matrix(a)
# unsupported dtype
ng_a = sp.csr_matrix(xp.ones((self.n, self.n), dtype='bool'))
with pytest.raises(TypeError):
self._test_spsolve_triangular(sp, ng_a, b)
# a is not spmatrix
ng_a = xp.ones((self.n, self.n), dtype=dtype)
with pytest.raises(TypeError):
self._test_spsolve_triangular(sp, ng_a, b)
# b is not cupy ndarray
ng_b = numpy.ones((self.n, self.nrhs), dtype=dtype)
with pytest.raises(TypeError):
self._test_spsolve_triangular(sp, a, ng_b)
@testing.parameterize(*testing.product({
'tol': [0, 1e-5],
'reorder': [0, 1, 2, 3],
}))
@testing.with_requires('scipy')
class TestCsrlsvqr(unittest.TestCase):
n = 8
density = 0.75
_test_tol = {'f': 1e-5, 'd': 1e-12}
def _setup(self, dtype):
dtype = numpy.dtype(dtype)
a_shape = (self.n, self.n)
a = testing.shaped_random(a_shape, numpy, dtype=dtype, scale=2/self.n)
a_mask = testing.shaped_random(a_shape, numpy, dtype='f', scale=1)
a[a_mask > self.density] = 0
a_diag = numpy.diag(numpy.ones((self.n,), dtype=dtype))
a = a + a_diag
b = testing.shaped_random((self.n,), numpy, dtype=dtype)
test_tol = self._test_tol[dtype.char.lower()]
return a, b, test_tol
@testing.for_dtypes('fdFD')
def test_csrlsvqr(self, dtype):
if not cupy.cusolver.check_availability('csrlsvqr'):
            raise unittest.SkipTest('csrlsvqr is not available')
a, b, test_tol = self._setup(dtype)
ref_x = numpy.linalg.solve(a, b)
cp_a = cupy.array(a)
sp_a = cupyx.scipy.sparse.csr_matrix(cp_a)
cp_b = cupy.array(b)
x = cupy.cusolver.csrlsvqr(sp_a, cp_b, tol=self.tol,
reorder=self.reorder)
cupy.testing.assert_allclose(x, ref_x, rtol=test_tol,
atol=test_tol)
@testing.parameterize(*testing.product({
'format': ['csr', 'csc', 'coo'],
'nrhs': [None, 1, 4],
'order': ['C', 'F']
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
@testing.gpu
class TestSplu(unittest.TestCase):
n = 10
density = 0.5
def _make_matrix(self, dtype, xp, sp, density=None):
if density is None:
density = self.density
a_shape = (self.n, self.n)
a = testing.shaped_random(a_shape, xp, dtype=dtype, scale=2 / self.n)
mask = testing.shaped_random(a_shape, xp, dtype='f', scale=1)
a[mask > density] = 0
diag = xp.diag(xp.ones((self.n,), dtype=dtype))
a = a + diag
if self.format == 'csr':
a = sp.csr_matrix(a)
elif self.format == 'csc':
a = sp.csc_matrix(a)
elif self.format == 'coo':
a = sp.coo_matrix(a)
b_shape = (self.n,) if self.nrhs is None else (self.n, self.nrhs)
b = testing.shaped_random(b_shape, xp, dtype=dtype, order=self.order)
return a, b
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_splu(self, dtype, xp, sp):
a, b = self._make_matrix(dtype, xp, sp)
return sp.linalg.splu(a).solve(b)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_factorized(self, dtype, xp, sp):
a, b = self._make_matrix(dtype, xp, sp)
return sp.linalg.factorized(a)(b)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_spilu(self, dtype, xp, sp):
a, b = self._make_matrix(dtype, xp, sp)
return sp.linalg.spilu(a).solve(b)
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(rtol=1e-5, atol=1e-5, sp_name='sp')
def test_spilu_0(self, dtype, xp, sp):
        # Note: We don't know how to compute ILU(0) with
        # scipy.sparse.linalg.spilu, so this test uses a matrix that is
        # stored in a sparse format but is actually dense.
a, b = self._make_matrix(dtype, xp, sp, density=1.0)
if xp == cupy:
            # Set fill_factor=1 to compute ILU(0) using cuSPARSE
ainv = sp.linalg.spilu(a, fill_factor=1)
else:
ainv = sp.linalg.spilu(a)
return ainv.solve(b)
|
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import *
from shapely.geometry.base import *
from hybrid.resource import (
SolarResource,
WindResource,
ElectricityPrices
)
from hybrid.layout.plot_tools import plot_shape
from hybrid.log import hybrid_logger as logger
from hybrid.keys import set_nrel_key_dot_env
def plot_site(verts, plt_style, labels):
for i in range(len(verts)):
if i == 0:
plt.plot([verts[0][0], verts[len(verts) - 1][0]], [verts[0][1], verts[len(verts) - 1][1]],
plt_style, label=labels)
else:
plt.plot([verts[i][0], verts[i - 1][0]], [verts[i][1], verts[i - 1][1]], plt_style)
plt.grid()
class SiteInfo:
def __init__(self, data, solar_resource_file="", wind_resource_file="", grid_resource_file=""):
set_nrel_key_dot_env()
self.data = data
self.vertices = np.array([np.array(v) for v in data['site_boundaries']['verts']])
self.polygon: Polygon = Polygon(self.vertices)
self.valid_region = self.polygon.buffer(1e-8)
if 'lat' not in data or 'lon' not in data:
raise ValueError("SiteInfo requires lat and lon")
self.lat = data['lat']
self.lon = data['lon']
if 'year' not in data:
data['year'] = 2012
self.solar_resource = SolarResource(data['lat'], data['lon'], data['year'], filepath=solar_resource_file)
# TODO: allow hub height to be used as an optimization variable
self.wind_resource = WindResource(data['lat'], data['lon'], data['year'], wind_turbine_hub_ht=80,
filepath=wind_resource_file)
self.elec_prices = ElectricityPrices(data['lat'], data['lon'], data['year'], filepath=grid_resource_file)
self.n_timesteps = len(self.solar_resource.data['gh']) // 8760 * 8760
self.n_periods_per_day = self.n_timesteps // 365 # TODO: Does not handle leap years well
self.interval = (60*24)/self.n_periods_per_day
self.urdb_label = data['urdb_label'] if 'urdb_label' in data.keys() else None
logger.info("Set up SiteInfo with solar and wind resource files: {}, {}".format(self.solar_resource.filename,
self.wind_resource.filename))
@property
def boundary(self) -> BaseGeometry:
# TODO: remove boundaries of interior holes
# return self.polygon.boundary.difference(self.polygon.interiors)
return self.polygon.exterior
@property
def bounding_box(self) -> np.ndarray:
return np.array([np.min(self.vertices, 0), np.max(self.vertices, 0)])
@property
def center(self) -> Point:
bounding_box = self.bounding_box
        # midpoint of the bounding box
        return .5 * (bounding_box[0] + bounding_box[1])
def plot(self,
figure=None,
axes=None,
border_color=(0, 0, 0),
alpha=0.95,
linewidth=4.0
):
bounds = self.polygon.bounds
site_sw_bound = np.array([bounds[0], bounds[1]])
site_ne_bound = np.array([bounds[2], bounds[3]])
site_center = .5 * (site_sw_bound + site_ne_bound)
max_delta = max(bounds[2] - bounds[0], bounds[3] - bounds[1])
reach = (max_delta / 2) * 1.3
min_plot_bound = site_center - reach
max_plot_bound = site_center + reach
if not figure and not axes:
figure = plt.figure(1)
axes = figure.add_subplot(111)
axes.set_aspect('equal')
axes.set(xlim=(min_plot_bound[0], max_plot_bound[0]), ylim=(min_plot_bound[1], max_plot_bound[1]))
plot_shape(figure, axes, self.polygon, '--', color=border_color, alpha=alpha, linewidth=linewidth / 2)
plt.tick_params(which='both', labelsize=15)
plt.xlabel('x (m)', fontsize=15)
plt.ylabel('y (m)', fontsize=15)
return figure, axes
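# Minimal usage sketch (not part of the original module; the coordinates and
# boundary below are invented, and an NREL API key plus resource downloads are
# required for the resource objects to resolve):
#
#   site_data = {
#       'lat': 35.2, 'lon': -101.9, 'year': 2012,
#       'site_boundaries': {'verts': [[0, 0], [1000, 0], [1000, 1000], [0, 1000]]},
#   }
#   site = SiteInfo(site_data)
#   figure, axes = site.plot()
#   plt.show()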
|
# -*- coding: utf8 -*-fr
# pylint: disable=invalid-name
"""
ItopapiDeliveryModel is an abstraction of DeliveryModel representation on iTop
"""
from itopapi.model.prototype import ItopapiPrototype
from itopapi.model.features.hasOrganization import HasOrganization
__version__ = '1.0'
__authors__ = ['Julien Nauroy <julien.nauroy@u-psud.fr>']
class ItopapiDeliveryModel(ItopapiPrototype, HasOrganization):
# Configuration specific to itop
itop = {
# Name of the class in Itop
'name': 'DeliveryModel',
# Define which fields to save when creating or updating from the python API
'save': ['name', 'description'],
'foreign_keys': [
HasOrganization.foreign_key,
],
'list_types': {
'contacts_list': 'contact_id_finalclass_recall'
},
}
@staticmethod
def find(key):
""" Retrieve one or more instance of ApplicationSolution with the given key or criteria """
return ItopapiPrototype.find(ItopapiDeliveryModel, key)
@staticmethod
def find_by_name(name):
return ItopapiPrototype.find_by_name(ItopapiDeliveryModel, name)
@staticmethod
def find_all():
""" Retrieve all instance of OSFamily """
return ItopapiPrototype.find_all(ItopapiDeliveryModel)
"""
ItopapiDeliveryModel is an object that represents an Application Solution from iTop
"""
def __init__(self, data=None):
super(ItopapiDeliveryModel, self).__init__(data)
self.description = None
##################################
# Lists #
##################################
self.customers_list = []
self.contacts_list = []
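# Hypothetical usage sketch (the model name below is invented): once the itopapi
# connection has been configured elsewhere, delivery models can be retrieved
# through the lookup helpers defined above:
#
#   dm = ItopapiDeliveryModel.find_by_name('Standard support')
#   all_models = ItopapiDeliveryModel.find_all()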
|
import pytest
from creator.studies.factories import StudyFactory
from creator.studies.models import Study
from creator.events.models import Event
from creator.tasks import setup_slack_task
def test_setup_slack_success(db, mocker, settings):
"""
Test that the setup task operates correctly when the setup succeeds
"""
settings.FEAT_SLACK_CREATE_CHANNELS = True
settings.SLACK_TOKEN = "ABC"
study = StudyFactory()
mock_setup = mocker.patch("creator.tasks.setup_slack")
assert Event.objects.count() == 0
setup_slack_task(study.kf_id)
assert mock_setup.call_count == 1
assert Event.objects.count() == 2
assert Event.objects.filter(event_type="SL_STR").count() == 1
assert Event.objects.filter(event_type="SL_SUC").count() == 1
def test_setup_slack_no_study(db, mocker, settings):
"""
Test that correct exception is raised if the study does not exist
"""
with pytest.raises(Study.DoesNotExist):
setup_slack_task("ABC")
def test_setup_slack_fail(db, mocker, settings):
"""
Test that the setup task operates correctly when the setup fails
"""
settings.FEAT_SLACK_CREATE_CHANNELS = True
settings.SLACK_TOKEN = "ABC"
study = StudyFactory()
mock_setup = mocker.patch("creator.tasks.setup_slack")
mock_setup.side_effect = Exception("error making channel")
assert Event.objects.count() == 0
setup_slack_task(study.kf_id)
assert mock_setup.call_count == 1
assert Event.objects.count() == 2
assert Event.objects.filter(event_type="SL_STR").count() == 1
assert Event.objects.filter(event_type="SL_ERR").count() == 1
|
import os
import unittest
import importlib
import logging
import zipfile
from nose.tools import raises
from mock import patch, MagicMock
import datman
import datman.xnat
import datman.scanid
# Disable all logging for the duration of testing
logging.disable(logging.CRITICAL)
upload = importlib.import_module('bin.dm_xnat_upload')
FIXTURE = "tests/fixture_xnat_upload/"
class CheckFilesExist(unittest.TestCase):
ident = datman.scanid.parse("STUDY_SITE_9999_01_01")
archive = "some_dir/STUDY_SITE_9999_01_01.zip"
session = FIXTURE + "xnat_session.txt"
session_no_resources = FIXTURE + "xnat_session_missing_resources.txt"
session_missing_data = FIXTURE + "xnat_session_missing_scan_data.txt"
archive_scan_uids = [
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.445',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.444',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.447',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.446',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.440',
'1.2.840.113619.2.80.142631515.25030.1412106144.3.0.2',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.443',
'1.2.840.113619.2.336.4120.8413787.19465.1412083372.442',
'1.2.840.113619.2.5.18242516414121059301412105930313000',
'1.2.840.113619.2.80.142631515.25030.1412106138.1.0.2']
archive_experiment_id = '1.2.840.113619.6.336.' \
'254801968553430904107911738210738061468'
@raises(Exception)
@patch('bin.dm_xnat_upload.missing_resource_data')
@patch('datman.utils.get_archive_headers')
def test_raises_exception_if_scan_uids_mismatch(self, mock_headers,
mock_missing_resources):
# Set up
mock_headers.return_value = self.__generate_mock_headers(bad_id=True)
mock_missing_resources.return_value = False
xnat_session = self.__get_xnat_session(self.session)
# Run
files_exist = upload.check_files_exist(self.archive, xnat_session,
self.ident)
# Should raise an exception, so assertion is never reached
assert False
##### To do:
# Test that false is returned when a resource is missing, or when a scan is
# missing
def __generate_mock_headers(self, bad_id=False):
headers = {}
for num, item in enumerate(self.archive_scan_uids):
scan = MagicMock()
scan.SeriesInstanceUID = item
scan.StudyInstanceUID = self.archive_experiment_id
headers[num] = scan
if bad_id:
bad_scan = headers[0]
bad_scan.StudyInstanceUID = '1.1.111.111111.1.111.111111111111111'
headers[0] = bad_scan
return headers
def __get_xnat_session(self, text_file):
with open(text_file, 'r') as session_data:
xnat_session = eval(session_data.read())
return xnat_session
class GetResources(unittest.TestCase):
name_list = ['some_zipfile_name/',
'some_zipfile_name/dicom_file1.dcm',
'some_zipfile_name/dicom_file2.dcm',
'some_zipfile_name/bvals.txt',
'some_zipfile_name/gradOrs.txt',
'some_zipfile_name/dicom_file3.dcm',
'some_zipfile_name/Name_info.txt',
'some_zipfile_name/subjectid_EmpAcc.log']
@patch('bin.dm_xnat_upload.is_dicom')
@patch('io.BytesIO')
def test_returns_all_resources(self, mock_IO, mock_isdicom):
# Set up inputs
archive_zip = MagicMock(spec=zipfile.ZipFile)
archive_zip.return_value.namelist.return_value = self.name_list
expected_resources = ['some_zipfile_name/bvals.txt',
'some_zipfile_name/gradOrs.txt',
'some_zipfile_name/Name_info.txt',
'some_zipfile_name/subjectid_EmpAcc.log']
# Stop get_resources from verifying 'dicoms' in the mock zipfile
archive_zip.return_value.read.side_effect = lambda x: x
mock_IO.side_effect = lambda x: x
mock_isdicom.side_effect = lambda x: True if '.dcm' in x else False
actual_resources = upload.get_resources(archive_zip.return_value)
assert sorted(actual_resources) == sorted(expected_resources)
|
from typing import List, Tuple
import copy
def addGuards(lines: List[str]) -> List[List[str]]:
line_len = len(lines[0]) + 2
array = [['.'] * line_len ]
for i, line in enumerate(lines):
array.append(list('.' + line + '.'))
array.append(['.'] * line_len)
return array
def sumOfNeighbours(array: List[List[str]], i: int, j: int) -> int:
neighbours = range(-1, 2)
neighbour_sum = 0
for di in neighbours:
for dj in neighbours:
if (di != 0 or dj != 0) and array[i + di][j + dj] == '#':
neighbour_sum += 1
return neighbour_sum
# true if something changed
def nextStep(array: List[List[str]], occupied_threshold: int) -> Tuple[List[List[str]], bool, int]:
m = len(array[0]) - 2
n = len(array) - 2
buffor = copy.deepcopy(array)
occupied = 0
for i in range(1, n + 1):
for j in range(1, m + 1):
if array[i][j] == '.':
buffor[i][j] = array[i][j]
else:
neighbour_sum = sumOfNeighbours(array, i, j)
if array[i][j] == 'L' and neighbour_sum == 0:
buffor[i][j] = '#'
elif array[i][j] == '#' and neighbour_sum >= occupied_threshold:
buffor[i][j] = 'L'
else:
buffor[i][j] = array[i][j]
if buffor[i][j] == '#':
occupied += 1
return buffor, buffor != array, occupied
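# Small self-check (illustrative input, not the puzzle file): a single row of
# empty seats fills up after one step and is then stable for a threshold of 4.
def _demo() -> None:
    grid = addGuards(["LLLL"])
    grid, changed, occupied = nextStep(grid, 4)
    assert changed and occupied == 4
    grid, changed, occupied = nextStep(grid, 4)
    assert not changed and occupied == 4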
def main() -> None:
with open("11.in") as file:
f = file.read()
lines = f.splitlines()
with_guards = addGuards(lines)
steps = -1
different = True
while different:
with_guards, different, occupied = nextStep(with_guards, 4)
steps += 1
print(occupied)
if __name__ == "__main__":
    main()
|
# -*- coding: UTF-8 -*-
# This file is part of the jetson_stats package (https://github.com/rbonghi/jetson_stats or http://rnext.it).
# Copyright (c) 2019 Raffaello Bonghi.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# control command line
import curses
# Graphics elements
from .lib.common import (check_curses,
strfdelta,
plot_name_info,
size_min,
label_freq,
jetson_clocks_gui,
nvp_model_gui)
from .lib.linear_gauge import linear_gauge, GaugeName
@check_curses
def plot_CPUs(stdscr, offset, list_cpus, width):
max_bar = int(float(width) / 2.0)
for idx, name in enumerate(sorted(list_cpus)):
cpu = list_cpus[name]
# Split in double list
start = max_bar if idx >= len(list_cpus) / 2 and len(list_cpus) > 4 else 0
off_idx = idx - len(list_cpus) / 2 if idx >= len(list_cpus) / 2 and len(list_cpus) > 4 else idx
# Check if exist governor and add in percent name
percent = ""
if 'val' in cpu and 'governor' in cpu:
percent = "{gov} -{val: 4}%".format(gov=cpu['governor'].capitalize(), val=cpu['val'])
# Show linear gauge
linear_gauge(
            stdscr, offset=int(offset + off_idx), start=start, size=max_bar,
name=GaugeName(name, color=curses.color_pair(6)),
value=cpu.get('val', 0),
status='ON' if cpu else 'OFF',
percent=percent,
label=label_freq(cpu['frq'], start='k') if 'frq' in cpu else '')
# Size block CPU
    return int(offset + idx / 2 + 1) if len(list_cpus) > 4 else int(offset + idx + 1)
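# (Added note) With more than four CPUs the gauges are laid out in two columns:
# e.g. for 8 CPUs, indices 0-3 are drawn in the left column (start=0) and
# indices 4-7 in the right column (start=max_bar) with off_idx = idx - 4.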
@check_curses
def plot_temperatures(stdscr, start, offset, width, height, jetson):
start = start + (width - 17) // 2
# Define color temperatures
color_options = {
60: curses.color_pair(1),
40: curses.color_pair(3),
20: curses.A_NORMAL,
}
list_options = sorted(color_options.keys(), reverse=True)
# Plot title
stdscr.addstr(offset, start - 1, " [Sensor] ", curses.A_BOLD)
stdscr.addstr(offset, start + 11, " [Temp] ", curses.A_BOLD)
# Plot name and temperatures
for idx, name in enumerate(sorted(jetson.temperature)):
# Print temperature name
value = jetson.temperature[name]
stdscr.addstr(offset + idx + 1, start, ("{name:<7}").format(name=name))
# Set color temperature
color = curses.A_NORMAL
for k in list_options:
if value >= k:
color = color_options[k]
break
# Print temperature value
stdscr.addstr(offset + idx + 1, start + offset // 2 + 3, ("{val:8.2f}C").format(val=value), color)
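# (Added note) The threshold scan above walks the keys from highest to lowest and
# keeps the first one the value meets or exceeds: a reading of 45 maps to the
# 40-degree colour pair, 65 maps to the 60-degree pair, and anything below 20
# keeps the normal attribute.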
@check_curses
def plot_watts(stdscr, start, offset, width, height, jetson):
start = start + (width - 6) // 2
# Plot title
stdscr.addstr(offset, start - 11, " [Power/mW] ", curses.A_BOLD)
stdscr.addstr(offset, start + 2, " [Cur] ", curses.A_BOLD)
stdscr.addstr(offset, start + 9, " [Avr] ", curses.A_BOLD)
# Plot watts
total, power = jetson.power
for idx, name in enumerate(sorted(power)):
value = power[name]
stdscr.addstr(offset + idx + 1, start - 10, name, curses.A_NORMAL)
stdscr.addstr(offset + idx + 1, start + 3, str(value['cur']), curses.A_NORMAL)
stdscr.addstr(offset + idx + 1, start + 10, str(value['avg']), curses.A_NORMAL)
# Plot totals before finishing
stdscr.addstr(offset + idx + 2, start - 10, 'ALL', curses.A_BOLD)
stdscr.addstr(offset + idx + 2, start + 3, str(total['cur']), curses.A_BOLD)
stdscr.addstr(offset + idx + 2, start + 10, str(total['avg']), curses.A_BOLD)
@check_curses
def compact_info(stdscr, start, offset, width, height, jetson):
# Title menu
stdscr.addstr(offset, start + (width - 7) // 2, " [info] ", curses.A_BOLD)
counter = 1
# Model board information
uptime_string = strfdelta(jetson.uptime, "{days} days {hours}:{minutes}:{seconds}")
plot_name_info(stdscr, offset + counter, start + 1, "UpT", uptime_string)
counter += 1
# FAN status
ctrl = "Ta" if jetson.fan.auto else "Tm"
if jetson.fan.speed is not None:
label = "{ctrl}={target: >3.0f}%".format(ctrl=ctrl, target=jetson.fan.speed)
else:
label = "{ctrl}".format(ctrl=ctrl)
linear_gauge(
stdscr,
offset=offset + counter, start=start + 1, size=width,
name=GaugeName('FAN', color=curses.color_pair(6)),
value=jetson.fan.get('measure', 0),
status='ON' if jetson.fan else 'DISABLED',
label=label)
counter += 1
# Jetson clocks status: Running (Green) or Normal (Grey)
jetson_clocks_gui(stdscr, offset + counter, start + 1, jetson)
counter += 1
# NVP Model
if jetson.nvpmodel is not None:
nvp_model_gui(stdscr, offset + counter, start + 1, jetson)
counter += 1
# Write all engines
engines(stdscr, start, offset + counter, width, height, jetson)
def engines(stdscr, start, offset, width, height, jetson):
stdscr.hline(offset, start + 1, curses.ACS_HLINE, width - 1)
stdscr.addstr(offset, start + (width - 13) // 2, " [HW engines] ", curses.A_BOLD)
counter = 1
# APE frequency
if jetson.engine.ape:
plot_name_info(stdscr, offset + counter, start + 1, "APE", str(jetson.engine.ape['val']) + "MHz")
counter += 1
# Find encoders
if jetson.engine.nvenc:
enc_name = 'NVENC'
enc_val = "{value}{unit}Hz".format(value=jetson.engine.nvenc['val'], unit="M")
elif jetson.engine.msenc:
enc_name = 'MSENC'
enc_val = "{value}{unit}Hz".format(value=jetson.engine.msenc['val'], unit="M")
else:
enc_name = 'NVENC'
enc_val = "[OFF]"
# Find decoders
if jetson.engine.nvdec:
dec_name = 'NVDEC'
dec_val = "{value}{unit}Hz".format(value=jetson.engine.nvdec['val'], unit="M")
else:
dec_name = 'NVDEC'
dec_val = "[OFF]"
double_info(stdscr, start + 1, offset + counter, width, (enc_name, enc_val), (dec_name, dec_val))
counter += 1
# NVJPG
if jetson.engine.nvjpg is not None:
if jetson.engine.nvjpg:
value, _, unit = size_min(jetson.engine.nvjpg)
value = "{value}{unit}Hz".format(value=value, unit=unit)
else:
value = "[OFF]"
# Plot status
plot_name_info(stdscr, offset + counter, start + 1, "NVJPG", value)
def double_info(stdscr, start, offset, width, enc, dec):
plot_name_info(stdscr, offset, start, enc[0], enc[1])
plot_name_info(stdscr, offset, start + width // 2, dec[0], dec[1])
# EOF
|
#Deep pi
from scipy.spatial import distance
import imutils
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import time
import dlib
import cv2
from playsound import playsound
import serial
#Create alarm using thread
def create_alarm():
global alarm_status
while alarm_status:
playsound("1.mp3")
#countdown and send msg to arduino
def countdown():
global alarm_status
global my_timer
my_timer=5*60
while alarm_status :
mins, secs = divmod(my_timer, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
time.sleep(1)
my_timer -= 1
if(my_timer==0):
            msg = serial.Serial("/dev/rfcomm1", baudrate=9600)
            msg.write(b"10")  # msg2arduino
# calculate eye aspect ratio (EAR)
def EAR_Calculater(point):
p14 = distance.euclidean(point[0], point[3])
p32 = distance.euclidean(point[1], point[5])
p65 = distance.euclidean(point[2], point[4])
    Ear = (p32 + p65) / (2.0 * p14)
return Ear
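# (Added note) This is the usual eye-aspect-ratio formula,
# EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||): the two vertical eye
# distances over twice the horizontal one. Open eyes typically sit around
# 0.25-0.35 and the ratio collapses towards 0 as the eye closes, which is why
# EAR_Threshold below is set to 0.25.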
#detect shape position
def Shape_Position(shape):
(Leye_first, Leye_last) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"] # left_eye = (42, 48))
(Reye_first, Reye_last) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"] # right_eye = (36, 42)
leftEye = shape[Leye_first:Leye_last]
rightEye = shape[Reye_first:Reye_last]
leftEAR = EAR_Calculater(leftEye)
rightEAR = EAR_Calculater(rightEye)
Avg_ear = (leftEAR + rightEAR) / 2.0
return (Avg_ear, leftEye, rightEye)
#create parameters
EAR_Threshold = 0.25
NO_EAR_FRAMES = 22  # number of consecutive low-EAR frames before the alert fires
alarm_status = False
count = 0
my_timer=60*5
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
V_Stream = VideoStream(usePiCamera=True).start()  # For Raspberry Pi camera
time.sleep(1.0)
while True:
frame = V_Stream.read()
frame = imutils.resize(frame, width=777,height=777)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rectangl = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in rectangl:
rectangl = dlib.rectangle(int(x), int(y), int(x + w),int(y + h))
cv2.rectangle(frame,(x,y),(x+w,y+h),(110,255,0),5,1 )
shape = predictor(gray, rectangl)
shape = face_utils.shape_to_np(shape)
eye = Shape_Position(shape)
Ear = eye[0]
leftEye = eye [1]
rightEye = eye[2]
Leye = cv2.convexHull(leftEye)
Reye = cv2.convexHull(rightEye)
cv2.drawContours(frame, [Leye,Reye], -1, (0, 0, 255), 2)
if Ear < EAR_Threshold:
count += 1
if count >= NO_EAR_FRAMES:
if alarm_status == False:
alarm_status = True
t = Thread(target=create_alarm)
t1 = Thread(target=countdown)
                    t.daemon = True
t.start()
t1.start()
mi, se = divmod(my_timer, 60)
help_timer = '{:02d}:{:02d}'.format(mi, se)
cv2.putText(frame, "Call help in "+str(help_timer), (10, 90),
cv2.FONT_ITALIC, 0.7, (13, 212, 255), 2)
cv2.putText(frame, "Sleep Alert", (10, 30),
cv2.FONT_ITALIC, 0.8, (255, 0, 255), 2)
else:
alarm_status = False
count = 0
cv2.putText(frame, "EAR: {:.2f}".format(Ear), (10, 60),
cv2.FONT_ITALIC, 0.7, (0, 0, 255), 2)
cv2.imshow("Deep PI", frame)
    if (cv2.waitKey(1) & 0xFF) == ord(" "):  # exit on space bar; TODO: read a hardware button from the driver instead
break
cv2.destroyAllWindows()
V_Stream.stop()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from enum import Enum
class JudgeType(Enum):
Normal = "normal"
Decimal = "decimal"
Other = "other"
class ErrorType(Enum):
Absolute = "absolute"
Relative = "relative"
AbsoluteOrRelative = "absolute_or_relative"
DEFAULT_EPS = 1e-9
class NoJudgeTypeException(Exception):
pass
class Judge(metaclass=ABCMeta):
@abstractmethod
def verify(self, output, expected):
pass
@abstractmethod
def to_dict(self):
pass
class NormalJudge(Judge):
def __init__(self):
self.judge_type = JudgeType.Normal
def verify(self, output, expected):
return output == expected
def to_dict(self):
return {
"judge_type": self.judge_type.value,
}
@classmethod
def from_dict(cls, dic):
r = NormalJudge()
return r
class DecimalJudge(Judge):
def __init__(self,
error_type: ErrorType = ErrorType.AbsoluteOrRelative,
diff: float = 0.0
):
self.judge_type = JudgeType.Decimal
self.error_type = error_type
self.diff = diff
def _verify_sub(self, output: float, expected: float) -> bool:
if self.error_type in [ErrorType.Absolute, ErrorType.AbsoluteOrRelative] and abs(expected - output) <= self.diff:
return True
        if self.error_type in [ErrorType.Relative, ErrorType.AbsoluteOrRelative] and self._calc_relative(output, expected):
return True
return False
    def _calc_relative(self, output: float, expected: float) -> bool:
if expected == 0:
return expected == output
return abs((expected - output) / expected) <= self.diff
def verify(self, output, expected) -> bool:
output = output.strip().split()
expected = expected.strip().split()
if len(output) != len(expected):
return False
for i in range(0, len(output)):
if not self._verify_sub(float(output[i]), float(expected[i])):
return False
return True
def to_dict(self):
return {
"judge_type": self.judge_type.value,
"error_type": self.error_type.value,
"diff": self.diff
}
@classmethod
def from_dict(cls, dic):
r = DecimalJudge(
diff=dic["diff"]
)
r.error_type = ErrorType(dic["error_type"])
return r
class OtherJudge(Judge):
# dummy
pass
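# Minimal self-check (illustrative values, not part of the original module): a
# DecimalJudge with diff=1e-6 accepts a value within absolute/relative error,
# while a NormalJudge requires the exact string.
if __name__ == "__main__":
    assert DecimalJudge(diff=1e-6).verify("3.1415927", "3.14159265")
    assert not NormalJudge().verify("3.1415927", "3.14159265")
    assert NormalJudge().verify("42", "42")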
|
#coding: utf-8
# import logging
# import datetime
# import tornado.escape
# from config import BaseController
# from config.dmls import QUERY_STATEMENTS, USER_LIST
# class ApiQuery(BaseController):
# "/v1/query"
# def post(self):
# delta = datetime.timedelta(days=1)
# data = tornado.escape.json_decode(self.request.body)
# user_id = data["user_id"]
# start_time = data["start_time"] / 1000
# start_obj = datetime.datetime.fromtimestamp(start_time)
# end_obj = start_obj + delta
# dict_result = {}
# dict_query = {
# 'user_id': user_id,
# 'start_time': start_obj.strftime("%Y-%m-%d"),
# 'end_time': end_obj.strftime("%Y-%m-%d"),
# }
# conn = self.get_conn()
# cursor = self.get_cursor(conn)
# try:
# for qs in QUERY_STATEMENTS:
# cursor.execute(qs, dict_query)
# result = cursor.fetchone()
# if result:
# dict_result.update(result)
# else:
# logging.warn(qs)
# except Exception as e:
# logging.warn(e)
# logging.warn(qs)
# conn.rollback()
# finally:
# self.put_conn(conn)
# if 'grp_use' not in dict_result:
# dict_result['grp_use'] = 0
# self.write(dict_result)
# class ApiAllUsers(BaseController):
# "/v1/users"
# def post(self):
# result = []
# data = tornado.escape.json_decode(self.request.body)
# nick_name = "%" + data["nick_name"] + "%"
# logging.warn("Nick name: %s", nick_name)
# ret = 0
# conn = self.get_conn()
# cursor = self.get_cursor(conn)
# try:
# cursor.execute(USER_LIST, {'nick_name': nick_name})
# result = cursor.fetchall()
# except:
# logging.warn("Fail to get user list")
# conn.rollback()
# finally:
# self.put_conn(conn)
# if not result:
# ret = 1
# total = len(result)
# self.write(dict(users=result, ret=ret, total=total))
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
class RealToReciprocal(object):
def __init__(self,
fc3,
supercell,
primitive,
mesh,
symprec=1e-5):
self._fc3 = fc3
self._supercell = supercell
self._primitive = primitive
self._mesh = mesh
self._symprec = symprec
self._p2s_map = primitive.get_primitive_to_supercell_map()
self._s2p_map = primitive.get_supercell_to_primitive_map()
# Reduce supercell atom index to primitive index
(self._smallest_vectors,
self._multiplicity) = primitive.get_smallest_vectors()
self._fc3_reciprocal = None
def run(self, triplet):
self._triplet = triplet
num_patom = self._primitive.get_number_of_atoms()
dtype = "c%d" % (np.dtype('double').itemsize * 2)
self._fc3_reciprocal = np.zeros(
(num_patom, num_patom, num_patom, 3, 3, 3), dtype=dtype)
self._real_to_reciprocal()
def get_fc3_reciprocal(self):
return self._fc3_reciprocal
def _real_to_reciprocal(self):
num_patom = self._primitive.get_number_of_atoms()
sum_triplets = np.where(
np.all(self._triplet != 0, axis=0), self._triplet.sum(axis=0), 0)
sum_q = sum_triplets.astype('double') / self._mesh
for i in range(num_patom):
for j in range(num_patom):
for k in range(num_patom):
self._fc3_reciprocal[
i, j, k] = self._real_to_reciprocal_elements((i, j, k))
prephase = self._get_prephase(sum_q, i)
self._fc3_reciprocal[i] *= prephase
def _real_to_reciprocal_elements(self, patom_indices):
num_satom = self._supercell.get_number_of_atoms()
pi = patom_indices
i = self._p2s_map[pi[0]]
dtype = "c%d" % (np.dtype('double').itemsize * 2)
fc3_reciprocal = np.zeros((3, 3, 3), dtype=dtype)
for j in range(num_satom):
if self._s2p_map[j] != self._p2s_map[pi[1]]:
continue
for k in range(num_satom):
if self._s2p_map[k] != self._p2s_map[pi[2]]:
continue
phase = self._get_phase((j, k), pi[0])
fc3_reciprocal += self._fc3[i, j, k] * phase
return fc3_reciprocal
def _get_prephase(self, sum_q, patom_index):
r = self._primitive.get_scaled_positions()[patom_index]
return np.exp(2j * np.pi * np.dot(sum_q, r))
def _get_phase(self, satom_indices, patom0_index):
si = satom_indices
p0 = patom0_index
phase = 1+0j
for i in (0, 1):
vs = self._smallest_vectors[si[i], p0,
:self._multiplicity[si[i], p0]]
phase *= (np.exp(2j * np.pi * np.dot(
vs, self._triplet[i + 1].astype('double') /
self._mesh)).sum() / self._multiplicity[si[i], p0])
return phase
|
# @lc app=leetcode id=717 lang=python3
#
# [717] 1-bit and 2-bit Characters
#
# https://leetcode.com/problems/1-bit-and-2-bit-characters/description/
#
# algorithms
# Easy (46.28%)
# Likes: 626
# Dislikes: 1604
# Total Accepted: 96.5K
# Total Submissions: 208.2K
# Testcase Example: '[1,0,0]'
#
# We have two special characters:
#
#
# The first character can be represented by one bit 0.
# The second character can be represented by two bits (10 or 11).
#
#
# Given a binary array bits that ends with 0, return true if the last character
# must be a one-bit character.
#
#
# Example 1:
#
#
# Input: bits = [1,0,0]
# Output: true
# Explanation: The only way to decode it is two-bit character and one-bit
# character.
# So the last character is one-bit character.
#
#
# Example 2:
#
#
# Input: bits = [1,1,1,0]
# Output: false
# Explanation: The only way to decode it is two-bit character and two-bit
# character.
# So the last character is not one-bit character.
#
#
#
# Constraints:
#
#
# 1 <= bits.length <= 1000
# bits[i] is either 0 or 1.
#
#
#
# @lc tags=array
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 0 is a one-bit character; 10 and 11 are two-bit characters. Determine
# whether the string must end with a one-bit character.
# Solved with a small DFA over the bits.
#
# @lc idea=end
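# (Added trace) Running the DFA below on the two examples: [1,0,0] visits
# states 0 -> 1 -> 0 -> 2 and ends in state 2, so the answer is true;
# [1,1,1,0] visits 0 -> 1 -> 0 -> 1 -> 0 and ends in state 0, so the answer
# is false.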
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def isOneBitCharacter(self, bits: List[int]) -> bool:
        # DFA states: 0 = boundary after a two-bit character (or start),
        # 1 = inside a two-bit character, 2 = boundary after a one-bit character.
        dfa = [
            # next state on bit 0, bit 1
            [2, 1],  # state 0
            [0, 0],  # state 1
            [2, 1],  # state 2
        ]
s = 0
for c in bits:
s = dfa[s][c]
return s == 2
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('bits = [1,0,0]')
    print('Expected :')
print('true')
print('Output :')
print(str(Solution().isOneBitCharacter([1, 0, 0])))
print()
print('Example 2:')
print('Input : ')
print('bits = [1,1,1,0]')
    print('Expected :')
print('false')
print('Output :')
print(str(Solution().isOneBitCharacter([1, 1, 1, 0])))
print()
pass
# @lc main=end
|
# -*- coding: utf-8 -*-
import remi
import remi.gui as gui
from remi.gui import *
from threading import Timer
import traceback
import time
import math
import epics
#from epics import caget, caput, cainfo
style_inheritance_dict = {'opacity':'inherit', 'overflow':'inherit', 'background-color':'inherit', 'background-image':'inherit', 'background-position':'inherit', 'background-repeat':'inherit', 'border-color':'inherit', 'border-width':'inherit', 'border-style':'inherit', 'border-radius':'inherit', 'color':'inherit', 'font-family':'inherit', 'font-size':'inherit', 'font-style':'inherit', 'font-weight':'inherit', 'white-space':'inherit', 'letter-spacing':'inherit'}
style_inheritance_text_dict = {'opacity':'inherit', 'overflow':'inherit', 'color':'inherit', 'font-family':'inherit', 'font-size':'inherit', 'font-style':'inherit', 'font-weight':'inherit', 'white-space':'inherit', 'letter-spacing':'inherit'}
# noinspection PyUnresolvedReferences
class EPICSWidget(object):
@property
@gui.editor_attribute_decorator('WidgetSpecific','The PV name', str, {})
def epics_pv_name(self): return self.__epics_pv_name
@epics_pv_name.setter
def epics_pv_name(self, v):
self.__epics_pv_name = v
self.disconnect()
try:
self.epics_pv = epics.PV(self.__epics_pv_name, auto_monitor=True, callback=self.onChanges, connection_callback=self.onConnectionChange, connection_timeout=2)
except:
print(traceback.format_exc())
epics_pv = None # here will be stored the PV instance
app_instance = None
def __del__(self):
self.disconnect()
def disconnect(self):
if self.epics_pv:
self.epics_pv.clear_auto_monitor()
self.epics_pv.disconnect()
@decorate_set_on_listener("(self, emitter, pvname=None, conn=None, chid=None, **kwargs)")
@decorate_event
def onConnectionChange(self, pvname=None, conn=None, chid=None, **kwargs):
#print('ca connection status changed: ', pvname, conn, chid)
#Here I use the outline red color to show the unconnected state
# of course this can be avoided or changed
self.style['outline'] = "1px solid red"
if conn:
del self.style['outline']
return (pvname, conn, chid, kwargs)
@decorate_set_on_listener("(self, emitter, pvname=None, value=None, **kwargs)")
@decorate_event
def onChanges(self, pvname=None, value=None, **kwargs):
#as default I write the value to the widget itself
self.set_value(str(value))
return (pvname, value, kwargs)
def search_app_instance(self, node):
if issubclass(node.__class__, remi.server.App):
return node
if not hasattr(node, "get_parent"):
return None
return self.search_app_instance(node.get_parent())
def get_app_instance(self):
        if self.app_instance is None:
self.app_instance = self.search_app_instance(self)
return self.app_instance
class EPICSBooleanButton(gui.Container, EPICSWidget):
""" A Button widget that sets the bit when clicked.
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC4AAAAuCAYAAABXuSs3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAKMSURBVGiB7ZqxaxNRGMB/9+7uXZK2aaMVEUWEKNSh4CQIrhWnujiIQqGig7gXF/+B7tLNbp2KQyfBwcVdsdhWJa5WHZKKvZqXe3cO57VNmraJyd0ZuB88LnkvfO/H43sfB18Mz/OCIAhoN4DdZ9IYhrH7bDesSPJlRfHsrc+nmpGK6HGcGQp4clVwsywBMJRSgVKKqWXNpitS1juaS6M+L26ZSCnDE1dKsenaAHy/MUNjtNJ10JG1WYofHvTbtYnPWwKlFLZth+Ke5wGheKP0EXVireugemizz5rt8TyPIAgQe+KDQZO41jptn47RWofiAL7vp+3TMZGrtb9mAxTfP0bnf3QdMPf1Wt/kjiLytVoXRtZnEhHolf+7cB9BJp40mXjSDKz4gXLYHQ6vHtmUD8z7LC+4zAGz00M8PXv4q3Jl4xdTr7vfuUfx9pvP3xnm9v086893WFzZZjFamMzz7rpg7c02d1d72zOWVJn75oNjcDmO4H+JRXz+tKCyEaZKXPQlVcoTw3yZaJ776dpAox/h2xJLjocXUrI02eg5lw8jllRZXPGoYHBqPI7oIQNbx2MRn522KNc1S/9Qnzslpsvps7yws1e/Y6BH8TpTC/XOf766w5U+XdYsx5MmE0+aTDxpMvGkycSTRkTNoEEh8hXRl0Ehch3cEzcMA9M0GbdV2k7Hcj7/G9M098Qty+LhxSrnHDdtt0M5adW5d2ELy7LCU1dKBa7rUqvVqFaruK5LvV5Ptbvc2lV2HIdCoUCpVGJsbIxCoYAVLRSLRaSUKKVQStHaYkmDSFxKiZSSXC6H4zjhvOd5ge/7aK2bRtQkSruXL4TANM2mIYTA0FoHrWmR9h8QIlpTZv/nP6KyI2uh/zMtAAAAAElFTkSuQmCC"
@property
@gui.editor_attribute_decorator('WidgetSpecific','Specifies if the button is toggle or must reset the value on release', bool, {})
def toggle(self): return self.__toggle
@toggle.setter
def toggle(self, v):
self.__toggle = v
self.button.onmouseup.do(self.reset_bit if not self.__toggle else None)
@property
@editor_attribute_decorator("WidgetSpecific",'''Text content''', str, {})
def text(self): return self.button.get_text()
@text.setter
def text(self, value): self.button.set_text(value)
button = None # The gui.Button widget instance
led = None # The led indicator Widget
def __init__(self, button_label='epics button', epics_pv_name='', toggle=False, *args, **kwargs):
self.color_inactive = 'darkgray'
self.color_active = 'rgb(0,255,0)'
self.button = gui.Button(button_label, width="100%", height="100%", style=style_inheritance_dict)
self.led = gui.Widget(width=15, height=5, style={'position':'absolute', 'left':'2px', 'top':'2px', 'background-color':self.color_inactive})
self.led_status = False
default_style = {'position':'absolute','left':'10px','top':'10px', 'background-color':'rgb(4, 90, 188)', 'color':'white'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','100px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','100px'))
super(EPICSBooleanButton, self).__init__(*args, **kwargs)
_style = {'position':'relative'}
_style.update(style_inheritance_dict)
self.append(gui.Container(children=[self.button, self.led], width="100%", height="100%", style=_style))
self.toggle = toggle
self.epics_pv_name = epics_pv_name
self.button.onmousedown.do(self.set_bit)
def set_bit(self, emitter, *args, **kwargs):
self.pressed = True
self.written = False
value = 1
if self.toggle:
value = 0 if self.led_status else 1
self.epics_pv.put(value, callback = (self.put_done if not self.toggle else None) )
def put_done(self, *args, **kwargs):
self.written = True
#this function gets called when a set_bit is completed and the button is not toggle
# and so the value have to be reset
if not self.pressed:
self.epics_pv.put(0)
def reset_bit(self, emitter, x, y, *args, **kwargs):
self.pressed = False
if self.written:
self.epics_pv.put(0)
def set_value(self, value):
if not self.get_app_instance():
return
with self.get_app_instance().update_lock:
#this function gets called when the camonitor notifies a change on the PV
self.led_status = float(value)>0.0
self.led.style.update({'background-color':self.color_active if self.led_status else self.color_inactive})
class EPICSLed(HBox, EPICSWidget):
"""A Status indicator widget.
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC4AAAAuCAYAAABXuSs3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAIswAACLMBhC+V2wAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAZXSURBVGiBzZrbbxTXGcB/39o7s2svUCzF3lxEDMakrUhCgCZOQs1FEBtVBFlcouAmbxEBU8fQSEj5B/KSItaNi/tMTVUFrARRE0i52U1CEscSKWqDbQwmQQQQSWBt75wZzNcH7xjjG2tsvPyezsyZOfPb2TNnzne+EcaJqua4rlssIotU9ddAAfAQEEke0gVcBdqB/wKfWpZ1QkR+Gs915R5lQ57nrQNeV9VlQGCMTfQCR4DdlmXtFRFnrA5jElfVLGNMhYhsA6L+/m6vm8+//5xTl0/R9mMbV7qv0O11A5AdzCYvkkdhTiFP5T7F8489T3Ywe2Czl1T1T7Zt/0VEEhMu7jjOKhGpBvIBEl6C+jP17Dm9h8YLjbi9bkrtWBkWxTOKKZ9bTtkTZYSDYb/qnKr+IRQK/XNCxFU1bIyJicgbAD1eDzXNNcS+jHG152pKsiORm5VL1XNVbF6wuf8HqOpfbduuulv3GVW8u7v74czMzAZgHkBDewNVh6vovN45LuHB5E/LJ1YSo7Sg1N/V4nne7yKRyA8jnTOiuOM4s0TkE2CWuWnYfnQ7u77eNaHCd4oIFQsreHfZu1gZFkCHqq4IhUIdwx8/DPF4PNeyrCZgTpfbxfp96zly/sh9kx7I4hmL2bt2L1PtqQAdnue9ONydHyKuqmHXdT8FnrlhblCyp4SWH1omQfk2Cx9eyMevfswUewrA15ZlLRrc54eMv8aYGPCMuWlYs3fNpEsDNF9qZu2+tf5ItcAYs2PwMXeIJ4e8NwC2H91O44XGyTEdhuOdx3nn2DsAiMgmx3FWDqzv7yqqmuW67mlgZkN7A2UflE2u6TAIwv5X9vPSrJcAzlqWNdfvMv133BhTAcxMeAm2Ht6aJtU7UZSKgxX0eD0ABcaYTX5dAPrmHsnXOO83v8/56+fTIjocF25coLalFgAReVtVQ5AU9zxvPRBNeAliX8bSZzkCO7/YScJLADziuu4auN1VXgOoP1M/7tf4/eBy92U+av3I33wNIKCqOaq6FKDuP3Xpcrsrdaf73Zar6i8CrusuBjJ6vB6avmtKo9roHO887j+kGcaY4oCIvAjw2fefpTw1TQdur8vJiycBEJFFgWS4xanLp9IqlgrfXPnGL/4yABQCtP3YljahVGm91uoX5wSAHOh7ch90fEdVzQmQjMa73K50OqVE3MQBEJGpY43OHxgC9K17ELEidzk0/SSDC1Q1HgCuAUQj0dHOeSDIi+QBICLXAkAbwOzps9PplBKFOYV+sTUgIv8DeDrv6fQZpcgAx28DqvpvgBcee8GPrh9I7AybokeLAFDVpoBlWSeA3qxgFsUzitNrNwpL85cSzgwD3LRtuykgIj+JyFGA8rnl6bUbhQ1zN/jFf4nIz/44vhug7IkycrNy0yI2GtHsKKvnrPY3d0MykAgGgx8Al8LBMFXPVaVJb2S2FW0jlBkCuGhZ1j5IiouIo6o7ADYv2Ez+tPy0SQ6mYHoBG+dvBEBV3xMRAwOifNu2a4COcDBMrCSG3Nua/4QiCNUl1f7dbrdtu9av6xcXkYSqVgKUFpRSsbBi8k0H8dazb7F85nIAVHXLwGW4IbfVcZxaEdno9rq8/I+XOdZ5bBJVb7Msfxn71+8nmBFERGosy9oysH64Rc9QctFzftzEKf17Kc2XmidNGIYsen5lWdZv/b7tM2RaKyKOZVmlQOsUewqHNhxixcwVk6QMSx5fwsFXD/rSZz3PWzVYGkbIlonIVVVdCZyNWBHq19VT+ZvK+/rACkLVs1UceOWAP31tv3Xr1opIJDJsaDaqSVdXVzQYDB4AFgAc7jhM5aFKzv18bkKlC6YXUF1S3f8gAl95nrdqJGlILXkVMsbsEJFN0Jdt29Wyi51f7Bx3nBrNjrK1aCtvzn/TH/IQkZpgMPjH4brHmMR9HMdZKSJ/pi9zjHPT4cMzH1J3uo4TnScwvaNepx87w2bJ40sof7Kc1XNW9wsD7aq6JRQKHUqlnbEmaEPGmE0i8jbwiL+/x+vh5MWTdyRo425fYBuxIuRl9yVo50XnUfRokT/L87moqu/Ztl07lgzzvabEbdd11wG/B5YDGWNsohf4BPhbMiWe2t81gHEPE6o6zRizWEQWAb8CZgO53P4IIU7fRwhtwLeq2mTbdqOIXB/Pdf8P7oFocYOtZGkAAAAASUVORK5CYII="
@property
@editor_attribute_decorator("Geometry",'''Widget width.''', 'css_size', {})
def css_width(self): return self.style.get('width', None)
@css_width.setter
def css_width(self, value):
self.style['width'] = str(value)
self._update_size()
@property
@editor_attribute_decorator("Geometry",'''Widget height.''', 'css_size', {})
def css_height(self): return self.style.get('height', None)
@css_height.setter
def css_height(self, value):
self.style['height'] = str(value)
self._update_size()
label_value = None # the gui.Label used to show the value 0 or 1
def __init__(self, epics_pv_name='', *args, **kwargs):
self.color_inactive = 'darkgray'
self.color_active = 'rgb(0,180,0)'
default_style = {'position':'absolute','left':'10px','top':'10px', 'color':'white','background-color':self.color_inactive, 'align-items':'center', 'justify-content':'center'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','50px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','50px'))
super(EPICSLed, self).__init__(*args, **kwargs)
_style = {'text-align':'center'}
_style.update(style_inheritance_text_dict)
self.label_value = gui.Label("0", style=_style)
self.append(self.label_value)
self.epics_pv_name = epics_pv_name
def _update_size(self):
width = gui.from_pix(self.style.get('width', "100").replace("%",""))
height = gui.from_pix(self.style.get('height', "100").replace("%",""))
radius = min(width, height)/2
self.style['border-radius'] = gui.to_pix(radius)
def set_value(self, value):
if not self.get_app_instance():
return
with self.get_app_instance().update_lock:
_value = float(value)
self.label_value.set_text( '1' if _value>0.0 else '0' )
self.style.update({'background-color':self.color_active if _value>0.0 else self.color_inactive})
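# A minimal usage sketch for EPICSLed (hypothetical App subclass and PV name;
# assumes the remi App/gui setup this module already relies on):
#
#   class MyDashboard(App):
#       def main(self):
#           container = gui.Container(width=220, height=220)
#           container.append(EPICSLed(epics_pv_name='TEST:STATUS'))
#           return container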
class EPICSValueMeterWidget(Progress, EPICSWidget):
"""A simple progress bar indicating a value.
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACkAAAApCAYAAACoYAD2AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOwgAADsIBFShKgAAAAG1JREFUWEft1qENgEAMQNEec6AwDMG4jIbC3QZAQuXhvijJf6aVP+mJa9cjiptylmYkxUiKkRQjKUZSjKQYSTGSMvyZt/3M7dux9dx487Lm9vLclP++yWo8N8VIipEUIylGUoykGEkxkmIkI+IGyZcQRHB9PC8AAAAASUVORK5CYII="
def __init__(self, epics_pv_name='', max_value=100, *args, **kwargs):
default_style = {'position':'absolute','left':'10px','top':'10px'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','100px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','30px'))
super(EPICSValueMeterWidget, self).__init__(0, max_value,*args, **kwargs)
self.epics_pv_name = epics_pv_name
def set_value(self, value):
if not self.get_app_instance():
return
with self.get_app_instance().update_lock:
Progress.set_value(self, value)
try:
import pygal
except ImportError:
    print("The pygal library is required to use the EPICSPlotPV widget")
class EPICSPlotPV(gui.Svg, EPICSWidget):
"""A simple plot bar indicating a value.
REQUIRES library pygal to be installed
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAAAuCAYAAACYlx/0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAJjQAACY0BXC+J+AAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAhJSURBVGiB5ZprTFNbFsd/bWlLX1pABKHjI4ZATBwTMblkNPLBB4IJ0cSMUWYCwUd8xOcl6nxwEtQYYzQTHwh3EmQkOlFx9IMyiXFiMCLjY4YwiRodtT7QqlAKpaXY09Oe+VDbodAXXJXLvf9kJ+fstfY6a6/u9d9r71RGDBQXF2dIkvROJpP5YumONUiSJE+IR1Gj0fTt2rVL97Ud+taorKyU5KPtxNdCZWUlL1++jKkX1wpIT0+X6/X6H+3Ul8abN2+4ePEiFRUVQ2TPnj3D5XIRze/x48fHF4APHz74nE7nyD39SrDb7TgcDgK+SZJEQ0MDy5Yt4/3797x7945oftvtdsZ0Cng8Htxud/DdbrdTV1eH2WwmNzcXq9Ua08aYDoAoiiEB6Orqwmaz0dLSQl5eHjabLaaNMc0BCQkJiKIYzHOn00lubi5Xr16lvr6e48eP/7w5oLe3l76+vmCev379mrlz53Lu3DkMBgMulysmB8QVgJ8qRFHk06dPwXer1cq8efOQyWSAnxRjYUxzgCAIIRzQ2dlJeno6q1evjtvGmOcAj8cTzPO+vj4mTZoUXAEqlQqtVotcHv53HvMc4HA46O/vD+a5KIr09fUF5Xq9nrdv35KcnBx2/E+mDrBYLCMaN3gbHIyUlJSYW+GoB6CxsZHKysoRjRVFEVEUAfD5fEOWekpKCl1dXVFtjCoHdHd3c+fOHRYvXsy9e/dYsGDBsMbLZDLUajV6vR6r1UpaWlrIvp+ZmUlPT0/EWmDUOWD//v2sW7cOk8lERUUFc+bMQaFQxD3e6XSiVCpxOBy8evWK8ePHh+z7Wq2Wx48fR6wFRpUDmpubmT59OtOmTUOpVFJQUEBjY+OwbIiiiE6nQxAEOjs7mTBhQoj8i3GAUqlEoVDEbGazGY/HE1Ovv7+fy5cvU1paGuxbunQpTU1NeL3euL6lUCjwer0YDAY8Hg/d3d2kpaWFyFNTU7HZbCH6g23EFQCDwSBTq9VEa0qlkhMnTnDo0CFUKlVU3ZqaGrZu3YpWqw32aTQaSkpKuHTpUtSxAxvAuHHj8Pl82Gw2MjMzQ+Q6nQ5JklCr1ahUKvbt2xci12g08XGAzWaTXC5XVJ0rV65QUFCAXC7n2LFjrF+/Pqxea2srSqWSqVOnMthmbm4uFy5coKioCIPBENMvl8uFRqOhp6cHi8WCXq8fYlMURVwuFx0dHbS1tXH+/HmKi4sBP4d8EQ5wOBw0NzdTUFDA4sWLkSSJ69evD9ETBIGzZ8+ybt26iLbKysqor6+P67uBKtDtduN0OqOe/F69ekVZWRl3796lvb092B9XAJKTk2UajYZIra6ujs2bN6PVatFoNGzbto0HDx7w/PnzEL3Tp09TWlqK0WiMaCs3N5fOzk56e3sj6gSaJEkYjUbAXxaH0wmkp8ViITs7m71791JVVYVKpUKn08UXAKfTKQmCQLj29OlT3G43WVlZwT6Px8OePXuorq6mvb0dQRB4+PAhDoeD2bNnh7UzsK1du5aampqYem63G41Gg9PpxOv1htUxGo18/PgRs9lMRkYGer2eoqIiamtrcbvd8QVAEAS8Xm/YVl1dzZo1a4b0JyQksHv3bg4ePIjD4aCqqoqNGzdGtDOwmUwmfD4fZrM5qp4gCGi1WhwOR5DlB7ekpCQ6OjqwWq0YjUa8Xi/z589HoVAgiuKP44Bbt24xc+bMiIeNiRMnUl5eTmlpKcXFxVFzdDDKy8upra2NqhO4DbJYLCQlJYXViVQLBI7MCcBfgHtA9QC5DLgANAB3wpXCgiBw9epVTp48SUJC5M0kLy+Po0ePMmPGjKiTGQy9Xo/JZOLFixfMmjUrRPb06VOys7ORy+UkJyfz6NEjTCZT2ACbTCba29vRarVD5IFS2Az8EagFhM+yJcAyYAeEL4Xr6upYsWJFyI1MJEyePDnq1VQkrFq1iv3793P48OFgn8/nY8eOHRw5cgRRFJEkiZcvX5KTkxP2G1qtlvv374f1IVAK1wBJwPIBss3AJeBdOMc6Ozt58eIFeXl5w57UcGAwGJg5cyYtLS3BvsbGRvLy8rhx4wYAarWat2/fkpqaGtZGSkoKra2tTJkyJaxcDnQAf8M/aYAp+FfAiUiO1dTUsGHDhuHPaARYuXIlDQ0N+Hw+BEHg5s2bbN++nSdPngD+AFgsliHngACMRiMdHR0RAxBI3irgDvBr4PfAf4B/BpQGckBbWxtpaWnk5OR8mRnGgSVLltDU1ITNZqOkpIRx48aRlZVFS0sLycnJCILAtGnT0Gg0YcdPmDCBGTNmkJiYGNI/8DjcAvwL+B5YyufcDyDAAT6fj1OnTnHgwIER5fRIsWjRInbu3IlSqWTFihU4nU7y8/O5du0aXq8XvV6P1+uN6FNhYSGiKIblgIH0XY2fCDvws/8QXLt2jYULF6LVar/MzOKEXC5n+fLlwaoPIDs7m8LCQhITEyPmfwBbtmyJbHvA83nADfwADKF2l8vF7du3KSwsHKb7Xwb5+flDtsONGzeiVqtjBiAaBq6AefgD8sNgpfT0dPmZM2fYtGlTXKe0bwmdTseGDRuGVWQFMPhK7Hv8S3/I1vfmzRtJoVCQkZER85JxNJCdnT0iv+x2O7LPz1OBfwC/A+4OVCouLk6TyWT/VQznsm6MwOv1iqPtw5hA4Lzwp0H9RcDfgcxv7dBo4Dv8O8Smz++/Aqz8v3r8RWAL/u3xO6AZiO/e6meGv+JfCfeAxBi6YwLDvRD5N6ACPuAPxC8KvwH6gFLADvxhdN35tkjHXyAF/pH4W0AECkbNo28IJXAbaIRg4QTwZ6ALfxE1ZvE/Xo/9xlOEeIkAAAAASUVORK5CYII="
@property
@editor_attribute_decorator("WidgetSpecific",'''Defines the maximum values count.''', int, {'possible_values': '', 'min': 0, 'max': 65535, 'default': 0, 'step': 1})
def max_values_count(self): return self.values.maxlen
@max_values_count.setter
def max_values_count(self, value):
self.values.maxlen = int(value)
def __init__(self, epics_pv_name='', max_values_count=100, *args, **kwargs):
w = kwargs.get("style", {}).get("width", kwargs.get("width", 100))
h = kwargs.get("style", {}).get("height", kwargs.get("height", 100))
if 'width' in kwargs.keys():
del kwargs["width"]
if 'height' in kwargs.keys():
del kwargs["height"]
default_style = {'position':'absolute','left':'10px','top':'10px', 'overflow':'hidden', 'background-color':'lightgray', 'margin':'10px'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
super(EPICSPlotPV, self).__init__(w, h, *args, **kwargs)
self.values = gui.SvgPolyline(max_values_count)
self.epics_pv_name = epics_pv_name
def set_value(self, value):
if not self.get_app_instance():
return
with self.get_app_instance().update_lock:
            # time.clock() was removed in Python 3.8; use wall-clock time for the x axis
            self.values.add_coord(time.time(), float(value))
try:
plot = pygal.XY()
pairs = []
for i in range(0, len(self.values.coordsX)):
pairs.append([self.values.coordsX[i], self.values.coordsY[i]])
plot.add(self.epics_pv_name, pairs)
self.add_child("chart", plot.render())
            except Exception:
self.style['overflow'] = "visible"
self.add_child("chart", gui.SvgText(10,10, "Install pygal to use this widget"))
class EPICSValueGaugeWidget(Svg, EPICSWidget):
"""A gauge indicator for an EPICS PV value
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD0AAAAuCAYAAACbIBHcAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAesSURBVGhD3ZpZSFVbGMe/c5rneSYjuJREIPWQ0EtEFOiDFPVQlARNxqUIw8J6KHqIaKILEZUUBdFD0UTRRD6EBRUhQpaJdTOzzLI0s8FSz7rr97m39xyH8kx29A/Lvdba6+y9/usb19p6jIVEEbW1tfL+/XsZO3as1NTUSGVlpfDKgQMH6tXr9Urv3r2lZ8+eUlxcLPHx8c4vowevc40oPn/+LG/fvpWSkhItGzdulK9fvyrhbt26SVlZmXz//l37WAgK9dTUVCksLJTy8nKpqKjQMdFAxEgzwQ8fPkh+fr5ev3z5IvX19dK9e3epqqpSqQKfzyd9+/bVq8fj0dLQ0KBjhw0bJoMGDdI6pF++fClPnjzRRYgkwibNBJkU0kPCqGpdXZ38/PlTfvz4IYMHD5ZevXopUUiNGDFCxo8fr+rdv39/6dOnjy4AizVq1Cj59u2bSh5153c8j/azZ8/k48ePOjZchGzTSDY7O1tmzJgh1dXVKjEmNGDAAC2QQcrtBRJ98OCBpKSk6OKxcDwPm+c5LKC7eHl5eZKcnOz8MniERBrCSDctLU127dql0oPokCFDnBHhAe2BOO9geu7ioR25ubly7tw52b9/v7Z79Oih94JBUKQZin26kr1//7563C1btjgjIgvIs8CvX7/Wd2Meq1evlh07dihhfMHo0aO1HgyCIo3dYl/YWb9+/WT48OE6sWDUOBSg5oS969evy+PHjyUzM1PtGzB9iAejZe1yZDyY0ENYwVZxOBAG0SYMsGuIofJLly7VK46S8IfDI1pQ2uvkfitpbrtxFcLjxo1z7vwZQBYBuPNhfiw8SRC+hfI7/Jb0q1ev9EVIlvATC8CWX7x4oaYFcdqulBEKHv5XaJM03QUFBeokUCFUOpbA/PDubhKEquNnIE+snzx5sjOyJdq0adRlxYoVUlRUFHOEAdED7XNjOW1w/vx5ycjI0HpbaFXSJAKsIldCVGJionMn9oBwCJvkCYcPH1Z737NnjyY3EyZMcEYFolXSb968UTuJi4tzemIbqPiGDRs0lG7fvl3zCNR96NChrfqhFurNToiHjBkzxumJfaDaaCNJEvN3s7R3796pE26OFpJGVUaOHKlOoTMB237+/LlqKBLHmcEBTw4ffwRImqAPOhthgDNDlfHk+CJsHBVnX89C+COANKkd277OCrw5UibEki5DnvTUFaaLJtIMQBU6Iq2MFpA2Eka12YYiaTI3HLM/mkjj8fB2nR3k6IBQRiFjQ9r+Dk1Jk9LduXMnpL1pLIITGaTO0ROk4XXkyJEm4kqaFbl79652dAVAGo9NyNq6daskJCSoQ0PtgYas0tLSTu3AmgP/NG3aNE2uOGHlCArHhr9iMTzWnZtPnz51CXv2ByepkCRcuQXPjnC9HAzg8boa8N4cLADyL2wc9dYNCsG8qzgwf0AakjgvtyB5VN/bmePyrwA5MjQkDEdU21Vzr7sP7WogO3NPVVzBIn0i1R+RNCbF4X40gYQhCBAskqeoTZOqdSSOHz+uPmTq1KmaIz98+NC5E3mwuKg1jkzV2i4EpD0+2xNxBbcPtm9sLHZvLnV1LLf8W1Qkf82e7Qz6H0wqGnj69GmTEyNhIUNjwT0l9+6ZOBuwjd182wy9cYJWDdxJezhULy/n7JUEXcSGOO4bG+yVEGPZxVC33tJTVdU4FuIUt26JpduJ/NM4nwBcuHBBFixY4LQiAw4TTp48qUfC2DJkkTabKs/9q1dN4ty57CvF7sLFVFRw5CAeSNhBStTGOwMZCEPQEtXFsDsYHcNC2WJc6dLnB1eT0mzJaqwG4NSpU7Js2TKnFRnwqTcrK0vVG8LuSan6MKtaZklqqklOSTF5BQU0m/B3ZqZJWrLEZOfmOj1twGZ1prbW+GpqjK+y0vjKy42vtNT4iouNr7DQmPx8Y27fNjm7d6PHLYpNEZ0HhQdLytgFNGfOnHF6GmEzTmMX1SxfvtxOtcFwrmRu3LihN+Pj4/UK9u7da+zOROvTp0+3QqzXerhYuXJlAOGjR486d8JHRkaGWbx4sdm8ebPT04iZM2daBa4wJSUlZtGiRUb448K/vmbNGlNWVqb1devWGbtj0XokkG8lf/HiRZ1EpICUwaNHj0x6errWXdgNh1MzZuHChSYgZLkf5QDunZACwt2MXLlyRebMmaOfWTm6IVzNnz9fd0E4HOrshHJycpxfBA83yeIktzn8v29xyuu15NWrgeYJA84A4PpDTWJICCB07Ngx/XAwF6fph6SkJP10tG/fPpk1a5aOjzSs9J2a6Kde76pVq8Sqr1y6dEk/fhHQz549K/Tv3LlTrL3rOVOo0mbzDiZOnCjr16/X/yTwB+21a9fKpEmTtO2ODxXMn/kCQiGaNW/ePDl06JB++bDqbT2Jxc2bN83BgwepmurqanPr1i2t37Mx/MCBA+rxQgV2a9+vdezNhg+tu6BNP2BcuHaOw7ILqfXs7GybflRq3cZsc/r0aa03ziaK8Cd9+fLlVklfu3ZN65Eg3R5EnTSAzIkTJ8yUKVOMdWbat2nTJr3STkhIMNbmmxYn2gg47I8WcGD8J+C2bds0S7LvFY6osD/a+A9smRy5I9DmR/mujA6RdGxB5D9hHwJHVeDJjgAAAABJRU5ErkJggg=="
@property
@editor_attribute_decorator("WidgetSpecific",'''Defines the minimum value.''', float, {'possible_values': '', 'min': -65535, 'max': 65535, 'default': 0, 'step': 1})
def min_value(self): return self.__dict__.get('__min_value',0)
@min_value.setter
def min_value(self, value):
self.__dict__['__min_value'] = value
self.text_min_value.set_text(str(value))
@property
@editor_attribute_decorator("WidgetSpecific",'''Defines the maximum value.''', float, {'possible_values': '', 'min': -65535, 'max': 65535, 'default': 0, 'step': 1})
def max_value(self): return self.__dict__.get('__max_value',1)
@max_value.setter
def max_value(self, value):
self.__dict__['__max_value'] = value
self.text_max_value.set_text(str(value))
indicator = None # a gui.SvgPolygon that indicates the actual value
indicator_pin = None # a gui.SvgCircle around which the indicator rotates
text_min_value = None # the gui.SvgText min value indicator
text_max_value = None # the gui.SvgText max value indicator
text_actual_value = None # the gui.SvgText value indicator
def __init__(self, epics_pv_name='', min_value=0, max_value=100, *args, **kwargs):
w = kwargs.get("style", {}).get("width", kwargs.get("width", 100))
h = kwargs.get("style", {}).get("height", kwargs.get("height", 100))
if 'width' in kwargs.keys():
del kwargs["width"]
if 'height' in kwargs.keys():
del kwargs["height"]
default_style = {'position':'absolute','left':'10px','top':'10px'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
super(EPICSValueGaugeWidget, self).__init__(width=w, height=h, *args, **kwargs)
self.epics_pv_name = epics_pv_name
#the indicator
self.indicator = gui.SvgPolygon(_maxlen=4)
self.indicator.set_stroke(width=0.001, color='red')
self.indicator.set_fill('red')
indicator_pin_radius = 0.05
self.indicator_pin = gui.SvgCircle(0,0.5,indicator_pin_radius)
self.indicator_pin.set_fill('black')
#the value signs
scale = max_value-min_value
radius_min = 0.4
radius_max = 0.5
for i in range(0,10):
angle = math.pi/9*i
#sign = gui.SvgLine(math.cos(angle)*radius_min, radius_max-math.sin(angle)*radius_min, math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max)
sign = gui.SvgLine(math.cos(angle)*(radius_min - 0.01 + 0.1*(i+1)/10), radius_max-math.sin(angle)*(radius_min - 0.01 + 0.1*(i+1)/10), math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max)
sign.set_stroke(0.01, 'black')
self.append(sign)
#subindicators value signs
scale = max_value-min_value
radius_min = 0.4
radius_max = 0.5
for i in range(0,100):
angle = math.pi/99*i
#sign = gui.SvgLine(math.cos(angle)*radius_min, radius_max-math.sin(angle)*radius_min, math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max)
sign = gui.SvgLine(math.cos(angle)*(radius_min - 0.01 + 0.1*(i+10)/100), radius_max-math.sin(angle)*(radius_min - 0.01 + 0.1*(i+10)/100), math.cos(angle)*radius_max, radius_max-math.sin(angle)*radius_max)
sign.set_stroke(0.002, 'black')
self.append(sign)
font_size = 0.1
self.text_min_value = gui.SvgText(-radius_max, 0.5 + font_size + 0.01, str(min_value))
self.text_min_value.style['font-size'] = gui.to_pix(font_size)
self.text_min_value.style['text-anchor'] = "start"
self.text_max_value = gui.SvgText(radius_max, 0.5 + font_size + 0.01, str(max_value))
self.text_max_value.style['font-size'] = gui.to_pix(font_size)
self.text_max_value.style['text-anchor'] = "end"
self.text_actual_value = gui.SvgText(0, 0.5 + indicator_pin_radius + font_size + 0.01, str(max_value))
self.text_actual_value.style['font-size'] = gui.to_pix(font_size)
self.text_actual_value.style['text-anchor'] = "middle"
self.text_actual_value.style['font-weight'] = 'bold'
self.min_value = min_value
self.max_value = max_value
self.append([self.indicator, self.indicator_pin, self.text_min_value, self.text_max_value, self.text_actual_value])
self.set_viewbox(-0.5, 0, 1, 0.70)
self.value = self.min_value
def set_value(self, value):
if not self.get_app_instance():
return
with self.get_app_instance().update_lock:
value = float(value)
#min value at left
#max value at right
#value to radians
scale = self.max_value-self.min_value
if scale==0.0:
return
relative_value = value - self.min_value
angle = relative_value*math.pi/scale
#inversion min at left
angle = math.pi - angle
radius = 0.5
self.indicator.add_coord(math.cos(angle)*radius, radius-math.sin(angle)*radius)
self.indicator.add_coord(math.cos(angle+0.5)*0.04, radius-math.sin(angle+0.5)*0.04) #self.indicator.add_coord(0.02,0.4)
self.indicator.add_coord(0,radius)
self.indicator.add_coord(math.cos(angle-0.5)*0.04, radius-math.sin(angle-0.5)*0.04)
if hasattr(self, "actual_value"):
self.text_actual_value.set_text(str(value))
|
import importlib
import torch
from torch import nn
from torch.nn import functional as F
class MyModel(nn.Module):
def __init__(self, args, network):
super(MyModel, self).__init__()
self.args = args
self.encoder = network
    def forward(self, images, output_type='loss'):
        embeddings = self.encoder(images)
        # one embedding per image: the first N*K rows are support samples, the rest are queries
        embeddings = embeddings.view(self.args.N * (self.args.K + self.args.Q), -1)
        support_embeddings = embeddings[:self.args.N * self.args.K, :]
        query_embeddings = embeddings[self.args.N * self.args.K:, :]
        # class prototypes: mean over the K support shots of each of the N classes
        # (assumes support images are ordered shot-major, i.e. index k*N + n)
        prototypes = torch.mean(support_embeddings.view(self.args.K, self.args.N, -1), dim=0)
        prototypes = F.normalize(prototypes, dim=1, p=2)
        # similarity logits scaled by the temperature tau
        logits = torch.mm(query_embeddings, prototypes.t()) / self.args.tau
if output_type == 'logits':
return logits
elif output_type == 'loss':
query_targets = torch.arange(self.args.N).repeat(self.args.Q).long()
query_targets = query_targets.cuda(self.args.devices[0])
loss = nn.CrossEntropyLoss()(logits, query_targets)
return loss
def get_network_params(self):
modules = [self.encoder]
for i in range(len(modules)):
for j in modules[i].parameters():
yield j
def get_other_params(self):
modules = []
for i in range(len(modules)):
for j in modules[i].parameters():
yield j
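# A rough shape sketch for the episodic forward pass above (hypothetical args
# object; assumes images are ordered so that index k*N + n is the k-th support
# shot of class n, with the N*Q query images following):
#
#   args: N=5, K=1, Q=15, tau=0.1, devices=[0]
#   images: tensor of shape (N*(K+Q), C, H, W)
#   model = MyModel(args, encoder)
#   loss = model(images)              # scalar cross-entropy loss
#   logits = model(images, 'logits')  # shape (N*Q, N)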
|
import matplotlib.pyplot as plt
import PIL.Image
import numpy as np
import os
import random
from patch.constants import PATCH_SIZE
def _convert(im: np.ndarray):
    # map an image from [-1, 1] back to displayable uint8 values in [0, 255]
    return ((im + 1) * 127.5).astype(np.uint8)
def show(im: np.ndarray):
plt.axis('off')
plt.imshow(_convert(im), interpolation="nearest")
plt.show()
def load_image(image_path: str):
im = PIL.Image.open(image_path)
    im = im.resize(PATCH_SIZE, PIL.Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is its replacement
if image_path.endswith('.png'):
ch = 4
else:
ch = 3
try:
        # PIL's Image.size is (width, height), so reshape as (height, width, channels)
        im = np.array(im.getdata()).reshape(im.size[1], im.size[0], ch)[:, :, :3]
except ValueError as e:
print("An error ocurred when processing file", image_path)
raise e
return im / 127.5 - 1
class StubImageLoader():
"""An image loader that uses just a few ImageNet-like images.
In fact, all images are supplied by the user.
"""
def __init__(self, images_dir, batch_size):
self.image_paths = []
self.batch_size = batch_size
#only keep the image paths and load them when requested
for dirpath, _, filenames in os.walk(images_dir):
for image_path in filenames:
self.image_paths.append(os.path.join(dirpath, image_path))
def get_images(self):
# fetch a random sample of images
chosen = random.sample(self.image_paths, self.batch_size)
return [load_image(img_path) for img_path in chosen]
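# A minimal usage sketch (hypothetical directory; assumes it contains at least
# `batch_size` readable images):
#
#   loader = StubImageLoader('data/sample_images', batch_size=8)
#   batch = loader.get_images()   # list of 8 arrays scaled to [-1, 1]
#   show(batch[0])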
|
import sys
my_list = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
new_list=[element for element in my_list if element<5]
print(new_list)
if sys.version_info[0] >= 3:
num=int(input("Please enter a number: "))
else:
num=int(raw_input("Please enter a number: "))
newer_list=[element for element in my_list if element<num]
print(newer_list)
|
# Let's finish the file access project in Python.
|
import pytz
from pathlib import Path
def separate_log_file(current_timestamp, internet_connected, just_booted, ping_time):
if just_booted:
just_booted_message = "YES"
else:
just_booted_message = "NO"
now = current_timestamp.now(pytz.timezone('America/New_York'))
file_name = now.strftime("%Y-%m-%d")
log_line_date = now.strftime("%H:%M:%S")
l = f"Script has run at {log_line_date}. Internet connected: {internet_connected}. Just booted: {just_booted_message}. Ping Time: {ping_time}\n"
file = Path.home().joinpath(f'.config/outagedetector/{file_name}.log')
# print(f"Writing to {file}")
with open(file, 'a+') as f:
f.writelines(l)
f.close()
|
from ..base.command_with_args import CommandWithArgs
from ..base.argument import Argument
from pyod.models.abod import ABOD
from pyod.models.cblof import CBLOF
from pyod.models.auto_encoder import AutoEncoder
from .pyod import *
class AlgorithmCommand(CommandWithArgs):
tag = "algorithm"
patterns = [
"Queria rodar o algoritmo",
"vamos a aplicar o algoritmo",
"Queria rodar o algoritmo",
"Treina o algoritmo",
"Vamos dar fit do algoritmo",
"Executa o algoritmo"
]
def __init__(self, parent, task_manager):
super(AlgorithmCommand, self).__init__(parent, task_manager)
self.responses = ["Posso rodar o algortimo {0} no dataset {1} e guardar o resultao em {2}."]
self.user_config_tag = 'algorithm'
self.algorithms = {
'abod': AbodAlgorithm,
'cblof': CblofAlgorithm,
'autoencoder': AutoEncoderAlgorithm,
'hhos': HbosAlgorithm,
'iforest': IForestAlgorithm,
'knn': KnnAlgorithm,
'lmdd': LmddAlgorithm,
'loda': LodaAlgorithm
}
self.complete = False
self.algorithm_name = Argument({
'parent': self,
'name': 'algorithm_name',
'trigger': 'algoritmo',
'position': 1
})
self.dataset_name = Argument({
'parent': self,
'name': 'dataset_name',
'trigger': 'dataset',
'position': 1
})
self.context_variable = Argument({
'parent': self,
'name': 'variable_name',
'trigger': 'guarda em',
'position': 1
})
self.children = [
self.algorithm_name,
self.dataset_name,
self.context_variable
]
def run(self, context):
algorithm_name = self.algorithm_name.value
dataset_name = self.dataset_name.value
context_variable = self.context_variable.value
algorithm = self.algorithms[algorithm_name].algorithm
dataframe = context[dataset_name]
        algorithm_args = self.algorithms[algorithm_name].get_arg(dataframe=dataframe)
        clf = algorithm(**algorithm_args)
clf.fit(dataframe)
context[context_variable] = clf
def generate_code(self, code_generator, context):
algorithm_name = self.algorithm_name.value
algorithm = self.algorithms[algorithm_name]
dataset_name = self.dataset_name.value
dataframe = context[dataset_name]
code_generator.write("")
code_generator.write("# Fit the {0} algorithm".format(algorithm.name))
code_generator.write(algorithm.import_code)
context_variable = self.context_variable.value
algorithm_args = algorithm.get_args_code(dataframe=dataframe)
code_generator.write("{0} = {1}({2})".format(context_variable, algorithm.name, algorithm_args))
code_generator.write("{0}.fit({1})".format(context_variable,dataset_name))
|
import random
import time
from secrets import token_hex
from pyramid.httpexceptions import HTTPCreated
from pyramid.view import view_defaults
from sqlalchemy.orm.exc import NoResultFound
from {{cookiecutter.project_slug}}.handlers import view_config
from {{cookiecutter.project_slug}}.handlers.auth import LoginHandler
from {{cookiecutter.project_slug}}.lib.decorators import validate
from {{cookiecutter.project_slug}}.lib.factories.auth.recovery import AccountRecoveryFactory
from {{cookiecutter.project_slug}}.lib.hash import hash_plaintext
from {{cookiecutter.project_slug}}.lib.middleware.sendgrid import SendGridClient
from {{cookiecutter.project_slug}}.lib.schemas.auth import (AccountRecoveryLoginSchema,
AccountRecoverySchema)
from {{cookiecutter.project_slug}}.lib.security.auth import AuthWithRecoveryTokenManager
from {{cookiecutter.project_slug}}.models import save
from {{cookiecutter.project_slug}}.models.security.recovery_token import RecoveryToken
from {{cookiecutter.project_slug}}.models.user import get_one_user_by_email_address, User
NUMBER_OF_TOKEN_BYTES = 3
TOKEN_TTL_IN_SECONDS = 7200
MIN_TIME_PADDING_IN_DECISECONDS = 2
MAX_TIME_PADDING_IN_DECISECONDS = 8
@view_defaults(
containment=AccountRecoveryFactory,
context=AccountRecoveryFactory,
renderer='json'
)
class AccountRecoveryHandler(LoginHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth_manager = AuthWithRecoveryTokenManager(self.request)
@validate(AccountRecoverySchema())
@view_config(
path_hints=['/auth/recover-account'],
request_schema_class=AccountRecoverySchema,
permission='recovery.request_token',
tags=['authentication', 'account recovery'],
request_method='POST',
public_hint=True
)
def request_account_recovery_token(self, request_data):
response = HTTPCreated()
token = token_hex(NUMBER_OF_TOKEN_BYTES)
email_address = request_data['email_address']
self._prevent_user_enumeration()
try:
recipient = get_one_user_by_email_address(email_address)
self._invalidate_any_current_recovery_token(recipient)
self._save_recovery_token(recipient, token)
SendGridClient().send_account_recovery_email(email_address, token)
except NoResultFound:
# To avoid user enumeration we don't indicate failure.
pass
raise response
@staticmethod
def _prevent_user_enumeration():
time.sleep(random.randint(
MIN_TIME_PADDING_IN_DECISECONDS,
MAX_TIME_PADDING_IN_DECISECONDS
) / 10)
@staticmethod
def _invalidate_any_current_recovery_token(user):
try:
user.active_recovery_token.invalidate()
except AttributeError:
pass
@staticmethod
def _save_recovery_token(for_user: User, token: str):
token_hash, token_salt = hash_plaintext(token)
recovery_token = RecoveryToken(
token_hash=token_hash,
token_salt=token_salt,
for_user=for_user
)
save(recovery_token)
@validate(AccountRecoveryLoginSchema())
@view_config(
path_hints=['/auth/recover-account/login'],
request_schema_class=AccountRecoveryLoginSchema,
permission='recovery.login',
request_method='POST',
successful_response_code=200,
tags=['authentication', 'account recovery'],
name='login',
public_hint=True
)
def login(self, login_data):
self.auth_manager.login(login_data)
raise self.request.response
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import datasets
from sklearn.model_selection import train_test_split
from util import load_data
from rnn_lstm import TikTokModel, prepare_datasets
from tqdm.auto import tqdm
if __name__ == "__main__":
X_train, X_validation, X_test, y_train, y_validation, y_test = prepare_datasets(
0.25, 0.2
)
X_test = torch.from_numpy(X_test).float()
y_test = torch.from_numpy(y_test).float()
trained_tiktok_model = TikTokModel(X_test.shape[2])
trained_tiktok_model.load_state_dict(torch.load("trained_tiktok_model.pt"))
    print(trained_tiktok_model)
    trained_tiktok_model.eval()  # evaluation mode: disable dropout/batch-norm updates
    with torch.no_grad():
        outputs_test = trained_tiktok_model(X_test)
outputs_test = torch.squeeze(outputs_test)
test_data_labels = y_test
test_data_labels = test_data_labels.to(torch.float32)
predicted_test = outputs_test.round().detach().numpy()
total_test = test_data_labels.size(0)
correct_test = np.sum(predicted_test == test_data_labels.detach().numpy())
accuracy_test = 100 * correct_test / total_test
print(accuracy_test)
|
import multiprocessing
from threading import Thread, Lock
from time import sleep
try:
    from queue import Queue, Empty, Full
except ImportError:
    from Queue import Queue, Empty, Full
from .performer import Performer
from .stacktracer import *
from .backend import *
from .logger import *
from .utils import *
class Manager(object):
def __init__(self, root, **kwargs):
super(Manager, self).__init__()
self._performer = Performer(self, **kwargs)
self._lock = Lock()
self._state = Lock()
        # @NOTE: these parameters are used to track the system status
self._use_parallel = None
self._current_dir_path = None
self._current_dir_type = None
self._count_payload = 0
self._on_going = 0
self._output = None
self._root = None
self._error = False
self._keep = True
# @NOTE: define our slot callbacks
self._departure_callbacks = []
# @NOTE: queue task of build and teardown stage
self._build = Queue()
self._teardown = Queue()
        # @NOTE: everything about a real workspace will be represented here
self._count_rules = 0
self._running = 0
self._await = []
self._rules = {}
self._functions = {}
        # @NOTE: shared resources between objects
self._languages = {}
self._plugins = {}
self._backends = {
'package': Package(**kwargs),
'config': Config(**kwargs),
'file': File(**kwargs),
'hook': Hook(**kwargs)
}
        # @NOTE: okay, now load the backends
for name in self._backends:
self._backends[name].mount(self)
self._backends[name].check()
self._backends[name].define()
@property
def root(self):
return self._root
@property
def backends(self):
return self._backends
@property
def languages(self):
return self._languages
@property
def plugins(self):
return self._plugins
@property
def type(self):
return 'Manager'
@property
def error(self):
return self._error
@property
def count_rules(self):
return is_okey_to_continue(self._lock, lambda: self._count_rules)
@property
def count_payload(self):
return is_okey_to_continue(self._lock, lambda: self._count_payload)
def __enter_payload(self):
def update():
self._count_payload += 1
update_variable_safety(self._state, update)
def __exit_payload(self):
def update():
self._count_payload -= 1
update_variable_safety(self._state, update)
def package_update(self, passed=False):
return self._backends['package'].update(passed)
def package_install(self, packages):
if isinstance(packages, list):
for package in packages:
if self._backends['package'].install(package, True) is True:
return True
else:
return False
else:
return self._backends['package'].install(packages, True)
def support(self, languages):
if isinstance(languages, list):
for language in languages:
self._languages[Manager.derived(language)] = language
        elif isinstance(languages, Language):
            self._languages[Manager.derived(languages)] = languages
        elif languages is not None:
            raise AssertionError('\'languages\' must be a list or a Language')
else:
self._languages = {}
for name in self._languages:
self._languages[name].mount(self._performer)
for name in self._languages:
try:
self._languages[name].mount(self)
except Exception as error:
Logger.error(str(error))
self._languages[name] = None
for name in self._languages:
if self._languages[name] is None:
continue
try:
self._languages[name].define()
except Exception as error:
Logger.error(str(error))
self._languages[name] = None
def install(self, plugins):
if isinstance(plugins, list):
for plugin in plugins:
self._plugins[Manager.derived(plugin)] = plugin
        elif isinstance(plugins, Plugin):
            self._plugins[Manager.derived(plugins)] = plugins
        elif plugins is not None:
            raise AssertionError('\'plugins\' must be a list or a Plugin')
        else:
            self._plugins = {}
for name in self._plugins:
self._plugins[name].mount(self)
for name in self._plugins:
self._plugins[name].define()
def set_current_dir(self, type, dir_path):
if dir_path != self._current_dir_path and (not self._current_dir_path is None):
for callback in self._departure_callbacks:
callback(mode=self._current_dir_type)
if type.lower() == 'workspace':
self._root = dir_path
self._current_dir_path = dir_path
self._current_dir_type = type
self._on_going = 0
def create_new_node(self, name, node):
name = self.convert_name_to_absolute_name(name)
if name in self._rules:
return False
try:
self._rules[name] = {
'dir': self._current_dir_path,
'define': node,
'depends_on': [],
'depends_by': []
}
self._build.put(name)
self._count_rules += 1
self._on_going += 1
return True
        except Full:
return False
def read_value_from_node(self, dep, key):
# @NOTE: this helper function helps to read value from a node of our
# build tree
if dep[0] == ':' or dep[0:2] == '//':
name = self.convert_dep_to_absolute_name(dep)
else:
name = dep
if not name in self._rules:
return None
elif key in ['define', 'depends_on', 'depends_by']:
return None
else:
return self._rules[name].get(key)
def modify_node_inside_dependency_tree(self, name, key, value):
# @NOTE: this helper function helps to modify node inside our build tree
if name in self._rules:
if key in ['dir', 'define', 'depends_on', 'depends_by']:
return False
else:
self._rules[name][key] = value
else:
return False
return True
@staticmethod
def convert_absolute_name_to_name(absolute_name):
# @NOTE: this helper function helps to convert abs_name to name
return absolute_name.split(':')[1]
@staticmethod
def convert_absolute_name_to_dep(root, absolute_name):
# @NOTE: this helper function helps to convert abs_name to dependency
if absolute_name is None:
return "Unknown"
elif root[-1] == '/':
return '//' + absolute_name[len(root):]
else:
return '/' + absolute_name[len(root):]
def convert_name_to_absolute_name(self, name, path=None):
# @NOTE: this helper function helps to convert name to abs_name
if path is None:
return '%s:%s' % (self._current_dir_path, name)
else:
return '%s:%s' % (path, name)
def convert_dep_to_absolute_name(self, dep):
# @NOTE: this helper function helps to convert dependency to abs_name
if dep[0] == ':':
return '%s:%s' % (self._current_dir_path, dep[1:])
elif dep[0:2] == '//':
if self._root[-1] == '/':
return '%s%s' % (self._root, dep[2:])
else:
return '%s%s' % (self._root, dep[1:])
def add_to_dependency_tree(self, name, node, deps):
# @NOTE: this helper function helps to add a new node to building tree
name = self.convert_name_to_absolute_name(name)
if name in self._rules:
return False
elif deps is None:
return self.create_new_node(name, node)
else:
wait_to_remove = []
self._lock.acquire()
self._rules[name] = {
'dir': self._current_dir_path,
'define': node,
'depends_on': [],
'depends_by': []
}
for dep in deps:
dep = self.convert_dep_to_absolute_name(dep)
self._rules[name]['depends_on'].append(dep)
if not dep in self._rules:
Logger.debug('rule %s is waiting %s' % (name, dep))
self._await.append((name, dep))
else:
Logger.debug('rule %s is depended by %s' % (name, dep))
self._rules[dep]['depends_by'].append(name)
for index, (node_name, dep) in enumerate(self._await):
if dep in self._rules:
Logger.debug('rule %s is depended by %s' % (dep, node_name))
# @NOTE: add dependency and mark to remove this waiting task
# at lastly
self._rules[dep]['depends_by'].append(node_name)
wait_to_remove.append(index)
if len(wait_to_remove):
wait_to_remove.sort()
                # @NOTE: be careful when removing items from a list: indices
                # shift, so subtract the number of items already removed
for counter, index in enumerate(wait_to_remove):
del self._await[index - counter]
self._count_rules += 1
self._lock.release()
return True
def eval_function(self, function, path=None, node=None, **kwargs):
# @NOTE: this helper function helps to perform a function
function = self.find_function(function)
if function is None:
return None
elif node is None:
return function(root=self._root, output=self._output, **kwargs)
else:
name_node = self.convert_name_to_absolute_name(node, path=path)
if name_node in self._rules:
return function(root=self._rules[name_node]['dir'],
output=self._output, **kwargs)
else:
return None
def add_rule(self, owner, function, use_on_workspace=False):
self._functions[function.__name__] = {
'owner': owner,
'function': function,
'use_on_workspace': use_on_workspace
}
def find_function(self, name, position=None):
if not name in self._functions:
return None
else:
function = self._functions[name]
if (not position is None) and (position == 'workspace') \
and function['use_on_workspace'] is False:
return None
else:
if function['owner'].check() is False:
return None
return function['function']
def show_pending_tasks(self):
        # @NOTE: show pending tasks when the threads are stuck; sometimes this is
        # simply because there is nothing left to do, so nothing is shown and the
        # application is closed afterwards
have_pending = False
for rule_name in self._rules:
rule = self._rules[rule_name]
if not 'done' in rule or rule['done'] is False:
print('>> Rule %s is waiting tasks:' % \
Manager.convert_absolute_name_to_dep(self._root, rule_name))
for dep in rule['depends_on']:
if not dep in self._rules:
print('\t\t%s -> didn\'t exist' % Manager.convert_absolute_name_to_dep(self._root, dep))
have_pending = True
elif not 'done' in self._rules[dep] or self._rules[dep]['done'] is False:
print('\t\t%s' % Manager.convert_absolute_name_to_dep(self._root, dep))
have_pending = True
else:
return have_pending
def teardown(self, root, output=None):
# @NOTE: now run teardown callbacks. Since i don't want to solve race
# condition problems on 'root', everything will be run on master-thread
# after all our consumer-thread have finished
while self._teardown.empty() is False:
callback, info = self._teardown.get()
if callable(info) is True:
info(is_on_running=False)
if callable(callback) is False:
continue
elif callback(root=root, output=output) is False:
raise AssertionError('there some problem when teardown this project')
self._teardown.task_done()
def found_bug(self, bug, turn_to_debug=False, no_lock=False):
def update():
self._error = True
        # @NOTE: this function is used to notify when a bug has been found
if turn_to_debug is True:
Logger.debug('Got an exception: %s -> going to teardown this project' % str(bug))
else:
Logger.error('Got an exception: %s -> going to teardown this project' % str(bug))
if no_lock or self._use_parallel is False or self._current_dir_type == 'workspace':
update()
else:
update_variable_safety(self._lock, update)
def perform(self, root, output=None, timeout=1,
retry_checking_multithread=30):
        # @NOTE: sometimes adding rules doesn't behave as expected, e.g. a rule
        # added with unknown dependencies, so we must scan through self._await
        # to make sure everything has been added completely
if len(self._await) > 0:
Logger.debug('It seems self._await still have %d rules '
'under implemented' % len(self._await))
while True:
wait_to_remove = []
for index, (node_name, dep) in enumerate(self._await):
if dep in self._rules:
Logger.debug('rule %s is depended by %s' % (dep, node_name))
# @NOTE: add dependency and mark to remove this waiting task
# at lastly
self._rules[dep]['depends_by'].append(node_name)
wait_to_remove.append(index)
else:
                    # @NOTE: if there is nothing waiting to be removed, it
                    # means we have finished checking
if len(wait_to_remove) == 0:
break
                # @NOTE: on the other hand, we must remove the waiting
                # dependencies and check again
wait_to_remove.sort()
for counter, index in enumerate(wait_to_remove):
del self._await[index - counter]
        # @NOTE: sometimes undefined dependencies are used and would cause the
        # system to hang forever, so check everything before invoking the
        # build system
if len(self._await) > 0:
            list_untagged_rules = set()
for rule, dep in self._await:
list_untagged_rules.add(rule)
self.found_bug(
AssertionError('still have untagged rules, please check '
'your `.build` and `.workspace` here it\'s '
'the list of untagged rules:\n%s' % \
'\n'.join(list_untagged_rules)))
# @NOTE: unexpected error can happen before performing rules.
# if it happens, abandon this project now
if self._error:
return not self._error
        # @NOTE: decide how to run the project according to the instructions
if self._use_parallel is None:
self._use_parallel = can_run_on_multi_thread(retry_checking_multithread)
if self._use_parallel is True and self._current_dir_type == 'build':
return self.perform_on_multi_thread(root, output, timeout)
else:
return self.perform_on_single_thread(root, output)
def perform_on_single_thread(self, root, output=None, timeout=1):
        # @NOTE: there was a big issue when running this tool on a single-core
        # machine: since we only have a master, the payload never runs here and
        # we can't compile anything
self._root = root
self._output = output
self._count_payload = 0
self._performer.reset()
Logger.debug('Run on single-thread')
parsing = self._performer.perform_on_single_thread(timeout=timeout)
running = self._performer.perform_on_single_thread(timeout=timeout)
if parsing is None or running is None:
self.found_bug(AssertionError('it seems performer got bugs'), no_lock=True)
return False
while self._keep is True and self.count_rules > 0 and not self._error:
while self._current_dir_type == 'build' and \
not self._error and self._keep is True:
if self._performer._pipe.qsize() > 0:
if self._keep is True:
parsing()
if self._keep is True:
running()
elif self._performer._jobs.qsize() > 0:
if self._keep is True:
running()
else:
break
try:
# @NOTE: fetch a new task
task_name = self._build.get(timeout=timeout)
except Empty:
if self._performer._pipe.qsize() == 0 and \
self._performer._jobs.qsize() == 0:
self._keep = False
continue
# @NOTE: parse this task
if not task_name in self._rules:
self.found_bug(AssertionError('it seems there is a race condition with '
'task %s' % task_name), no_lock=True)
continue
define = self._rules[task_name]['define']
depends_by = self._rules[task_name]['depends_by']
if 'info' in define:
define['info'](is_on_running=True, **define)
if 'callback' in define:
kwargs = define.get('kwargs')
try:
if self._rules[task_name]['dir'] is None:
workspace = root
else:
workspace = self._rules[task_name]['dir']
# @NOTE: check dependency here
if kwargs is None:
result = define['callback'](root=workspace,
workspace=output)
else:
result = define['callback'](root=workspace,
workspace=output, **kwargs)
if result is False:
self._keep = False
elif not define.get('teardown') is None:
self._teardown.put((define['teardown'], define.get('info')))
if 'info' in define:
define['info'](is_on_running=True,
is_finish_successful=True, **define)
if self._keep is True:
parsing()
if self._keep is True:
running()
if self._keep is False:
continue
for name in depends_by:
# @NOTE: detect position of this dependency and remove
# it out of dependency list
try:
index = self._rules[name]['depends_on'].index(task_name)
except ValueError:
index = -1
if index >= 0:
del self._rules[name]['depends_on'][index]
# @NOTE: if the task's dependencies is None now, put it to queue 'build'
if len(self._rules[name]['depends_on']) == 0:
self._build.put(name)
else:
Logger.debug('Finish pushing task %s' % task_name)
self._rules[task_name]['done'] = True
except Exception as error:
                    # @NOTE: update status from running -> stopping because we
                    # found a bug inside our code
self.found_bug(error, no_lock=True)
# @NOTE: print exception
Logger.exception()
else:
return not self._error
def perform_on_multi_thread(self, root, output=None, timeout=1, parallel_core=4):
lock = Lock()
# @NOTE: tracer consumes so much time so we must consider using it
# start_tracer('/tmp/test.html')
self._root = root
self._output = output
self._count_payload = 0
self._performer.reset()
Logger.debug('Run on multi-threads')
if len(self._rules) == 0:
return True
if self._current_dir_type == 'build':
for callback in self._departure_callbacks:
callback(mode=self._current_dir_type)
def stop_running():
self._keep = False
def wrapping():
            # @NOTE: make all consumer-threads wait until the main-thread has
            # finished creating and configuring
if (not self._current_dir_type is None) and self._current_dir_type.lower() == 'build':
role, performing_callback = self._performer.perform_on_multi_thread(timeout=timeout)
else:
role, performing_callback = 'master', None
wait(lock)
if not performing_callback is None:
if role == 'payload' and self._performer.pending():
self.__enter_payload()
begin = True
# @NOTE: fetch a task from queue and perform it
while self._keep is True and (self.count_rules > 0 or self.count_payload > 0):
if is_okey_to_continue(self._lock, lambda: self._keep) is False:
if role != 'master':
break
else:
self._performer.clear_pipe()
continue
# @NOTE: payload must finish its tasks first before supporting
# parsing rules
if role == 'payload' and (self._performer.pending() or self._build.qsize() == 0):
performing_callback()
if role == 'payload':
if self._build.qsize() == 0 and self.count_rules == 0:
                        # @NOTE: once the payload sees that the rules have been
                        # converted, it must loop again to check and finish its
                        # own tasks instead of waiting for the main-thread
continue
else:
sleep(timeout)
# Logger.debug('Payload turn off when '
# 'count_rules=%d' % self.count_rules)
# @NOTE: now get task from self._build and peform parsing
try:
if role == 'master':
# @NOTE: when master determines that rules are converted
# it must jump to perform tasks instead of waiting new
# task from main-thread
Logger.debug('Master have %d to do now' \
% self._performer._pipe.qsize())
while self._performer._pipe.qsize() > 0 and self._keep:
performing_callback()
else:
if self._build.qsize() == 0 and self.count_rules == 0:
Logger.debug('Master off but there were nothing '
'to do now')
raise Empty
task_name = self._build.get(timeout=timeout)
self._lock.acquire()
Logger.debug('Convert rule %s now' % task_name)
if not task_name is None:
self._running += 1
self._lock.release()
except Empty:
if self._running == 0 and self._performer.running == 0:
                        # @NOTE: since this might be caused by a race condition
                        # with the build script, we must show the tasks that are
                        # still pending
if self._performer._jobs.qsize() == 0:
update_variable_safety(self._lock, stop_running)
if self.show_pending_tasks():
self.found_bug(AssertionError('it seems there is a race condition, '
'nothing run recently'), turn_to_debug=True)
if role != 'master' and self._performer._inside.locked():
performing_callback()
continue
else:
if role == 'payload':
performing_callback()
continue
if not task_name in self._rules:
update_variable_safety(self._lock, stop_running)
raise AssertionError('it seems there is a race condition with task %s' % task_name)
define = self._rules[task_name]['define']
depends_by = self._rules[task_name]['depends_by']
if 'info' in define:
define['info'](is_on_running=True, **define)
if 'callback' in define:
kwargs = define.get('kwargs')
try:
if self._rules[task_name]['dir'] is None:
workspace = root
else:
workspace = self._rules[task_name]['dir']
# @NOTE: check dependency here
if kwargs is None:
result = define['callback'](root=workspace, workspace=output)
else:
result = define['callback'](root=workspace, workspace=output, **kwargs)
if result is False:
update_variable_safety(self._lock, stop_running)
elif not define.get('teardown') is None:
self._teardown.put((define['teardown'], define.get('info')))
if 'info' in define:
define['info'](is_on_running=True, is_finish_successful=True, **define)
self._lock.acquire()
for name in depends_by:
# @NOTE: detect position of this dependency and remove
# it out of dependency list
try:
index = self._rules[name]['depends_on'].index(task_name)
except ValueError:
index = -1
if index >= 0:
del self._rules[name]['depends_on'][index]
# @NOTE: if the task's dependencies is None now, put it to queue 'build'
if len(self._rules[name]['depends_on']) == 0:
self._build.put(name)
else:
self._rules[task_name]['done'] = True
self._count_rules -= 1
self._lock.release()
if role == 'master' and not performing_callback is None:
performing_callback()
elif role == 'payload' and self._performer.pending() is True:
performing_callback()
except Exception as error:
                            # @NOTE: update status from running -> stopping because we found a bug inside our code
update_variable_safety(self._lock, stop_running)
self.found_bug(error)
# @NOTE: print exception
Logger.exception()
finally:
self._running -= 1
self._build.task_done()
if role == 'master':
Logger.debug('Master finish transfer request to Performer')
while self._performer._pipe.qsize() > 0 and self._keep:
performing_callback()
else:
Logger.debug('Master deliver task to payloads')
if self._keep:
if self._performer._jobs.qsize() == 0:
update_variable_safety(self._lock, stop_running)
else:
Logger.debug('Master become a payload and will support '
'another payloads')
_, performing_callback = \
self._performer.perform_on_multi_thread(timeout=timeout)
performing_callback()
Logger.debug('Teardown master now')
else:
Logger.debug('Teardown payload now')
if not performing_callback is None:
if role == 'payload':
self.__exit_payload()
lock.acquire()
consumers = []
for i in range(parallel_core*multiprocessing.cpu_count()):
thread = Thread(target=wrapping)
# @NOTE: configure consumer-threads
thread.setName('Builder-%d' % i)
thread.start()
# @NOTE: add this thread to consumer-list
consumers.append(thread)
else:
            # @NOTE: okay, now release the lock so the consumer-threads can work
            # through the backlog, then wait until they have finished it
if not self._current_dir_type is None:
Logger.debug('begin parsing %s' % (self._current_dir_type.upper()))
lock.release()
for i in range(parallel_core*multiprocessing.cpu_count()):
consumers[i].join()
# stop_tracer()
return not self._error
@property
def languages(self):
return self._languages
@property
def plugins(self):
return self._plugins
@staticmethod
def derived(instance):
result = None
try:
for class_name in reversed(instance.derived()):
if class_name == 'Plugin' or class_name == 'Language':
if result is None:
return instance.__class__.__name__.lower()
else:
return result.lower()
else:
result = class_name
except Exception as error:
Logger.error(error)
Logger.exception()
return None
def when_depart(self, type):
def route(function):
def wrapping(mode, *args, **kwargs):
if mode.lower() == type.lower():
return function(*args, **kwargs)
else:
return None
self._departure_callbacks.append(wrapping)
return wrapping
return route
def hook(self, when_calling_happen, status=True):
def route(function):
self._performer.install_event(when_calling_happen,
status,
function)
return function
return route
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 12:23:35 2019
@author: Joule
"""
import random
from cipher import Cipher
from crypto_utils import modular_inverse
class Multiplikasjon(Cipher):
"""Krypter ved hjelp av multiplikasjons cipheret"""
def encode(self, text, key):
encoded = ''
for letter in text:
encoded += self.dictionary.get(((ord(letter)-32)*key)%95)
return encoded
def decode(self, coded_text, key):
decoded = ''
for letter in coded_text:
decoded += self.dictionary.get(((ord(letter)-32)*(modular_inverse(key, 95)))%95)
return decoded
def generate_key(self):
key = 0
while True:
key = random.randint(2, 94)
if modular_inverse(key, 95):
return key
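# A minimal usage sketch (assumes the Cipher base class provides the
# `dictionary` mapping of the 95 printable ASCII characters used above):
#
#   cipher = Multiplikasjon()
#   key = cipher.generate_key()
#   secret = cipher.encode("hei verden", key)
#   assert cipher.decode(secret, key) == "hei verden"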
|
import numpy as np
def test_q1_shape(df):
try:
assert df.shape[0] == 45
except AssertionError:
print("Your dataframe doesn't have the correct number of rows.")
print("Try looking back at conditional statements using loc/iloc.")
return
print("You successfully used pandas to filter a dataset")
def test_q2_shape(df):
try:
assert df.shape[0] == 42
except AssertionError:
print("Your dataframe doesn't have the correct number of rows.")
print("Try looking back at conditional statements using loc/iloc.")
return
print("You successfully filtered the dataset")
def test_q2_statement(statement):
error = False
drug1 = statement.split(" ")[1]
ratio = statement.split(" ")[3]
drug2 = statement.split(" ")[-1]
if drug1 != "Ebselen":
print("Your top scoring drug does not appear to be correct.")
error = True
if drug2 != "Remdesivir":
print("Your second scoring drug does not appear to be correct.")
error = True
if not np.isclose(float(ratio), 1.05133928571, atol=0.001):
print("Your ratio does not appear to be correct.")
error = True
if not error:
print("You successfully used pandas!")
def a_column_or_index_contains(df, contains):
for series in [df[col] for col in df.columns] + [df.index]:
if np.all([contained in series.to_list() for contained in contains]):
return True
return False
def test_popular_drugs(df):
assert a_column_or_index_contains(
df, ["Remdesivir", "Tocilizumab", "Ebselen"]
), "Couldn't find the correct popular drugs in your dataset"
print("You found the correct popular drugs")
|
import numpy as np
import time
import copy
#np.random.seed(134213)
def smape(X, y, w):
y_pred = X @ w
n = X.shape[0]
res = .0
for y_true, y_hat in zip(y, y_pred):
res += abs(y_hat - y_true) / (abs(y_hat) + abs(y_true))
res /= n
return res
def normalize(X):
n = X.shape[0]
k = X.shape[1]
coeffs = np.zeros(k)
for j in range(k):
coeffs[j] = np.max(np.absolute(X[:, j]))
if coeffs[j] == 0.:
coeffs[j] = 1.
X[:, j] /= coeffs[j]
return coeffs
def stochastic_gradient(X_i, y_i, weights, lambda_reg):
y_hat = X_i @ weights
grad = 2 * ((y_hat - y_i) * X_i + lambda_reg * weights)
return grad
def gradient_smape(X, y, weights):
avg_grad = np.zeros(X.shape[1])
for X_i, y_i in zip(X, y):
y_hat = X_i @ weights
t = y_hat * y_i
num = X_i * (abs(t) + t)
denom = abs(y_hat) * (abs(y_hat) + abs(y_i)) ** 2
g = np.sign(y_hat - y_i) * num / denom
avg_grad += g
avg_grad /= X.shape[0]
return avg_grad
def sgd(X_train, y_train, lambd=0.0, learning_rate=0.01, t=1.1, w=None):
""" Stochastic Gradient Descent of Linear Regression """
n = X_train.shape[0]
k = X_train.shape[1]
    # Uniform initialization
weights = np.random.uniform(low=-1/(2 * n), high=1/(2 * n), size=k) if w is None else w
start_time = time.process_time()
while time.process_time() - start_time < t:
sample_idx = np.random.randint(n)
y_hat = X_train[sample_idx] @ weights
weights -= learning_rate * stochastic_gradient(
X_train[sample_idx], y_train[sample_idx], weights, lambd)
return weights
def gd(X_train, y_train, learning_rate=0.01, t=1.1, w=None):
n = X_train.shape[0]
k = X_train.shape[1]
    # Uniform initialization or continued training
weights = np.random.uniform(low=-1/(2 * n), high=1/(2 * n), size=k) if w is None else w
start_time = time.process_time()
while time.process_time() - start_time < t:
g = gradient_smape(X_train, y_train, weights)
weights -= learning_rate * g
return weights
def fit_least_squares(X, y, lambd=0.0):
inv = np.linalg.inv(X.T @ X + lambd * np.eye(X.shape[1]))
pinv = inv @ X.T
weights = pinv @ y
return weights
if __name__ == '__main__':
n, m = map(int, input().split())
X = np.zeros((n, m + 1))
y = np.zeros(n)
for i in range(n):
s = list(map(int, input().split()))
X[i, :] = s[:-1] + [1.]
y[i] = s[-1]
X_old = copy.deepcopy(X)
coeffs = normalize(X)
try:
w = fit_least_squares(X, y, 0.0) / coeffs
    except np.linalg.LinAlgError:
w1 = gd(X, y, 1.5e7, 1.1) / coeffs
w2 = gd(X, y, 1e7, 2.0) / coeffs
if smape(X_old, y, w1) <= smape(X_old, y, w2):
w = w1
else:
w = w2
print(*w)
|
# %% [markdown]
#
# # Workshop: NumPy Universal Operations and Broadcasting
#
# In this workshop we'll explore universal operations and broadcasting in more
# depth. Some of the operations used in this workshop were not presented in the
# lectures, so you will have to look into [the NumPy
# documentation](https://numpy.org/doc/stable/reference/ufuncs.html) to discover
# them.
# %%
import numpy as np
# %%
arr1 = np.arange(1, 25).reshape(2, 3, 4)
lst1 = [2, 3, 5, 7]
# %% [markdown]
#
# ## Universal Operations
#
# Compute arrays `arr2` and `arr3` that contain the elements of `arr1` and
# `lst1` squared, respectively.
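# %% [markdown]
#
# One possible solution sketch: squaring is an elementwise (universal) operation,
# so `**` on the array and `np.square` on the list both work; `np.square(lst1)`
# also converts the plain list into an array.
# %%
arr2 = arr1 ** 2
arr3 = np.square(lst1)
arr2, arr3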
# %% [markdown]
#
# Compute the product of `arr1` and `lst1`. Before evaluating your solution: try
# to determine the shape of the result. How is the shape of the result
# determined? Do you need a universal function or can you perform the
# multiplication as just a normal product?
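# %% [markdown]
#
# One possible solution sketch: `arr1` has shape `(2, 3, 4)` and `lst1` has
# length 4, so broadcasting aligns the trailing axis and the result has shape
# `(2, 3, 4)`. A plain `*` product is already elementwise; no explicit ufunc
# call is needed.
# %%
product = arr1 * lst1
product.shape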
# %% [markdown]
#
# Write a function `may_consume_alcohol(ages)` that takes a list or
# 1-dimensional array of ages and returns an array containing the values `"no"`
# if the corresponding value in the input array is less than 18, `"maybe"` if
# the value is at least 18 but lower than 21 and `"yes"` if the value is at least
# 21.
#
# For example `may_consume_alcohol([15, 20, 30, 21, 20, 17, 18])` returns an
# array containing `['no', 'maybe', 'yes', 'yes', 'maybe', 'no', 'maybe']`.
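# %% [markdown]
#
# One possible solution sketch using `np.select`, which picks per element from a
# list of choices based on a list of boolean conditions (following the example
# output, an age of exactly 18 falls into the `"maybe"` bucket):
# %%
def may_consume_alcohol(ages):
    ages = np.asarray(ages)
    return np.select([ages < 18, ages < 21], ["no", "maybe"], default="yes")
may_consume_alcohol([15, 20, 30, 21, 20, 17, 18])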
# %% [markdown]
#
# Write a function `double_or_half(values)` that takes a list or 1-dimensional
# array of numbers and returns a vector of the same length containing `v * 2` if
# the corresponding member of `values` is odd and `v // 2` if the corresponding
# member is even.
#
# For example, `double_or_half([0, 1, 2, 5, 10, 99])` should return a vector
# containing the values `[0, 2, 1, 10, 5, 198]`.
#
# *Hint:* Check the documentation for the `choose` function.
# %%
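# %% [markdown]
#
# One possible solution sketch following the hint: `np.choose` selects, per
# element, from a list of candidate arrays using an integer selector array
# (here `values % 2`, which is 0 for even and 1 for odd entries):
# %%
def double_or_half(values):
    values = np.asarray(values)
    return np.choose(values % 2, [values // 2, values * 2])
double_or_half([0, 1, 2, 5, 10, 99])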
|
#!/usr/bin/python
# coding: utf-8
from datetime import date, datetime, timedelta
from predict_functions import *
from sklearn.externals import joblib
import pandas as pd
import pymongo as pym
import re
date = datetime.strftime(datetime.utcnow() - timedelta(hours=24), '%Y-%m-%d')
fname = 'data/twitter_sentiments_{}.json'.format(date)
df_pred = pd.DataFrame()
# 1. Load the vocabulary
voca = pd.read_json('trained_dict.json').to_dict()[0]
# 2. Load the tweets to classify
# dataframe containing the tweets to classify in a 'text' column
print('Chargement des tweets des candidats depuis la base MongoDB ...')
df = extract_tweets(date, days=1, port=27017)
other_politicians = ['bayrou', 'aignan', 'poutou', 'arthaud', 'cheminade', 'valls', 'sarko', 'hollande']
candidates = {'macron': 'macron|emmanuel',
'fillon': 'fillon',
'hamon': 'hamon|benoit|benoît',
'melenchon': 'melenchon|mélenchon|jlm',
'le pen': 'le pen|lepen|mlp|marine'
}
# flag tweets that mention other politicians
stop_words = '|'.join([pol for pol in other_politicians])
df['other'] = df['text'].str.contains(stop_words, case=False)
# flag which of the candidates each tweet mentions
for candidate in candidates:
df[candidate] = df['text'].str.contains(candidate, case=False)
# drop tweets that mention other political figures
df = df[df['other']==False]
# drop tweets that mention several of the 5 candidates (or none of them)
df['count'] = 1 * df.fillon + df.macron + df['le pen'] + df.melenchon + df.hamon
df = df[df['count']==1]
df.reset_index(drop=True, inplace=True)
# 3. Build the features and the TF-IDF matrix for the test set
X_test= build_X(df, drop_dups=False, vocab=voca, min_df=3, n_grams=(1,1))
# 4. Load the trained model
clf = joblib.load('trained_logistic_regression.pkl')
# 5. Predict
print('Prediction des tweets...')
y_pred = clf.predict(X_test)
df['sentiment'] = y_pred
# 6. Save the predictions
# append one row per candidate to the dataframe
for candidate in candidates:
curr_df = df[df[candidate]==True]
taille = curr_df.shape[0]
rec = {'count': taille, 'candidat': candidate}
try:
rec['neg'] = curr_df[curr_df['sentiment']==-1].shape[0] / taille
rec['neu'] = curr_df[curr_df['sentiment']==0].shape[0] / taille
rec['pos'] = curr_df[curr_df['sentiment']==1].shape[0] / taille
    except ZeroDivisionError:
        # if there are no tweets for the current candidate in the dataset
rec['neg'], rec['neu'], rec['pos'] = ('-', '-', '-')
df_pred = df_pred.append(rec, verify_integrity=False, ignore_index=True)
df_pred.set_index('candidat', drop=True, inplace=True)
print('Sauvegarde des pourcentages par candidat dans un .json : {}'.format(fname))
print(df_pred)
df_pred.to_json(fname)
print('Insertion dans la base MongoDB "predicted"...')
insert_in_mongo(df.drop(['other', 'count'], axis=1), port=27017)
|
'''
Builder for the snapshot from smaller snapshots.
'''
from amuse.datamodel.particles import Particles
from amuse.lab import units
from amuse.units.quantities import VectorQuantity
from omtool.core.datamodel import Snapshot
class SnapshotBuilder:
'''
Builder for the snapshot from smaller snapshots.
'''
def __init__(self):
self.snapshot = Snapshot(Particles(), 0 | units.Myr)
def add_snapshot(self,
snapshot: Snapshot,
offset: VectorQuantity = [0, 0, 0] | units.kpc,
velocity: VectorQuantity = [0, 0, 0] | units.kms
):
'''
Appends snapshot of any number of particles to the result.
'''
snapshot.particles.position += offset
snapshot.particles.velocity += velocity
self.snapshot = self.snapshot + snapshot
def add_particles(self, particles: Particles):
'''
        Appends the given particles to the resulting snapshot.
'''
self.snapshot.particles.add_particles(particles)
def get_result(self) -> Snapshot:
'''
Returns resulting snapshot.
'''
self.snapshot.particles.move_to_center()
return self.snapshot
def to_fits(self, filename: str):
'''
        Writes the result to a FITS file.
'''
self.snapshot.particles.move_to_center()
self.snapshot.to_fits(filename)
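# A minimal usage sketch (hypothetical snapshots and output path, assuming the
# omtool Snapshot objects carry AMUSE particle sets as above):
#
#   builder = SnapshotBuilder()
#   builder.add_snapshot(host_snapshot)
#   builder.add_snapshot(satellite_snapshot,
#                        offset=[50, 0, 0] | units.kpc,
#                        velocity=[0, 100, 0] | units.kms)
#   builder.to_fits('merged_model.fits')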
|
fruits = ['orange', 'apple', 'banana', 'pineapple', 'coconut', 'banana']
print(fruits)
count = fruits.count('banana') # 2
print(f"There are {count} bananas")
|
from __future__ import print_function
import sublime
import sublime_plugin
import traceback
try:
from latextools_utils import get_setting
from latextools_utils.distro_utils import using_miktex
from latextools_utils.external_command import external_command
except ImportError:
from .latextools_utils import get_setting
from .latextools_utils.distro_utils import using_miktex
from .latextools_utils.external_command import external_command
if sublime.version() < '3000':
_ST3 = False
strbase = basestring
else:
_ST3 = True
strbase = str
def _view_texdoc(file):
if file is None:
raise Exception('File must be specified')
if not isinstance(file, strbase):
raise TypeError('File must be a string')
command = ['texdoc']
if using_miktex():
command.append('--view')
command.append(file)
try:
external_command(command)
except OSError:
traceback.print_exc()
sublime.error_message('Could not run texdoc. Please ensure that your texpath setting is configured correctly in the LaTeXTools settings.')
class LatexPkgDocCommand(sublime_plugin.WindowCommand):
def run(self):
window = self.window
def _on_done(file):
if (
file is not None and
isinstance(file, strbase) and
file != ''
):
window.run_command('latex_view_doc', {'file': file})
window.show_input_panel(
'View documentation for which package?',
'',
_on_done,
None,
None
)
class LatexViewDocCommand(sublime_plugin.WindowCommand):
def run(self, file):
_view_texdoc(file)
def is_visible(self):
return False # hide this from menu
|