import peo_pycuda.chordal_gen as cg
for i in range(10):
n = (i+1) * 10
G = cg.generateChordalGraph(n, 0.5)
cg.exportGraphCsr(G, filename="graph_"+str(n)+".txt")
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 01:59:23 2020
@author: dreis
"""
import datetime
import time
def met_append():
    """Read the word list, building it with list.append()."""
    with open(r'C:\Users\dreis\Desktop\Estudos\Projetos\words.txt', 'r') as caminho:
        t = list()
        for palavra in caminho:
            t.append(palavra.strip())
    return t
def met_mais():
    """Read the word list, building it with list concatenation (t = t + [word])."""
    with open(r'C:\Users\dreis\Desktop\Estudos\Projetos\words.txt', 'r') as caminho:
        t = list()
        for palavra in caminho:
            word = palavra.strip()
            t = t + [word]
    return t
startappend = time.time()
print('Append method.')
apen = met_append()
print(len(apen))
print(apen[:10])
finalappend = (time.time() - startappend)
print(f'{finalappend}')
print('===============================')
startsoma = time.time()
print('Concatenation method.')
som = met_mais()
print(len(som))
print(som[:10])
finalsoma = (time.time() - startsoma)
tempo = finalsoma - finalappend
minutos = datetime.timedelta(seconds=tempo)
print(f'{finalsoma}')
print('===============================')
print(f'The difference is {minutos}')
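# A hedged timing sketch using timeit, with a small in-memory list instead of the
# words.txt file above, so it isolates the append vs. concatenation cost from the
# file I/O included in the timings printed above. Names below are illustrative.
import timeit
palavras = ['word'] * 10_000  # stand-in data, not the real file contents
def via_append(items):
    t = []
    for w in items:
        t.append(w)  # amortised O(1) per element
    return t
def via_concat(items):
    t = []
    for w in items:
        t = t + [w]  # builds a new list every iteration: O(n**2) overall
    return t
print('append:', timeit.timeit(lambda: via_append(palavras), number=10))
print('concat:', timeit.timeit(lambda: via_concat(palavras), number=10))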
|
# Copyright (C) 2018 Pierre Jean Fichet
# <pierrejean dot fichet at posteo dot net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Functions for the command look.
"""
import re
from .message import info, fmt
def look(from_char, command):
"""Command look. We check various matches and pick the correct
one."""
# first, we sanitize spaces
command = re.sub(r"\s+", " ", command).lower()
    # "regarder l'objet" (look at an object):
match = re.match(r"(le |la |les |l')?(\w+)\s*$", command)
if match:
look_at(from_char, match.group(2))
return
    # "regarder dans la boite" (look inside a container):
match = re.match(r"(dans|sur) (le |la |les |l')?(\w+)\s*$", command)
if match:
look_in(from_char, match.group(3))
return
    # "regarder l'objet dans la boite" (look at an object inside a container)
match = re.match(r"(le |la |les |l')?(\w+)\s(dans|de|sur)\s(le |la |les |l')?(\w+)\s*$", command)
if match:
look_at_in(from_char, match.group(5), match.group(2))
return
info(from_char.player, """<b>Usage:</b>
<code>regarder [le|la|les|l'] <i>mot_clé</i></code><br/>
<code>regarder [dans|sur] [le|la|les|l'] <i>mot_clé</i></code><br/>
<code>regarder [le|la|les|l'] <i>mot_clé</i> [dans|de|sur|d']
[le|la|les|l'] <i>mot_clé</i></code>""")
def look_fmt(character, shortdesc, longdesc):
character.player.client.send("<p><b>{} regarde {}</b>. — {}</p>"
.format(character.data["name"], shortdesc, longdesc))
def look_at(from_char, keyword):
"""Regarder émilie"""
# look at character
for character in from_char.room.characters:
if keyword in from_char.remember.get_remember(character).lower():
look_at_character(from_char, character)
return
# look at item in inventory
item = from_char.inventory.get_item('shortdesc', keyword)
if item:
look_at_item(from_char, item)
return
# look at item in equipment
item = from_char.equipment.get_item('worndesc', keyword)
if item:
look_at_item(from_char, item)
return
# look at item in room
item = from_char.room.container.get_item('roomdesc', keyword)
if item:
look_at_item(from_char, item)
return
# nothing found
info(from_char.player,
"Aucun personnage ni objet ne correspond au mot clé « {} »."
.format(keyword))
def wornlist(character, top=False):
if not character.equipment.items:
if character.data['gender'] > 1:
return "Elle est toute nue."
else:
return "Il est tout nu."
layers = {}
visible = 0
for item in character.equipment.items:
# sort items by wornplace
key = item.data['wornplace']
if key > visible:
visible = key
if key in layers:
layers[key] = layers[key] + " " + item.data['worndesc']
else:
layers[key] = item.data['worndesc']
if top:
return layers[visible]
garment = ' '.join([layers[key] for key in layers])
return garment
def look_in_equipment(from_char, to_char):
""" Look in equipment, only show visible items."""
    layers = wornlist(to_char)
fmt(from_char,
"{} regarde son équipement".format(from_char.data['name']),
layers)
def look_at_character(from_char, to_char):
"""Look at a character."""
    visible = wornlist(to_char, top=True)
if from_char == to_char:
title = "{} se regarde".format(from_char.data['name'])
else:
title = "{} regarde {}".format(
from_char.data['name'],
from_char.remember.get_remember(to_char))
content = "{}</p><p>{}".format(to_char.data['longdesc'], visible)
fmt(from_char, title, content)
def look_at_item(from_char, item):
"""Look at an item."""
title = "{} regarde {}".format(
from_char.data['name'],
item.data['shortdesc'])
fmt(from_char, title, item.data['longdesc'])
def look_in(from_char, keyword):
"""Regarder dans le coffre"""
# look in character = look at his equipment
for character in from_char.room.characters:
if keyword in from_char.remember.get_remember(character).lower():
visible = wornlist(character, top=True)
if from_char == character:
title = "{} regarde son équipement".format(from_char.data['name'])
else:
title = "{} regarde l'équipement de {}".format(
from_char.data['name'],
from_char.remember.get_remember(character))
fmt(from_char, title, visible)
return
# look in something in inventory
item = from_char.inventory.get_item('shortdesc', keyword)
if item:
look_in_container(from_char, item)
return
# look in something in equipment
item = from_char.equipment.get_item('worndesc', keyword)
if item:
look_in_container(from_char, item)
return
# look in something in room
item = from_char.room.container.get_item('roomdesc', keyword)
if item:
look_in_container(from_char, item)
return
info(from_char.player, "Aucun objet ne correspond au mot clé « {} »."
.format(keyword))
def look_in_container(from_char, item):
"""Look in a container."""
if not item.container:
info(from_char.player, "{} ne peut pas regarder dans {}."
.format(from_char.data["name"], item.data['shortdesc']))
return
if not item.container.items:
info(from_char.player, "{} est vide.".format(
item.data["shortdesc"].capitalize() ))
return
title = "{} regarde dans {}".format(from_char.data['name'],
item.data['shortdesc'])
fmt(from_char, title, item.container.list_items('shortdesc'))
def look_in_inventory(from_char, to_char):
""" look in inventory."""
if not to_char.inventory.items:
info(from_char.player, "{} ne transporte rien.".format(
to_char.data["name"]))
return
items = to_char.inventory.list_items('shortdesc')
title = "{} regarde son inventaire".format(from_char.data['name'])
fmt(from_char, title, items)
def look_at_in(from_char, key_container, key_item):
"""look at something in container. Here, we search for the correct
container key"""
# look at something on character
for character in from_char.room.characters:
if key_container in from_char.remember.get_remember(character).lower():
look_at_in_equipment(from_char, character, key_item)
return
# look at something in inventory
item = from_char.inventory.get_item('shortdesc', key_container)
if item:
look_at_in_container(from_char, item, key_item)
return
# look at something in equipment
item = from_char.equipment.get_item('shortdesc', key_container)
if item:
look_at_in_container(from_char, item, key_item)
return
# look at something in room
item = from_char.room.container.get_item('roomdesc', key_container)
if item :
look_at_in_container(from_char, item, key_item)
return
# nothing found
info(from_char.player,
"Aucun personnage ni objet ne correspond au mot clé « {} »."
.format(key_container))
def look_at_in_equipment(from_char, to_char, keyword):
"look at something in equipment"
item = to_char.equipment.get_item('worndesc', keyword)
if item:
look_at_item(from_char, item)
else:
info(from_char.player,
"{} ne porte aucun objet correspondant au mot clé « {} »."
.format(from_char.remember.get_remember(to_char), keyword))
def look_at_in_container(from_char, item, keyword):
"""look at something in container. Here, we have found the
container, we search for an object in it"""
if not item.container:
info(from_char.player, "{} ne peut pas regarder dans {}."
.format(from_char.data["name"], item.data['shortdesc']))
return
obj = item.container.get_item('shortdesc', keyword)
if obj:
look_at_item(from_char, obj)
else:
info(from_char.player,
"{} ne contient aucun object correspondant au mot clé « {} »."
.format(item.data["shortdesc"], keyword))
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: query/query.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='query/query.proto',
package='query',
syntax='proto3',
serialized_options=_b('\n\024io.grpc.examples.apiB\005QueryP\001\242\002\003HLW'),
serialized_pb=_b('\n\x11query/query.proto\x12\x05query\"\'\n\x04Vec3\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"\x92\x01\n\x06\x42ounds\x12\x1b\n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x0b.query.Vec3\x12\x1c\n\x07\x65xtents\x18\x02 \x01(\x0b\x32\x0b.query.Vec3\x12\x18\n\x03max\x18\x03 \x01(\x0b\x32\x0b.query.Vec3\x12\x18\n\x03min\x18\x04 \x01(\x0b\x32\x0b.query.Vec3\x12\x19\n\x04size\x18\x05 \x01(\x0b\x32\x0b.query.Vec3\"\'\n\x13VesselBoundsRequest\x12\x10\n\x08vesselId\x18\x01 \x01(\t\"5\n\x14VesselBoundsResponse\x12\x1d\n\x06\x62ounds\x18\x01 \x01(\x0b\x32\r.query.Bounds\"#\n\x13\x41llVesselIdsRequest\x12\x0c\n\x04type\x18\x01 \x01(\t\"#\n\x14\x41llVesselIdsResponse\x12\x0b\n\x03ids\x18\x01 \x03(\t\"\x1e\n\nIMURequest\x12\x10\n\x08vesselId\x18\x01 \x01(\t\"\x8d\x01\n\x0bIMUResponse\x12\x1d\n\x08position\x18\x01 \x01(\x0b\x32\x0b.query.Vec3\x12\x1d\n\x08velocity\x18\x02 \x01(\x0b\x32\x0b.query.Vec3\x12$\n\x0f\x61ngularVelocity\x18\x03 \x01(\x0b\x32\x0b.query.Vec3\x12\x1a\n\x05\x61ngle\x18\x04 \x01(\x0b\x32\x0b.query.Vec32\xe3\x01\n\x0cQueryService\x12L\n\x0fGetAllVesselIds\x12\x1a.query.AllVesselIdsRequest\x1a\x1b.query.AllVesselIdsResponse\"\x00\x12\x37\n\x0cGetVesselIMU\x12\x11.query.IMURequest\x1a\x12.query.IMUResponse\"\x00\x12L\n\x0fGetVesselBounds\x12\x1a.query.VesselBoundsRequest\x1a\x1b.query.VesselBoundsResponse\"\x00\x42%\n\x14io.grpc.examples.apiB\x05QueryP\x01\xa2\x02\x03HLWb\x06proto3')
)
_VEC3 = _descriptor.Descriptor(
name='Vec3',
full_name='query.Vec3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='query.Vec3.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='y', full_name='query.Vec3.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='z', full_name='query.Vec3.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=67,
)
_BOUNDS = _descriptor.Descriptor(
name='Bounds',
full_name='query.Bounds',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='center', full_name='query.Bounds.center', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extents', full_name='query.Bounds.extents', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max', full_name='query.Bounds.max', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min', full_name='query.Bounds.min', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='query.Bounds.size', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=216,
)
_VESSELBOUNDSREQUEST = _descriptor.Descriptor(
name='VesselBoundsRequest',
full_name='query.VesselBoundsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vesselId', full_name='query.VesselBoundsRequest.vesselId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=257,
)
_VESSELBOUNDSRESPONSE = _descriptor.Descriptor(
name='VesselBoundsResponse',
full_name='query.VesselBoundsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounds', full_name='query.VesselBoundsResponse.bounds', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=312,
)
_ALLVESSELIDSREQUEST = _descriptor.Descriptor(
name='AllVesselIdsRequest',
full_name='query.AllVesselIdsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='query.AllVesselIdsRequest.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=314,
serialized_end=349,
)
_ALLVESSELIDSRESPONSE = _descriptor.Descriptor(
name='AllVesselIdsResponse',
full_name='query.AllVesselIdsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='query.AllVesselIdsResponse.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=351,
serialized_end=386,
)
_IMUREQUEST = _descriptor.Descriptor(
name='IMURequest',
full_name='query.IMURequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vesselId', full_name='query.IMURequest.vesselId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=418,
)
_IMURESPONSE = _descriptor.Descriptor(
name='IMUResponse',
full_name='query.IMUResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='position', full_name='query.IMUResponse.position', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='velocity', full_name='query.IMUResponse.velocity', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='angularVelocity', full_name='query.IMUResponse.angularVelocity', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='angle', full_name='query.IMUResponse.angle', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=421,
serialized_end=562,
)
_BOUNDS.fields_by_name['center'].message_type = _VEC3
_BOUNDS.fields_by_name['extents'].message_type = _VEC3
_BOUNDS.fields_by_name['max'].message_type = _VEC3
_BOUNDS.fields_by_name['min'].message_type = _VEC3
_BOUNDS.fields_by_name['size'].message_type = _VEC3
_VESSELBOUNDSRESPONSE.fields_by_name['bounds'].message_type = _BOUNDS
_IMURESPONSE.fields_by_name['position'].message_type = _VEC3
_IMURESPONSE.fields_by_name['velocity'].message_type = _VEC3
_IMURESPONSE.fields_by_name['angularVelocity'].message_type = _VEC3
_IMURESPONSE.fields_by_name['angle'].message_type = _VEC3
DESCRIPTOR.message_types_by_name['Vec3'] = _VEC3
DESCRIPTOR.message_types_by_name['Bounds'] = _BOUNDS
DESCRIPTOR.message_types_by_name['VesselBoundsRequest'] = _VESSELBOUNDSREQUEST
DESCRIPTOR.message_types_by_name['VesselBoundsResponse'] = _VESSELBOUNDSRESPONSE
DESCRIPTOR.message_types_by_name['AllVesselIdsRequest'] = _ALLVESSELIDSREQUEST
DESCRIPTOR.message_types_by_name['AllVesselIdsResponse'] = _ALLVESSELIDSRESPONSE
DESCRIPTOR.message_types_by_name['IMURequest'] = _IMUREQUEST
DESCRIPTOR.message_types_by_name['IMUResponse'] = _IMURESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Vec3 = _reflection.GeneratedProtocolMessageType('Vec3', (_message.Message,), {
'DESCRIPTOR' : _VEC3,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.Vec3)
})
_sym_db.RegisterMessage(Vec3)
Bounds = _reflection.GeneratedProtocolMessageType('Bounds', (_message.Message,), {
'DESCRIPTOR' : _BOUNDS,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.Bounds)
})
_sym_db.RegisterMessage(Bounds)
VesselBoundsRequest = _reflection.GeneratedProtocolMessageType('VesselBoundsRequest', (_message.Message,), {
'DESCRIPTOR' : _VESSELBOUNDSREQUEST,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.VesselBoundsRequest)
})
_sym_db.RegisterMessage(VesselBoundsRequest)
VesselBoundsResponse = _reflection.GeneratedProtocolMessageType('VesselBoundsResponse', (_message.Message,), {
'DESCRIPTOR' : _VESSELBOUNDSRESPONSE,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.VesselBoundsResponse)
})
_sym_db.RegisterMessage(VesselBoundsResponse)
AllVesselIdsRequest = _reflection.GeneratedProtocolMessageType('AllVesselIdsRequest', (_message.Message,), {
'DESCRIPTOR' : _ALLVESSELIDSREQUEST,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.AllVesselIdsRequest)
})
_sym_db.RegisterMessage(AllVesselIdsRequest)
AllVesselIdsResponse = _reflection.GeneratedProtocolMessageType('AllVesselIdsResponse', (_message.Message,), {
'DESCRIPTOR' : _ALLVESSELIDSRESPONSE,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.AllVesselIdsResponse)
})
_sym_db.RegisterMessage(AllVesselIdsResponse)
IMURequest = _reflection.GeneratedProtocolMessageType('IMURequest', (_message.Message,), {
'DESCRIPTOR' : _IMUREQUEST,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.IMURequest)
})
_sym_db.RegisterMessage(IMURequest)
IMUResponse = _reflection.GeneratedProtocolMessageType('IMUResponse', (_message.Message,), {
'DESCRIPTOR' : _IMURESPONSE,
'__module__' : 'query.query_pb2'
# @@protoc_insertion_point(class_scope:query.IMUResponse)
})
_sym_db.RegisterMessage(IMUResponse)
DESCRIPTOR._options = None
_QUERYSERVICE = _descriptor.ServiceDescriptor(
name='QueryService',
full_name='query.QueryService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=565,
serialized_end=792,
methods=[
_descriptor.MethodDescriptor(
name='GetAllVesselIds',
full_name='query.QueryService.GetAllVesselIds',
index=0,
containing_service=None,
input_type=_ALLVESSELIDSREQUEST,
output_type=_ALLVESSELIDSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetVesselIMU',
full_name='query.QueryService.GetVesselIMU',
index=1,
containing_service=None,
input_type=_IMUREQUEST,
output_type=_IMURESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetVesselBounds',
full_name='query.QueryService.GetVesselBounds',
index=2,
containing_service=None,
input_type=_VESSELBOUNDSREQUEST,
output_type=_VESSELBOUNDSRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_QUERYSERVICE)
DESCRIPTOR.services_by_name['QueryService'] = _QUERYSERVICE
# @@protoc_insertion_point(module_scope)
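# Usage sketch (not part of the protoc output above): the registered message
# classes behave like any generated protobuf messages, so a request can be built,
# serialized and re-parsed with the standard API. The vessel id here is made up.
if __name__ == '__main__':
    req = IMURequest(vesselId='vessel-42')
    payload = req.SerializeToString()
    parsed = IMURequest()
    parsed.ParseFromString(payload)
    print(parsed.vesselId)  # -> vessel-42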
|
'''
Created on Jul 23, 2021
@author: mballance
'''
class WildcardBinFactory(object):
@classmethod
def str2bin(cls, val) -> tuple:
# val is string in oct, hex, or bin
value = 0
mask = 0
if val.startswith("0o") or val.startswith("0O"):
# octal format
for c in val[2:]:
if c != '_':
value <<= 3
mask <<= 3
if c not in ['x', 'X', '?']:
mask |= 0x7
value |= int(c, 8)
elif val.startswith("0x") or val.startswith("0X"):
for c in val[2:]:
if c != '_':
value <<= 4
mask <<= 4
if c not in ['x', 'X', '?']:
mask |= 0xF
value |= int(c, 16)
elif val.startswith("0b") or val.startswith("0B"):
for c in val[2:]:
if c != '_':
value <<= 1
mask <<= 1
if c not in ['x', 'X', '?']:
mask |= 0x1
value |= int(c, 2)
else:
raise Exception("unknown base for value %s" % str(val))
return (value,mask)
@classmethod
def valmask2binlist(cls, value, mask):
"""Converts value/mask representation to a list of bin specifications"""
n_bits = 0
mask_t = mask
bit_i = 0
total_mask_bits = 0
directives = []
while mask_t != 0:
if (mask_t & 1) == 0:
# Collect this grouping
group_start_bit = bit_i
group_n_bits = 0
while (mask_t & 1) == 0:
group_n_bits += 1
total_mask_bits += 1
mask_t >>= 1
bit_i += 1
pass
directives.append((group_start_bit, group_n_bits))
else:
mask_t >>= 1
bit_i += 1
if total_mask_bits > 20:
raise Exception("Wildcard array bins limited to 20 mask bits")
ranges = []
for val in range(0, (1 << total_mask_bits)):
val_i = val
val_t = (value & mask)
for d in directives:
val_t |= ((val_i & ((1 << d[1])-1)) << d[0])
val_i >>= d[1]
if len(ranges) > 0 and ranges[-1][1]+1 == val_t:
ranges[-1] = (ranges[-1][0], val_t)
else:
ranges.append((val_t, val_t))
return ranges
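# Example sketch showing how the two classmethods above compose: "0b10x1" has a
# single wildcard bit (cleared in the mask), so valmask2binlist enumerates both
# concrete values and merges adjacent ones into ranges.
if __name__ == '__main__':
    value, mask = WildcardBinFactory.str2bin("0b10x1")
    print(value, mask)  # 9 13 (value 0b1001, mask 0b1101)
    print(WildcardBinFactory.valmask2binlist(value, mask))  # [(9, 9), (11, 11)]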
|
from ...UI.Base import Document
from ...UI.Elements import div,img,label
from ...Core.DataTypes.UI import EventListener
from ...UI.CustomElements import AspectRatioPreservedContainer
class MainMenu(Document):
Name = "Pong/MainMenu"
StyleSheet = "Styles/MainMenu/MainMenu.json"
ResourceKey = "MainMenu"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# region Background
container = AspectRatioPreservedContainer() # The container
self.Children += container # Add child
background = img(self.Window.Resources.Images.Background,classes = ".background") # The background
container.Children += background # Add child
# endregion
# title
title = label(text="PONG", classes = ".title")
background.Children += title
# region Navigation buttons
onlineButton = label(text="Online Multiplayer", classes = ".navigationButton .onlineButton")
localButton = label(text="Local Multiplayer", classes = ".navigationButton .localButton")
arcadeButton = label(text="Arcade", classes = ".navigationButton .arcadeButton")
quitButton = label(text="Quit", classes = ".navigationButton .quitButton")
onlineButton.EventListeners += EventListener("<Button-1>", lambda e: self.NavigateTo('Online'))
localButton.EventListeners += EventListener("<Button-1>", lambda e: self.NavigateTo('Local'))
arcadeButton.EventListeners += EventListener("<Button-1>", lambda e: self.NavigateTo('Arcade'))
quitButton.EventListeners += EventListener("<Button-1>", lambda e: self.QuitGame())
background.Children += onlineButton
background.Children += localButton
background.Children += arcadeButton
background.Children += quitButton
# endregion
def QuitGame(self):
self.Window.Quit()
def NavigateTo(self, dest):
if dest == "Online":
from ..OnlineMultiplayer import Lobby
self.Window.ChangeDocumentTo(Lobby)
elif dest == "Local":
from ..LocalMultiplayer import CreateGame
self.Window.ChangeDocumentTo(CreateGame)
elif dest == "Arcade":
from ..Arcade import CreateGame
self.Window.ChangeDocumentTo(CreateGame)
|
from typing import Dict, Any
import pandas as pd
import streamlit as st
import core.formatter as fm
def _impact_results(impact: Dict[str, str]) -> None:
"""
Reports impact of current filters vs. original table.
:param impact: Dictionary with filter and value.
:return: None
"""
df = pd.DataFrame(list(impact.items()), columns=['Filter', '% of Total'])
df.set_index('Filter', inplace=True)
st.table(df)
def naked(df_in: pd.DataFrame, filters: dict) -> pd.DataFrame:
"""Retrieve naked options based on the parameters given.
:param df_in: Option chain table.
:param filters: Filters for the options to be shown.
:return: Options table dataframe.
"""
# variable assignment
# TODO: Make Debit/credit premium type work
# TODO: Check on ITM treatment
d: Dict[str, Any] = {
'premium_type': filters['premium_type'].lower().strip(),
'option_itm': False,
'option_type': filters['option_type'].lower().strip(),
'margin_requirement': float(filters['margin_requirement']),
'max_risk': float(filters['max_risk']),
'min_return_pct': float(filters['min_return_pct']) / 100,
'max_dte': int(filters['max_dte']),
'min_dte': int(filters['min_dte']),
'max_delta': float(filters['max_delta']),
'min_pop': float(filters['min_pop']) / 100,
'min_p50': float(filters['min_p50']) / 100,
'min_open_int_pctl': float(filters['min_open_int_pctl']) / 100,
'min_volume_pctl': float(filters['min_volume_pctl']) / 100,
'max_bid_ask_pctl': float(filters['max_bid_ask_pctl']) / 100
}
df: pd.DataFrame = df_in.copy()
del filters, df_in
# Clean table.
# Comment out columns to keep.
df.drop(inplace=True, columns=[
# 'description',
'exchangeName',
# 'bid',
# 'ask',
'last',
# 'mark',
'bidSize',
'askSize',
'bidAskSize',
'lastSize',
'highPrice',
'lowPrice',
'openPrice',
'closePrice',
# 'totalVolume',
'tradeTimeInLong',
'quoteTimeInLong',
'netChange',
# 'volatility',
# 'delta',
'gamma',
'theta',
'vega',
'rho',
# 'openInterest',
'timeValue',
'theoreticalOptionValue',
'theoreticalVolatility',
# 'strikePrice',
'expirationDate',
# 'daysToExpiration',
'expirationType',
'lastTradingDay',
# 'multiplier',
'percentChange',
'markChange',
'markPercentChange',
'intrinsicValue',
# 'inTheMoney',
'mini',
'nonStandard',
'pennyPilot',
# 'lastPrice',
# 'option_value_mc',
# 'probability_ITM',
# 'probability_of_50',
])
df.reset_index(inplace=True)
df.sort_values(by='symbol', ascending=True, inplace=True)
df.drop(columns=['symbol'], inplace=True) # Actual option's symbol.
# Filter: Pass 1. Before calculation.
df = df[(df['delta'] != 'NaN')
& (df['openInterest'] > 0)
& (df['totalVolume'] > 0)
& (df['bid'] > 0)
& (df['ask'] > 0)
]
_shp = df.shape[0] # Impact log initialization.
impact: Dict[str, str] = {'Starting size': f"{_shp}"}
# filter
if d['option_type'] in ['put', 'call']:
df = df[df['option_type'].str.lower() == d['option_type']]
impact['Option type'] = f"{df.shape[0]/_shp:.0%}"
if d['premium_type'] == 'credit':
df = df[(df['inTheMoney'] == d['option_itm'])]
impact['ITM'] = f"{df.shape[0] / _shp:.0%}"
df = df[(df['delta'] <= d['max_delta'])
& (df['delta'] >= -d['max_delta'])]
impact['Delta'] = f"{df.shape[0] / _shp:.0%}"
df = df[df['probability_ITM'] >= d['min_pop']]
impact['Prob of Profit (ITM)'] = f"{df.shape[0]/_shp:.0%}"
df = df[df['probability_of_50'] >= d['min_p50']]
impact['Prob of 50% Profit (ITM)'] = f"{df.shape[0]/_shp:.0%}"
df = df[(df['daysToExpiration'] >= d['min_dte'])
& (df['daysToExpiration'] <= d['max_dte'])]
impact['DTE'] = f"{df.shape[0] / _shp:.0%}"
# Calculated columns.
df['bid_ask_pct'] = df['ask']/df['bid'] - 1
df['bid_ask_rank'] = (df.groupby('stock')['bid_ask_pct']
.rank(pct=True, ascending=False, method='dense'))
df['open_int_rank'] = (df.groupby('stock')['openInterest']
.rank(pct=True, ascending=True, method='dense'))
df['volume_rank'] = (df.groupby('stock')['totalVolume']
.rank(pct=True, ascending=True, method='dense'))
df['break_even'] = df['strikePrice'] - df['mark']
df['margin_requirement'] = df['strikePrice'] * df['multiplier']
df['max_profit'] = df['mark'] * df['multiplier']
df['risk'] = df['strikePrice']*df['multiplier'] - df['max_profit']
df['return'] = df['max_profit'] / df['risk']
df['return_day'] = df['return'] / (df['daysToExpiration'] + .00001)
df['quantity'] = d['max_risk'] / df['risk']
df['search'] = (
# Stock.
df['description'].str.split(' ').str[0].astype(str)
# Day.
+ " "
+ df['description'].str.split(' ').str[2].astype(str)
# Month.
+ " "
+ df['description'].str.split(' ').str[1].astype(str)
# Year.
+ " "
+ df['description'].str.split(' ').str[3].str[::3].astype(str)
# Days to expiration.
+ " (" + df['daysToExpiration'].astype(str) + ") "
# Option Type (PUT or CALL).
+ df['option_type'].str.upper().astype(str)
# Strike price.
+ " "
+ df['strikePrice'].astype(str)
)
# More filters
df = df[df['bid_ask_rank'] >= d['max_bid_ask_pctl']]
impact['Bid/Ask'] = f"{df.shape[0] / _shp:.0%}"
df = df[df['open_int_rank'] >= d['min_open_int_pctl']]
impact['Open Interest'] = f"{df.shape[0] / _shp:.0%}"
df = df[df['volume_rank'] >= d['min_volume_pctl']]
impact['Volume'] = f"{df.shape[0] / _shp:.0%}"
df = df[df['margin_requirement'] <= d['margin_requirement']]
impact['Margin requirement'] = f"{df.shape[0] / _shp:.0%}"
df = df[df['risk'] <= d['max_risk']]
impact['Risk'] = f"{df.shape[0] / _shp:.0%}"
if d['premium_type'] == 'credit':
df = df[df['return'] >= d['min_return_pct']]
impact['Return'] = f"{df.shape[0] / _shp:.0%}"
if d['premium_type'] == 'credit':
df = df[df['risk'] >= df['max_profit']]
impact['Risk>Profit'] = f"{df.shape[0] / _shp:.0%}"
# Exit if table is empty.
if len(df.index) == 0:
st.warning("**Nothing to see here!** Criteria not met")
st.write("**Here's the impact of the filters you've set:**")
_impact_results(impact)
st.stop()
# Formatting the table.
df.sort_values(by='return', ascending=False, inplace=True)
df.set_index('search', inplace=True)
df = df[[
'return',
'return_day',
'max_profit',
'risk',
'quantity',
'margin_requirement',
'mark',
'break_even',
'delta',
# 'lastPrice',
'option_value_mc',
'probability_ITM',
'probability_of_50',
# 'volatility',
'daysToExpiration',
'stock'
]]
df = df.rename(columns={
'return': 'Return',
'return_day': 'Daily Return',
'max_profit': 'Max Profit',
'risk': 'Risk',
'quantity': 'Qty',
'margin_requirement': 'Margin Req\'d',
'mark': 'Prem Mark',
'break_even': 'Break Even',
'delta': 'Delta',
'option_value_mc': 'Theo Value',
'probability_ITM': 'Prob ITM',
'probability_of_50': 'Prob 50%',
# 'volatility': 'IV',
'daysToExpiration': 'DTE',
'stock': 'Stock',
})
# Display results.
min_max = ['Return', 'Max Profit', 'Daily Return', 'Prob ITM', 'Prob 50%']
df_print = (df.style
.set_table_styles(
[dict(selector='th', props=[('text-align', 'left')])])
.set_properties(**{'text-align': 'right'})
.background_gradient(
axis=0,
subset=['Return', 'Daily Return'])
.highlight_max(
subset=min_max,
color=fm.HI_MAX_COLOR)
.highlight_min(
subset=min_max,
color=fm.HI_MIN_COLOR)
.format({
'Return': fm.PERCENT2,
'Daily Return': fm.PERCENT2,
'Max Profit': fm.DOLLAR,
'Risk': fm.DOLLAR,
'Margin Req\'d': fm.DOLLAR,
'Break Even': fm.DOLLAR,
'Qty': fm.FLOAT0 + "x",
'Prem Mark': fm.DOLLAR,
'Delta': fm.FLOAT,
'Theo Value': fm.DOLLAR,
'Prob ITM': fm.PERCENT0,
'Prob 50%': fm.PERCENT0,
'DTE': fm.FLOAT0,
# 'IV': fm.FLOAT0
})
)
st.header('Naked Options')
st.dataframe(data=df_print)
return df
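# Sketch of the `filters` dict this function expects, inferred from the keys read
# into `d` above; the values are illustrative only, and `df_in` must carry the
# option-chain columns referenced above (delta, bid, ask, inTheMoney, ...).
_example_filters = {
    'premium_type': 'Credit',
    'option_type': 'Put',
    'margin_requirement': 10_000,
    'max_risk': 5_000,
    'min_return_pct': 5,      # percent; converted to a fraction above
    'max_dte': 45,
    'min_dte': 7,
    'max_delta': 0.30,
    'min_pop': 70,            # percent
    'min_p50': 60,            # percent
    'min_open_int_pctl': 30,  # percentile
    'min_volume_pctl': 30,    # percentile
    'max_bid_ask_pctl': 70,   # percentile
}
# naked(option_chain_df, _example_filters)  # would render the table in Streamlit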
|
import threading as thr
import huion.daemon.daemon as daemon
import huion.events.buttons as buttons
import huion.events.touchstrip as touchstrip
import huion.gui.gui as gui
class Main:
def __init__(self):
self.gui = gui.Gui()
self.buttons = buttons.Buttons(self.gui)
self.touchstrip = touchstrip.Touchstrip()
self.daemon = daemon.HuionDaemon(self.buttons, self.touchstrip)
main = Main()
thr.Thread(target=main.daemon.start, name="Huion Daemon").start()
main.gui.create()
|
from __future__ import generators
from spark.internal.version import *
from spark.internal.parse.usagefuns import *
from spark.pylang.implementation import Imp
from spark.internal.common import SOLVED
################################################################
# Conditional
class IfOnce(Imp):
__slots__ = ()
def solutions(self, agent, bindings, zexpr):
if predSolve1(agent, bindings, zexpr[0]):
return predSolve(agent, bindings, zexpr[1])
else:
return predSolve(agent, bindings, zexpr[2])
    def solution(self, agent, bindings, zexpr):
if predSolve1(agent, bindings, zexpr[0]):
return predSolve1(agent, bindings, zexpr[1])
else:
return predSolve1(agent, bindings, zexpr[2])
def conclude(self, agent, bindings, zexpr):
if predSolve1(agent, bindings, zexpr[0]):
predUpdate(agent, bindings, zexpr[1])
else:
predUpdate(agent, bindings, zexpr[2])
def retractall(self, agent, bindings, zexpr):
if predSolve1(agent, bindings, zexpr[0]):
predRetractall(agent, bindings, zexpr[1])
else:
predRetractall(agent, bindings, zexpr[2])
class IfMultiple(Imp):
__slots__ = ()
def solutions(self, agent, bindings, zexpr):
solutionExists = False
for x in predSolve(agent, bindings, zexpr[0]):
if x:
solutionExists = True
for y in predSolve(agent, bindings, zexpr[1]):
yield y
if not solutionExists:
for y in predSolve(agent, bindings, zexpr[2]):
yield y
    def solution(self, agent, bindings, zexpr):
solutionExists = False
for x in predSolve(agent, bindings, zexpr[0]):
solutionExists = True
if predSolve1(agent, bindings, zexpr[1]):
return SOLVED
if not solutionExists:
return predSolve1(agent, bindings, zexpr[2])
def conclude(self, agent, bindings, zexpr):
# There should be no more than one solution to zexpr[0].
# Let's assume there is at most one.
if predSolve1(agent, bindings, zexpr[0]):
predUpdate(agent, bindings, zexpr[1])
else:
predUpdate(agent, bindings, zexpr[2])
def retractall(self, agent, bindings, zexpr):
solutionExists = False
for x in predSolve(agent, bindings, zexpr[0]):
solutionExists = True
predRetractall(agent, bindings, zexpr[1])
if not solutionExists:
predRetractall(agent, bindings, zexpr[2])
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from xml.etree import ElementTree
from oslo_utils import units
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
VHDX_BAT_ENTRY_SIZE = 8
VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki]
VHDX_HEADER_SECTION_SIZE = units.Mi
VHDX_LOG_LENGTH_OFFSET = 68
VHDX_METADATA_SIZE_OFFSET = 64
VHDX_REGION_TABLE_OFFSET = 192 * units.Ki
VHDX_BS_METADATA_ENTRY_OFFSET = 48
class VHDUtilsV2(vhdutils.VHDUtils):
_VHD_TYPE_DYNAMIC = 3
_VHD_TYPE_DIFFERENCING = 4
_vhd_format_map = {
constants.DISK_FORMAT_VHD: 2,
constants.DISK_FORMAT_VHDX: 3,
}
def __init__(self):
self._vmutils = vmutilsv2.VMUtilsV2()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def create_dynamic_vhd(self, path, max_internal_size, format):
vhd_format = self._vhd_format_map.get(format)
if not vhd_format:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
max_internal_size=max_internal_size)
def create_differencing_vhd(self, path, parent_path):
# Although this method can take a size argument in case of VHDX
# images, avoid it as the underlying Win32 is currently not
# resizing the disk properly. This can be reconsidered once the
# Win32 issue is fixed.
parent_vhd_info = self.get_vhd_info(parent_path)
self._create_vhd(self._VHD_TYPE_DIFFERENCING,
parent_vhd_info["Format"],
path, parent_path=parent_path)
def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
parent_path=None):
vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()
vhd_info.Type = vhd_type
vhd_info.Format = format
vhd_info.Path = path
vhd_info.ParentPath = parent_path
if max_internal_size:
vhd_info.MaxInternalSize = max_internal_size
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateVirtualHardDisk(
VirtualDiskSettingData=vhd_info.GetText_(1))
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path)
et = ElementTree.fromstring(vhd_info_xml)
item = et.find(".//PROPERTY[@NAME='ParentPath']/VALUE")
if item is not None:
item.text = parent_vhd_path
else:
msg = (_("Failed to reconnect image %(child_vhd_path)s to "
"parent %(parent_vhd_path)s. The child image has no "
"parent path property.") %
{'child_vhd_path': child_vhd_path,
'parent_vhd_path': parent_vhd_path})
raise vmutils.HyperVException(msg)
vhd_info_xml = ElementTree.tostring(et)
(job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData(
VirtualDiskSettingData=vhd_info_xml)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_resize_method(self):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
return image_man_svc.ResizeVirtualHardDisk
def get_internal_vhd_size_by_file_size(self, vhd_path,
new_vhd_file_size):
"""Get internal size of a VHD according to new VHD file size.
VHDX Size = Header (1MB) + Log + Metadata Region + BAT + Payload Blocks
The chunk size is the maximum number of bytes described by a SB
block.
Chunk size = 2^{23} * LogicalSectorSize
:param str vhd_path: VHD file path
:param new_vhd_file_size: Size of the new VHD file.
:return: Internal VHD size according to new VHD file size.
"""
vhd_format = self.get_vhd_format(vhd_path)
if vhd_format == constants.DISK_FORMAT_VHD:
return super(VHDUtilsV2,
self).get_internal_vhd_size_by_file_size(
vhd_path, new_vhd_file_size)
else:
vhd_info = self.get_vhd_info(vhd_path)
vhd_type = vhd_info['Type']
if vhd_type == self._VHD_TYPE_DIFFERENCING:
vhd_parent = self.get_vhd_parent_path(vhd_path)
return self.get_internal_vhd_size_by_file_size(vhd_parent,
new_vhd_file_size)
else:
try:
with open(vhd_path, 'rb') as f:
hs = VHDX_HEADER_SECTION_SIZE
bes = VHDX_BAT_ENTRY_SIZE
lss = vhd_info['LogicalSectorSize']
bs = self._get_vhdx_block_size(f)
ls = self._get_vhdx_log_size(f)
ms = self._get_vhdx_metadata_size_and_offset(f)[0]
chunk_ratio = (1 << 23) * lss / bs
size = new_vhd_file_size
max_internal_size = (bs * chunk_ratio * (size - hs -
ls - ms - bes - bes / chunk_ratio) / (bs *
chunk_ratio + bes * chunk_ratio + bes))
return max_internal_size - (max_internal_size % bs)
except IOError as ex:
raise vmutils.HyperVException(_("Unable to obtain "
"internal size from VHDX: "
"%(vhd_path)s. Exception: "
"%(ex)s") %
{"vhd_path": vhd_path,
"ex": ex})
def _get_vhdx_current_header_offset(self, vhdx_file):
sequence_numbers = []
for offset in VHDX_HEADER_OFFSETS:
vhdx_file.seek(offset + 8)
sequence_numbers.append(struct.unpack('<Q',
vhdx_file.read(8))[0])
current_header = sequence_numbers.index(max(sequence_numbers))
return VHDX_HEADER_OFFSETS[current_header]
def _get_vhdx_log_size(self, vhdx_file):
current_header_offset = self._get_vhdx_current_header_offset(vhdx_file)
offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
vhdx_file.seek(offset)
log_size = struct.unpack('<I', vhdx_file.read(4))[0]
return log_size
def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
vhdx_file.seek(offset)
metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
return metadata_size, metadata_offset
def _get_vhdx_block_size(self, vhdx_file):
metadata_offset = self._get_vhdx_metadata_size_and_offset(vhdx_file)[1]
offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
vhdx_file.seek(offset)
file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]
vhdx_file.seek(file_parameter_offset + metadata_offset)
block_size = struct.unpack('<I', vhdx_file.read(4))[0]
return block_size
def _get_vhd_info_xml(self, image_man_svc, vhd_path):
(job_path,
ret_val,
vhd_info_xml) = image_man_svc.GetVirtualHardDiskSettingData(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
return vhd_info_xml.encode('utf8', 'xmlcharrefreplace')
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, vhd_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info_xml)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_item = item.find("VALUE")
if value_item is None:
value_text = None
else:
value_text = value_item.text
if name in ["Path", "ParentPath"]:
vhd_info_dict[name] = value_text
elif name in ["BlockSize", "LogicalSectorSize",
"PhysicalSectorSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["Type", "Format"]:
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHDX
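# Illustrative restatement of the VHDX sizing arithmetic used inside
# get_internal_vhd_size_by_file_size above, with the header, log, metadata and
# BAT figures passed in explicitly; it mirrors the same formula, nothing more.
def _vhdx_max_internal_size(new_file_size, block_size, log_size, metadata_size,
                            logical_sector_size,
                            header_size=VHDX_HEADER_SECTION_SIZE,
                            bat_entry_size=VHDX_BAT_ENTRY_SIZE):
    chunk_ratio = (1 << 23) * logical_sector_size / block_size
    max_internal_size = (block_size * chunk_ratio *
                         (new_file_size - header_size - log_size - metadata_size -
                          bat_entry_size - bat_entry_size / chunk_ratio) /
                         (block_size * chunk_ratio + bat_entry_size * chunk_ratio +
                          bat_entry_size))
    return max_internal_size - (max_internal_size % block_size)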
|
from fractions import Fraction
def sum_fracts(lst):
res = 0
for i in lst:
res =res + Fraction(i[0],i[1])
if res.numerator == 0:
return None
if res.denominator != 1:
return [res.numerator, res.denominator]
else:
return res.numerator
def sum_fractsB(lst):
if lst:
ret = sum(Fraction(a, b) for (a, b) in lst)
return ret.numerator if ret.denominator == 1 else [ret.numerator, ret.denominator]
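# Quick usage sketch for the two equivalent implementations above (expected
# results assume the zero/denominator checks run after the summation loop).
if __name__ == '__main__':
    print(sum_fracts([(1, 2), (1, 3), (1, 6)]))  # 1       (sum is exactly 1)
    print(sum_fracts([(1, 2), (1, 4)]))          # [3, 4]  (i.e. 3/4)
    print(sum_fractsB([(1, 2), (1, 4)]))         # [3, 4]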
|
"""
File storage config
"""
from django_cleanup.signals import cleanup_pre_delete
from storages.backends.s3boto3 import S3Boto3Storage
class MediaStorage(S3Boto3Storage):
location = "media"
def sorl_delete(**kwargs):
"""
Function to delete thumbnails when deleting the original photo
"""
from sorl.thumbnail import delete
delete(kwargs["file"])
cleanup_pre_delete.connect(sorl_delete)
|
"""
Mapsforge map file parser (for version 3 files).
Author: Oliver Gerlich
References:
- http://code.google.com/p/mapsforge/wiki/SpecificationBinaryMapFile
- http://mapsforge.org/
"""
from hachoir.parser import Parser
from hachoir.field import (Bit, Bits, UInt8, UInt16, UInt32, Int32, UInt64, String,
PaddingBits,
Enum, Field, FieldSet, SeekableFieldSet, RootSeekableFieldSet)
from hachoir.core.endian import BIG_ENDIAN
# micro-degrees factor:
UDEG = float(1000 * 1000)
CoordinateEncoding = {
0: "single delta encoding",
1: "double delta encoding",
}
class UIntVbe(Field):
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, description=description)
value = 0
size = 0
while True:
byteValue = self._parent.stream.readBytes(
self.absolute_address + (size * 8), 1)[0]
haveMoreData = (byteValue & 0x80)
value = value | ((byteValue & 0x7f) << (size * 7))
size += 1
assert size < 100, "UIntVBE is too large"
if not(haveMoreData):
break
self._size = size * 8
self.createValue = lambda: value
class IntVbe(Field):
def __init__(self, parent, name, description=None):
Field.__init__(self, parent, name, description=description)
value = 0
size = 0
shift = 0
while True:
byteValue = self._parent.stream.readBytes(
self.absolute_address + (size * 8), 1)[0]
haveMoreData = (byteValue & 0x80)
if size == 0:
isNegative = (byteValue & 0x40)
value = (byteValue & 0x3f)
shift += 6
else:
value = value | ((byteValue & 0x7f) << shift)
shift += 7
size += 1
assert size < 100, "IntVBE is too large"
if not(haveMoreData):
break
if isNegative:
value *= -1
self._size = size * 8
self.createValue = lambda: value
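# Standalone sketch of the variable-byte encoding the two fields above decode:
# each byte contributes 7 payload bits (6 plus a sign flag in the first byte of
# the signed form) and the high bit marks a continuation byte.
def _decode_unsigned_vbe(data):
    """Return (value, bytes_consumed) for an unsigned VBE number in `data`."""
    value = shift = size = 0
    while True:
        byte = data[size]
        value |= (byte & 0x7f) << shift
        shift += 7
        size += 1
        if not byte & 0x80:
            return value, size
# _decode_unsigned_vbe(b'\xac\x02') == (300, 2), since 300 == 0x2c | (0x02 << 7)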
class VbeString(FieldSet):
def createFields(self):
yield UIntVbe(self, "length")
yield String(self, "chars", self["length"].value, charset="UTF-8")
def createDescription(self):
return '(%d B) "%s"' % (self["length"].value, self["chars"].value)
class TagStringList(FieldSet):
def createFields(self):
yield UInt16(self, "num_tags")
for i in range(self["num_tags"].value):
yield VbeString(self, "tag[]")
def createDescription(self):
return "%d tag strings" % self["num_tags"].value
class ZoomIntervalCfg(FieldSet):
def createFields(self):
yield UInt8(self, "base_zoom_level")
yield UInt8(self, "min_zoom_level")
yield UInt8(self, "max_zoom_level")
yield UInt64(self, "subfile_start")
yield UInt64(self, "subfile_size")
def createDescription(self):
return "zoom level around %d (%d - %d)" % (self["base_zoom_level"].value,
self["min_zoom_level"].value, self["max_zoom_level"].value)
class TileIndexEntry(FieldSet):
def createFields(self):
yield Bit(self, "is_water_tile")
yield Bits(self, "offset", 39)
class TileZoomTable(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_pois")
yield UIntVbe(self, "num_ways")
def createDescription(self):
return "%d POIs, %d ways" % (self["num_pois"].value, self["num_ways"].value)
class TileHeader(FieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
numLevels = int(self.zoomIntervalCfg[
"max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
assert(numLevels < 50)
for i in range(numLevels):
yield TileZoomTable(self, "zoom_table_entry[]")
yield UIntVbe(self, "first_way_offset")
class POIData(FieldSet):
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("***POIStart"):
raise ValueError
yield IntVbe(self, "lat_diff")
yield IntVbe(self, "lon_diff")
yield Bits(self, "layer", 4)
yield Bits(self, "num_tags", 4)
for i in range(self["num_tags"].value):
yield UIntVbe(self, "tag_id[]")
yield Bit(self, "have_name")
yield Bit(self, "have_house_number")
yield Bit(self, "have_ele")
yield PaddingBits(self, "pad[]", 5)
if self["have_name"].value:
yield VbeString(self, "name")
if self["have_house_number"].value:
yield VbeString(self, "house_number")
if self["have_ele"].value:
yield IntVbe(self, "ele")
def createDescription(self):
s = "POI"
if self["have_name"].value:
s += ' "%s"' % self["name"]["chars"].value
s += " @ %f/%f" % (self["lat_diff"].value / UDEG,
self["lon_diff"].value / UDEG)
return s
class SubTileBitmap(FieldSet):
static_size = 2 * 8
def createFields(self):
for y in range(4):
for x in range(4):
yield Bit(self, "is_used[%d,%d]" % (x, y))
class WayProperties(FieldSet):
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("---WayStart"):
raise ValueError
yield UIntVbe(self, "way_data_size")
# WayProperties is split into an outer and an inner field, to allow
# specifying data size for inner part:
yield WayPropertiesInner(self, "inner", size=self["way_data_size"].value * 8)
class WayPropertiesInner(FieldSet):
def createFields(self):
yield SubTileBitmap(self, "sub_tile_bitmap")
# yield Bits(self, "sub_tile_bitmap", 16)
yield Bits(self, "layer", 4)
yield Bits(self, "num_tags", 4)
for i in range(self["num_tags"].value):
yield UIntVbe(self, "tag_id[]")
yield Bit(self, "have_name")
yield Bit(self, "have_house_number")
yield Bit(self, "have_ref")
yield Bit(self, "have_label_position")
yield Bit(self, "have_num_way_blocks")
yield Enum(Bit(self, "coord_encoding"), CoordinateEncoding)
yield PaddingBits(self, "pad[]", 2)
if self["have_name"].value:
yield VbeString(self, "name")
if self["have_house_number"].value:
yield VbeString(self, "house_number")
if self["have_ref"].value:
yield VbeString(self, "ref")
if self["have_label_position"].value:
yield IntVbe(self, "label_lat_diff")
yield IntVbe(self, "label_lon_diff")
numWayDataBlocks = 1
if self["have_num_way_blocks"].value:
yield UIntVbe(self, "num_way_blocks")
numWayDataBlocks = self["num_way_blocks"].value
for i in range(numWayDataBlocks):
yield WayData(self, "way_data[]")
def createDescription(self):
s = "way"
if self["have_name"].value:
s += ' "%s"' % self["name"]["chars"].value
return s
class WayData(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_coord_blocks")
for i in range(self["num_coord_blocks"].value):
yield WayCoordBlock(self, "way_coord_block[]")
class WayCoordBlock(FieldSet):
def createFields(self):
yield UIntVbe(self, "num_way_nodes")
yield IntVbe(self, "first_lat_diff")
yield IntVbe(self, "first_lon_diff")
for i in range(self["num_way_nodes"].value - 1):
yield IntVbe(self, "lat_diff[]")
yield IntVbe(self, "lon_diff[]")
class TileData(FieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 32)
if not self['signature'].value.startswith("###TileStart"):
raise ValueError
yield TileHeader(self, "tile_header", self.zoomIntervalCfg)
numLevels = int(self.zoomIntervalCfg[
"max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
for zoomLevel in range(numLevels):
zoomTableEntry = self["tile_header"][
"zoom_table_entry[%d]" % zoomLevel]
for poiIndex in range(zoomTableEntry["num_pois"].value):
yield POIData(self, "poi_data[%d,%d]" % (zoomLevel, poiIndex))
for zoomLevel in range(numLevels):
zoomTableEntry = self["tile_header"][
"zoom_table_entry[%d]" % zoomLevel]
for wayIndex in range(zoomTableEntry["num_ways"].value):
yield WayProperties(self, "way_props[%d,%d]" % (zoomLevel, wayIndex))
class ZoomSubFile(SeekableFieldSet):
def __init__(self, parent, name, zoomIntervalCfg, **kw):
SeekableFieldSet.__init__(self, parent, name, **kw)
self.zoomIntervalCfg = zoomIntervalCfg
def createFields(self):
if self["/have_debug"].value:
yield String(self, "signature", 16)
if self['signature'].value != "+++IndexStart+++":
raise ValueError
indexEntries = []
numTiles = None
i = 0
while True:
entry = TileIndexEntry(self, "tile_index_entry[]")
indexEntries.append(entry)
yield entry
i += 1
if numTiles is None:
# calculate number of tiles (TODO: better calc this from map
# bounding box)
firstOffset = self["tile_index_entry[0]"]["offset"].value
if self["/have_debug"].value:
firstOffset -= 16
numTiles = firstOffset / 5
if i >= numTiles:
break
for i, indexEntry in enumerate(indexEntries):
offset = indexEntry["offset"].value
self.seekByte(offset, relative=True)
if i != len(indexEntries) - 1:
next_offset = indexEntries[i + 1]["offset"].value
size = (next_offset - offset) * 8
else:
size = self.size - offset * 8
if size == 0:
# hachoir doesn't support empty field.
continue
yield TileData(self, "tile_data[%d]" % i, zoomIntervalCfg=self.zoomIntervalCfg, size=size)
class MapsforgeMapFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "mapsforge_map",
"category": "misc",
"file_ext": ("map",),
"min_size": 62 * 8,
"description": "Mapsforge map file",
}
endian = BIG_ENDIAN
def validate(self):
return self["file_magic"].value == "mapsforge binary OSM" and self["file_version"].value == 3
def createFields(self):
yield String(self, "file_magic", 20)
yield UInt32(self, "header_size")
yield UInt32(self, "file_version")
yield UInt64(self, "file_size")
yield UInt64(self, "creation_date")
yield Int32(self, "min_lat")
yield Int32(self, "min_lon")
yield Int32(self, "max_lat")
yield Int32(self, "max_lon")
yield UInt16(self, "tile_size")
yield VbeString(self, "projection")
# flags
yield Bit(self, "have_debug")
yield Bit(self, "have_map_start")
yield Bit(self, "have_start_zoom")
yield Bit(self, "have_language_preference")
yield Bit(self, "have_comment")
yield Bit(self, "have_created_by")
yield Bits(self, "reserved[]", 2)
if self["have_map_start"].value:
yield UInt32(self, "start_lat")
yield UInt32(self, "start_lon")
if self["have_start_zoom"].value:
yield UInt8(self, "start_zoom")
if self["have_language_preference"].value:
yield VbeString(self, "language_preference")
if self["have_comment"].value:
yield VbeString(self, "comment")
if self["have_created_by"].value:
yield VbeString(self, "created_by")
yield TagStringList(self, "poi_tags")
yield TagStringList(self, "way_tags")
yield UInt8(self, "num_zoom_intervals")
for i in range(self["num_zoom_intervals"].value):
yield ZoomIntervalCfg(self, "zoom_interval_cfg[]")
for i in range(self["num_zoom_intervals"].value):
zoomIntervalCfg = self["zoom_interval_cfg[%d]" % i]
self.seekByte(zoomIntervalCfg[
"subfile_start"].value, relative=False)
yield ZoomSubFile(self, "subfile[]", size=zoomIntervalCfg["subfile_size"].value * 8, zoomIntervalCfg=zoomIntervalCfg)
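# Usage sketch, assuming a local Mapsforge v3 file at the made-up path below;
# hachoir should pick this parser up through its registry, after which header
# fields can be read like dictionary entries.
if __name__ == '__main__':
    from hachoir.parser import createParser
    parser = createParser("some_region.map")  # hypothetical file name
    if parser:
        print(parser["file_version"].value, parser["num_zoom_intervals"].value)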
|
"""Sets the version number of paci."""
__version__ = "1.10.5"
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.naming import set_name_by_naming_series
from frappe import _, msgprint
import frappe.defaults
from frappe.utils import flt, cint, cstr, today, get_formatted_email
from frappe.desk.reportview import build_match_conditions, get_filters_cond
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import validate_party_accounts, get_dashboard_info, get_timeline_data # keep this
from frappe.contacts.address_and_contact import load_address_and_contact, delete_contact_and_address
from frappe.model.rename_doc import update_linked_doctypes
from frappe.model.mapper import get_mapped_doc
from frappe.utils.user import get_users_with_role
class Customer(TransactionBase):
def get_feed(self):
return self.customer_name
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
self.load_dashboard_info()
def load_dashboard_info(self):
info = get_dashboard_info(self.doctype, self.name, self.loyalty_program)
self.set_onload('dashboard_info', info)
def autoname(self):
cust_master_name = frappe.defaults.get_global_default('cust_master_name')
if cust_master_name == 'Customer Name':
self.name = self.get_customer_name()
else:
set_name_by_naming_series(self)
def get_customer_name(self):
if frappe.db.get_value("Customer", self.customer_name):
count = frappe.db.sql("""select ifnull(MAX(CAST(SUBSTRING_INDEX(name, ' ', -1) AS UNSIGNED)), 0) from tabCustomer
where name like %s""", "%{0} - %".format(self.customer_name), as_list=1)[0][0]
count = cint(count) + 1
return "{0} - {1}".format(self.customer_name, cstr(count))
return self.customer_name
def after_insert(self):
'''If customer created from Lead, update customer id in quotations, opportunities'''
self.update_lead_status()
def validate(self):
self.flags.is_new_doc = self.is_new()
self.flags.old_lead = self.lead_name
validate_party_accounts(self)
self.validate_credit_limit_on_change()
self.set_loyalty_program()
self.check_customer_group_change()
self.validate_default_bank_account()
self.validate_internal_customer()
# set loyalty program tier
if frappe.db.exists('Customer', self.name):
customer = frappe.get_doc('Customer', self.name)
if self.loyalty_program == customer.loyalty_program and not self.loyalty_program_tier:
self.loyalty_program_tier = customer.loyalty_program_tier
if self.sales_team:
if sum([member.allocated_percentage or 0 for member in self.sales_team]) != 100:
frappe.throw(_("Total contribution percentage should be equal to 100"))
def check_customer_group_change(self):
frappe.flags.customer_group_changed = False
if not self.get('__islocal'):
if self.customer_group != frappe.db.get_value('Customer', self.name, 'customer_group'):
frappe.flags.customer_group_changed = True
def validate_default_bank_account(self):
if self.default_bank_account:
is_company_account = frappe.db.get_value('Bank Account', self.default_bank_account, 'is_company_account')
if not is_company_account:
frappe.throw(_("{0} is not a company bank account").format(frappe.bold(self.default_bank_account)))
def validate_internal_customer(self):
internal_customer = frappe.db.get_value("Customer",
{"is_internal_customer": 1, "represents_company": self.represents_company, "name": ("!=", self.name)}, "name")
if internal_customer:
frappe.throw(_("Internal Customer for company {0} already exists").format(
frappe.bold(self.represents_company)))
def on_update(self):
self.validate_name_with_customer_group()
self.create_primary_contact()
self.create_primary_address()
if self.flags.old_lead != self.lead_name:
self.update_lead_status()
if self.flags.is_new_doc:
self.create_lead_address_contact()
self.update_customer_groups()
def update_customer_groups(self):
ignore_doctypes = ["Lead", "Opportunity", "POS Profile", "Tax Rule", "Pricing Rule"]
if frappe.flags.customer_group_changed:
update_linked_doctypes('Customer', self.name, 'Customer Group',
self.customer_group, ignore_doctypes)
def create_primary_contact(self):
if not self.customer_primary_contact and not self.lead_name:
if self.mobile_no or self.email_id:
contact = make_contact(self)
self.db_set('customer_primary_contact', contact.name)
self.db_set('mobile_no', self.mobile_no)
self.db_set('email_id', self.email_id)
def create_primary_address(self):
if self.flags.is_new_doc and self.get('address_line1'):
make_address(self)
def update_lead_status(self):
'''If Customer created from Lead, update lead status to "Converted"
update Customer link in Quotation, Opportunity'''
if self.lead_name:
lead = frappe.get_doc('Lead', self.lead_name)
lead.status = 'Converted'
lead.save()
def create_lead_address_contact(self):
if self.lead_name:
# assign lead address to customer (if already not set)
address_names = frappe.get_all('Dynamic Link', filters={
"parenttype":"Address",
"link_doctype":"Lead",
"link_name":self.lead_name
}, fields=["parent as name"])
for address_name in address_names:
address = frappe.get_doc('Address', address_name.get('name'))
if not address.has_link('Customer', self.name):
address.append('links', dict(link_doctype='Customer', link_name=self.name))
address.save(ignore_permissions=self.flags.ignore_permissions)
lead = frappe.db.get_value("Lead", self.lead_name, ["organization_lead", "lead_name", "email_id", "phone", "mobile_no", "gender", "salutation"], as_dict=True)
if not lead.lead_name:
frappe.throw(_("Please mention the Lead Name in Lead {0}").format(self.lead_name))
if lead.organization_lead:
contact_names = frappe.get_all('Dynamic Link', filters={
"parenttype":"Contact",
"link_doctype":"Lead",
"link_name":self.lead_name
}, fields=["parent as name"])
for contact_name in contact_names:
contact = frappe.get_doc('Contact', contact_name.get('name'))
if not contact.has_link('Customer', self.name):
contact.append('links', dict(link_doctype='Customer', link_name=self.name))
contact.save(ignore_permissions=self.flags.ignore_permissions)
else:
lead.lead_name = lead.lead_name.lstrip().split(" ")
lead.first_name = lead.lead_name[0]
lead.last_name = " ".join(lead.lead_name[1:])
# create contact from lead
contact = frappe.new_doc('Contact')
contact.first_name = lead.first_name
contact.last_name = lead.last_name
contact.gender = lead.gender
contact.salutation = lead.salutation
contact.email_id = lead.email_id
contact.phone = lead.phone
contact.mobile_no = lead.mobile_no
contact.is_primary_contact = 1
contact.append('links', dict(link_doctype='Customer', link_name=self.name))
if lead.email_id:
contact.append('email_ids', dict(email_id=lead.email_id, is_primary=1))
if lead.mobile_no:
contact.append('phone_nos', dict(phone=lead.mobile_no, is_primary_mobile_no=1))
contact.flags.ignore_permissions = self.flags.ignore_permissions
contact.autoname()
if not frappe.db.exists("Contact", contact.name):
contact.insert()
def validate_name_with_customer_group(self):
if frappe.db.exists("Customer Group", self.name):
frappe.throw(_("A Customer Group exists with same name please change the Customer name or rename the Customer Group"), frappe.NameError)
def validate_credit_limit_on_change(self):
if self.get("__islocal") or not self.credit_limits:
return
past_credit_limits = [d.credit_limit
for d in frappe.db.get_all("Customer Credit Limit", filters={'parent': self.name}, fields=["credit_limit"], order_by="company")]
current_credit_limits = [d.credit_limit for d in sorted(self.credit_limits, key=lambda k: k.company)]
if past_credit_limits == current_credit_limits:
return
company_record = []
for limit in self.credit_limits:
if limit.company in company_record:
frappe.throw(_("Credit limit is already defined for the Company {0}").format(limit.company, self.name))
else:
company_record.append(limit.company)
outstanding_amt = get_customer_outstanding(self.name, limit.company)
if flt(limit.credit_limit) < outstanding_amt:
frappe.throw(_("""New credit limit is less than current outstanding amount for the customer. Credit limit has to be atleast {0}""").format(outstanding_amt))
def on_trash(self):
if self.customer_primary_contact:
frappe.db.sql("""update `tabCustomer`
set customer_primary_contact=null, mobile_no=null, email_id=null
where name=%s""", self.name)
delete_contact_and_address('Customer', self.name)
if self.lead_name:
frappe.db.sql("update `tabLead` set status='Interested' where name=%s", self.lead_name)
def after_rename(self, olddn, newdn, merge=False):
if frappe.defaults.get_global_default('cust_master_name') == 'Customer Name':
frappe.db.set(self, "customer_name", newdn)
def set_loyalty_program(self):
if self.loyalty_program: return
loyalty_program = get_loyalty_programs(self)
if not loyalty_program: return
if len(loyalty_program) == 1:
self.loyalty_program = loyalty_program[0]
else:
frappe.msgprint(_("Multiple Loyalty Program found for the Customer. Please select manually."))
def create_onboarding_docs(self, args):
defaults = frappe.defaults.get_defaults()
company = defaults.get('company') or \
frappe.db.get_single_value('Global Defaults', 'default_company')
for i in range(1, args.get('max_count')):
customer = args.get('customer_name_' + str(i))
if customer:
try:
doc = frappe.get_doc({
'doctype': self.doctype,
'customer_name': customer,
'customer_type': 'Company',
'customer_group': _('Commercial'),
'territory': defaults.get('country'),
'company': company
}).insert()
if args.get('customer_email_' + str(i)):
create_contact(customer, self.doctype,
doc.name, args.get("customer_email_" + str(i)))
except frappe.NameError:
pass
def create_contact(contact, party_type, party, email):
"""Create contact based on given contact name"""
contact = contact.split(' ')
contact = frappe.get_doc({
'doctype': 'Contact',
'first_name': contact[0],
'last_name': len(contact) > 1 and contact[1] or ""
})
contact.append('email_ids', dict(email_id=email, is_primary=1))
contact.append('links', dict(link_doctype=party_type, link_name=party))
contact.insert()
@frappe.whitelist()
def make_quotation(source_name, target_doc=None):
def set_missing_values(source, target):
_set_missing_values(source, target)
target_doc = get_mapped_doc("Customer", source_name,
{"Customer": {
"doctype": "Quotation",
"field_map": {
"name":"party_name"
}
}}, target_doc, set_missing_values)
target_doc.quotation_to = "Customer"
target_doc.run_method("set_missing_values")
target_doc.run_method("set_other_charges")
target_doc.run_method("calculate_taxes_and_totals")
price_list, currency = frappe.db.get_value("Customer", {'name': source_name}, ['default_price_list', 'default_currency'])
if price_list:
target_doc.selling_price_list = price_list
if currency:
target_doc.currency = currency
return target_doc
@frappe.whitelist()
def make_opportunity(source_name, target_doc=None):
def set_missing_values(source, target):
_set_missing_values(source, target)
target_doc = get_mapped_doc("Customer", source_name,
{"Customer": {
"doctype": "Opportunity",
"field_map": {
"name": "party_name",
"doctype": "opportunity_from",
}
}}, target_doc, set_missing_values)
return target_doc
def _set_missing_values(source, target):
address = frappe.get_all('Dynamic Link', {
'link_doctype': source.doctype,
'link_name': source.name,
'parenttype': 'Address',
}, ['parent'], limit=1)
contact = frappe.get_all('Dynamic Link', {
'link_doctype': source.doctype,
'link_name': source.name,
'parenttype': 'Contact',
}, ['parent'], limit=1)
if address:
target.customer_address = address[0].parent
if contact:
target.contact_person = contact[0].parent
@frappe.whitelist()
def get_loyalty_programs(doc):
''' returns applicable loyalty programs for a customer '''
from frappe.desk.treeview import get_children
lp_details = []
loyalty_programs = frappe.get_all("Loyalty Program",
fields=["name", "customer_group", "customer_territory"],
filters={"auto_opt_in": 1, "from_date": ["<=", today()],
"ifnull(to_date, '2500-01-01')": [">=", today()]})
for loyalty_program in loyalty_programs:
customer_groups = [d.value for d in get_children("Customer Group", loyalty_program.customer_group)] + [loyalty_program.customer_group]
customer_territories = [d.value for d in get_children("Territory", loyalty_program.customer_territory)] + [loyalty_program.customer_territory]
if (not loyalty_program.customer_group or doc.customer_group in customer_groups)\
and (not loyalty_program.customer_territory or doc.territory in customer_territories):
lp_details.append(loyalty_program.name)
return lp_details
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_customer_list(doctype, txt, searchfield, start, page_len, filters=None):
from erpnext.controllers.queries import get_fields
fields = ["name", "customer_name", "customer_group", "territory"]
if frappe.db.get_default("cust_master_name") == "Customer Name":
fields = ["name", "customer_group", "territory"]
fields = get_fields("Customer", fields)
match_conditions = build_match_conditions("Customer")
match_conditions = "and {}".format(match_conditions) if match_conditions else ""
if filters:
filter_conditions = get_filters_cond(doctype, filters, [])
match_conditions += "{}".format(filter_conditions)
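	# field names, searchfield and match conditions are interpolated into the SQL text;
	# user-supplied values (txt, start, page_len) stay as %s placeholders bound below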
return frappe.db.sql("""
select %s
from `tabCustomer`
where docstatus < 2
and (%s like %s or customer_name like %s)
{match_conditions}
order by
case when name like %s then 0 else 1 end,
case when customer_name like %s then 0 else 1 end,
name, customer_name limit %s, %s
""".format(match_conditions=match_conditions) % (", ".join(fields), searchfield, "%s", "%s", "%s", "%s", "%s", "%s"),
("%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, start, page_len))
def check_credit_limit(customer, company, ignore_outstanding_sales_order=False, extra_amount=0):
customer_outstanding = get_customer_outstanding(customer, company, ignore_outstanding_sales_order)
if extra_amount > 0:
customer_outstanding += flt(extra_amount)
credit_limit = get_credit_limit(customer, company)
if credit_limit > 0 and flt(customer_outstanding) > credit_limit:
msgprint(_("Credit limit has been crossed for customer {0} ({1}/{2})")
.format(customer, customer_outstanding, credit_limit))
# If not authorized person raise exception
credit_controller_role = frappe.db.get_single_value('Accounts Settings', 'credit_controller')
if not credit_controller_role or credit_controller_role not in frappe.get_roles():
# form a list of emails for the credit controller users
credit_controller_users = get_users_with_role(credit_controller_role or "Sales Master Manager")
# form a list of emails and names to show to the user
credit_controller_users_formatted = [get_formatted_email(user).replace("<", "(").replace(">", ")") for user in credit_controller_users]
if not credit_controller_users_formatted:
frappe.throw(_("Please contact your administrator to extend the credit limits for {0}.").format(customer))
message = """Please contact any of the following users to extend the credit limits for {0}:
<br><br><ul><li>{1}</li></ul>""".format(customer, '<li>'.join(credit_controller_users_formatted))
# if the current user does not have permissions to override credit limit,
# prompt them to send out an email to the controller users
frappe.msgprint(message,
title="Notify",
raise_exception=1,
primary_action={
'label': 'Send Email',
'server_action': 'erpnext.selling.doctype.customer.customer.send_emails',
'args': {
'customer': customer,
'customer_outstanding': customer_outstanding,
'credit_limit': credit_limit,
'credit_controller_users_list': credit_controller_users
}
}
)
@frappe.whitelist()
def send_emails(args):
args = json.loads(args)
subject = (_("Credit limit reached for customer {0}").format(args.get('customer')))
message = (_("Credit limit has been crossed for customer {0} ({1}/{2})")
.format(args.get('customer'), args.get('customer_outstanding'), args.get('credit_limit')))
frappe.sendmail(recipients=args.get('credit_controller_users_list'), subject=subject, message=message)
def get_customer_outstanding(customer, company, ignore_outstanding_sales_order=False, cost_center=None):
# Outstanding based on GL Entries
cond = ""
if cost_center:
lft, rgt = frappe.get_cached_value("Cost Center",
cost_center, ['lft', 'rgt'])
cond = """ and cost_center in (select name from `tabCost Center` where
lft >= {0} and rgt <= {1})""".format(lft, rgt)
outstanding_based_on_gle = frappe.db.sql("""
select sum(debit) - sum(credit)
from `tabGL Entry` where party_type = 'Customer'
and party = %s and company=%s {0}""".format(cond), (customer, company))
outstanding_based_on_gle = flt(outstanding_based_on_gle[0][0]) if outstanding_based_on_gle else 0
# Outstanding based on Sales Order
outstanding_based_on_so = 0.0
# if credit limit check is bypassed at sales order level,
# we should not consider outstanding Sales Orders, when customer credit balance report is run
if not ignore_outstanding_sales_order:
outstanding_based_on_so = frappe.db.sql("""
select sum(base_grand_total*(100 - per_billed)/100)
from `tabSales Order`
where customer=%s and docstatus = 1 and company=%s
and per_billed < 100 and status != 'Closed'""", (customer, company))
outstanding_based_on_so = flt(outstanding_based_on_so[0][0]) if outstanding_based_on_so else 0.0
# Outstanding based on Delivery Note, which are not created against Sales Order
unmarked_delivery_note_items = frappe.db.sql("""select
dn_item.name, dn_item.amount, dn.base_net_total, dn.base_grand_total
from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
where
dn.name = dn_item.parent
and dn.customer=%s and dn.company=%s
and dn.docstatus = 1 and dn.status not in ('Closed', 'Stopped')
and ifnull(dn_item.against_sales_order, '') = ''
and ifnull(dn_item.against_sales_invoice, '') = ''
""", (customer, company), as_dict=True)
outstanding_based_on_dn = 0.0
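	# for each unbilled delivery note item, take the amount not yet invoiced and
	# scale it from net to grand total before adding it to the outstanding figure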
for dn_item in unmarked_delivery_note_items:
si_amount = frappe.db.sql("""select sum(amount)
from `tabSales Invoice Item`
where dn_detail = %s and docstatus = 1""", dn_item.name)[0][0]
if flt(dn_item.amount) > flt(si_amount) and dn_item.base_net_total:
outstanding_based_on_dn += ((flt(dn_item.amount) - flt(si_amount)) \
/ dn_item.base_net_total) * dn_item.base_grand_total
return outstanding_based_on_gle + outstanding_based_on_so + outstanding_based_on_dn
def get_credit_limit(customer, company):
credit_limit = None
if customer:
credit_limit = frappe.db.get_value("Customer Credit Limit",
{'parent': customer, 'parenttype': 'Customer', 'company': company}, 'credit_limit')
if not credit_limit:
customer_group = frappe.get_cached_value("Customer", customer, 'customer_group')
credit_limit = frappe.db.get_value("Customer Credit Limit",
{'parent': customer_group, 'parenttype': 'Customer Group', 'company': company}, 'credit_limit')
if not credit_limit:
credit_limit = frappe.get_cached_value('Company', company, "credit_limit")
return flt(credit_limit)
def make_contact(args, is_primary_contact=1):
contact = frappe.get_doc({
'doctype': 'Contact',
'first_name': args.get('name'),
'is_primary_contact': is_primary_contact,
'links': [{
'link_doctype': args.get('doctype'),
'link_name': args.get('name')
}]
})
if args.get('email_id'):
contact.add_email(args.get('email_id'), is_primary=True)
if args.get('mobile_no'):
contact.add_phone(args.get('mobile_no'), is_primary_mobile_no=True)
contact.insert()
return contact
def make_address(args, is_primary_address=1):
reqd_fields = []
for field in ['city', 'country']:
if not args.get(field):
reqd_fields.append( '<li>' + field.title() + '</li>')
if reqd_fields:
msg = _("Following fields are mandatory to create address:")
frappe.throw("{0} <br><br> <ul>{1}</ul>".format(msg, '\n'.join(reqd_fields)),
title = _("Missing Values Required"))
address = frappe.get_doc({
'doctype': 'Address',
'address_title': args.get('name'),
'address_line1': args.get('address_line1'),
'address_line2': args.get('address_line2'),
'city': args.get('city'),
'state': args.get('state'),
'pincode': args.get('pincode'),
'country': args.get('country'),
'links': [{
'link_doctype': args.get('doctype'),
'link_name': args.get('name')
}]
}).insert()
return address
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_customer_primary_contact(doctype, txt, searchfield, start, page_len, filters):
customer = filters.get('customer')
return frappe.db.sql("""
select `tabContact`.name from `tabContact`, `tabDynamic Link`
where `tabContact`.name = `tabDynamic Link`.parent and `tabDynamic Link`.link_name = %(customer)s
and `tabDynamic Link`.link_doctype = 'Customer'
and `tabContact`.name like %(txt)s
""", {
'customer': customer,
'txt': '%%%s%%' % txt
})
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import os
import sys
import time
import h5py
import numpy as np
import tensorflow as tf
import basenji
"""basenji_final.py
Write the weights from the final model layer.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file>'
parser = OptionParser(usage)
parser.add_option('-o', dest='out_npy', default='final.npy')
(options, args) = parser.parse_args()
if len(args) != 2:
    parser.error('Must provide parameters and model files')
else:
params_file = args[0]
model_file = args[1]
#######################################################
# model parameters and placeholders
job = basenji.dna_io.read_job_params(params_file)
model = basenji.seqnn.SeqNN()
model.build(job)
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
if v.name == 'final/dense/kernel:0':
np.save(options.out_npy, v.eval())
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
from django.contrib import admin
from .models import Budget, Transaction
# Register your models here.
admin.site.register((Budget, Transaction))
|
import sys
V = [0x64, 0x73, 0x66, 0x64, 0x3b, 0x6b, 0x66, 0x6f, 0x41, 0x2c, 0x2e,
0x69, 0x79, 0x65, 0x77, 0x72, 0x6b, 0x6c, 0x64, 0x4a, 0x4b, 0x44,
0x48, 0x53, 0x55, 0x42, 0x73, 0x67, 0x76, 0x63, 0x61, 0x36, 0x39,
0x38, 0x33, 0x34, 0x6e, 0x63, 0x78, 0x76, 0x39, 0x38, 0x37, 0x33,
0x32, 0x35, 0x34, 0x6b, 0x3b, 0x66, 0x67, 0x38, 0x37]
if len(sys.argv) != 2:
print("Usage: type7.py hash")
sys.exit(0)
hash = sys.argv[1]
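# Cisco type 7 obfuscation: the first two hex digits give the starting index
# into the key table V; each following hex pair is XORed with the next key byte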
i = int(hash[:2], 16)
r = ""
for j in range(2, len(hash), 2):
h = int(hash[j:j+2], 16)
r = r + chr(h ^ V[i])
i = (i + 1) % 53
print(r)
|
#! /usr/bin/python
import sys
fo = open(sys.argv[1])
ifo = iter(fo)
size = 0
num = 0
for line in ifo:
num += 1
size += len(line)
print(size, num)
|
from typing import List
class Solution:
def findErrorNums(self, nums: List[int]) -> List[int]:
missing = duplicated = 0
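        # Sign-marking: negating nums[val-1] flags val as seen, so a slot that is
        # already negative exposes the repeated value (stored in `missing` here).
        # XORing every index (i+1) with every value leaves missing ^ duplicate,
        # and XORing the duplicate back out at the end yields the missing number.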
for i, num in enumerate(nums):
val = abs(num)
duplicated ^= (i+1) ^ val
index = val - 1
if nums[index] < 0:
missing = val
else:
nums[index] = -nums[index]
duplicated ^= missing
return [missing, duplicated]
|
#!/usr/bin/env python
"""
ConfigParser.py
Parse .pyini files (custom generated user ini files)
These files are JSON-formatted flat files that can be edited with any text editor
Using Anaconda packaged Python
"""
#System Stack
import json
import sys
def get_config(infile):
""" Input - full path to config file
Output - dictionary of file config parameters
"""
infile = str(infile)
    try:
        d = json.load(open(infile))
    except (IOError, ValueError):
        print("Invalid or missing config file \n Exiting without changes")
        sys.exit()
    return d
def write_config(infile, d):
""" Input - full path to config file
Dictionary of parameters to write
Output - None
"""
infile = str(infile)
    try:
        json.dump(d, open(infile, 'w'), sort_keys=True, indent=4)
    except (IOError, TypeError):
        print("Invalid or missing config file \n Exiting without changes")
        sys.exit()
|
import numpy as np
import torch
from scipy.integrate import solve_ivp
from experiments.hnn.data import plot_test2, integrate_model, plot_training2, get_dataset_osc
from experiments.hnn.data import m, g
from modelzoo.hnn import HNN, MLP
def train(args):
# set random seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# init model and optimizer
if args.verbose:
print("Training baseline model:" if args.baseline else "Training HNN model:")
output_dim = args.input_dim if args.baseline else 2
nn_model = MLP(args.input_dim, args.hidden_dim, output_dim, args.nonlinearity)
model = HNN(args.input_dim, differentiable_model=nn_model,
field_type=args.field_type, baseline=args.baseline)
optim = torch.optim.Adam(model.parameters(), args.learn_rate, weight_decay=1e-4)
l2_loss = torch.nn.MSELoss()
# arrange data
r, b, noise, seq_length = .3, 0.1, 0., 400
print(r, b, noise, seq_length)
data = get_dataset_osc(seed=args.seed, radius=r, length=seq_length, start=0, noise_std=noise, damping=b)
x = torch.tensor(data['x'], requires_grad=True, dtype=torch.float32)
test_x = torch.tensor(data['test_x'], requires_grad=True, dtype=torch.float32)
dxdt = torch.Tensor(data['dx'])
test_dxdt = torch.Tensor(data['test_dx'])
# ground truth energy
t = np.linspace(0, 25, 1001)
ivp_kwargs = {
't_span': (t[0], t[-1]),
'y0': np.array([r, 0]),
'rtol': 1e-12
}
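    # ground-truth dynamics of a damped pendulum: dq/dt = p/m, dp/dt = -b*dq/dt - m*g*sin(q)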
def _dynamics(t, theta):
dtheta1 = theta[1] / m
dtheta2 = -b * dtheta1 - m * g * np.sin(theta[0])
return [dtheta1, dtheta2]
res = solve_ivp(fun=_dynamics, t_eval=t, **ivp_kwargs)
q, p = res['y']
e_pot, e_kin = m * g * (1 - np.cos(q)), p ** 2 / (2 * m)
# vanilla train loop
stats = {'train_loss': [], 'test_loss': []}
for step in range(args.total_steps + 1):
# train step
dxdt_hat = model.time_derivative(x)
loss = l2_loss(dxdt, dxdt_hat)
        loss.backward()
        optim.step()
        optim.zero_grad()
# run test data
test_dxdt_hat = model.time_derivative(test_x)
test_loss = l2_loss(test_dxdt, test_dxdt_hat)
# logging
stats['train_loss'].append(loss.item())
stats['test_loss'].append(test_loss.item())
if args.verbose and step % args.print_every == 0:
print("step {}, train_loss {:.4e}, test_loss {:.4e}".format(step, loss.item(), test_loss.item()))
res = integrate_model(model, t_eval=t[:seq_length], **ivp_kwargs)
q_hat, p_hat = res['y']
e_pot_hat, e_kin_hat = m * g * (1 - np.cos(q_hat)), p_hat ** 2 / (2 * m)
plot_training2(t, e_kin, e_pot, e_kin_hat, e_pot_hat, False,
seq_len=seq_length, title_appendix=f"Epoch {step}").show()
train_dxdt_hat = model.time_derivative(x)
train_dist = (dxdt - train_dxdt_hat) ** 2
test_dxdt_hat = model.time_derivative(test_x)
test_dist = (test_dxdt - test_dxdt_hat) ** 2
print('Final train loss {:.4e} +/- {:.4e}\nFinal test loss {:.4e} +/- {:.4e}'
.format(train_dist.mean().item(), train_dist.std().item() / np.sqrt(train_dist.shape[0]),
test_dist.mean().item(), test_dist.std().item() / np.sqrt(test_dist.shape[0])))
res = integrate_model(model, t_eval=t, **ivp_kwargs)
q_hat, p_hat = res['y']
e_pot_hat, e_kin_hat = m * g * (1 - np.cos(q_hat)), p_hat ** 2 / (2 * m)
mse_pot = np.power(e_pot_hat - e_pot, 2)
mse_kin = np.power(e_kin_hat - e_kin, 2)
print(f"MSE pot - Train: {mse_pot[:seq_length].mean():.5f}, Test: {mse_pot[seq_length:].mean():.5f}")
print(f"MSE kin - Train: {mse_kin[:seq_length].mean():.5f}, Test: {mse_kin[seq_length:].mean():.5f}")
fig = plot_test2(t, e_kin, e_pot, e_kin_hat, e_pot_hat, length=seq_length, modeltype='HNN')
fig.gca().set_title("")
fig.savefig("hnn_friction.png")
fig.show()
return model, stats
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--input_dim', default=2, type=int, help='dimensionality of input tensor')
parser.add_argument('--hidden_dim', default=200, type=int, help='hidden dimension of mlp')
parser.add_argument('--learn_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--nonlinearity', default='tanh', type=str, help='neural net nonlinearity')
parser.add_argument('--total_steps', default=2000, type=int, help='number of gradient steps')
parser.add_argument('--print_every', default=200, type=int, help='number of gradient steps between prints')
parser.add_argument('--name', default='pend', type=str, help='only one option right now')
parser.add_argument('--baseline', dest='baseline', action='store_true', help='run baseline or experiment?')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose?')
parser.add_argument('--field_type', default='solenoidal', type=str, help='type of vector field to learn')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.set_defaults(feature=True)
args = parser.parse_args()
model, stats = train(args)
|
"""A set of views every cart needs.
On success, each view returns a JSON-response with the cart
representation. For the details on the format of the return value,
see the :meth:`~easycart.cart.BaseCart.encode` method of the
:class:`~easycart.cart.BaseCart` class.
If a parameter required by a view is not present in the request's POST
data, then the JSON-response will have the format::
{'error': 'MissingRequestParam', 'param': parameter_name}
Almost the same thing happens if a parameter is invalid and results in
an exception, which is a subclass of :class:`~easycart.cart.CartException`.
In this case, the error value will be the name of the concrete exception
class (e.g. ``'ItemNotInCart'`` or ``'NegativeItemQuantity'``).
And instead of ``param`` there may be one or more items providing
additional info on the error, for example, the primary key of an item
you were trying to change or an invalid quantity passed in the request.
Note
----
All of the views in this module accept only POST requests.
Warning
-------
The views in this module do not protect you from race conditions, which
may occur if, for example, server receives requests changing the cart
state almost simultaneously. It seems there's no good
platform-independent way to do it (see `this issue
<https://github.com/nevimov/django-easycart/issues/8>`_).
For now, I suggest to use JavaScript to ensure that you don't make
new requests to the cart until you have a response for the current one.
Feel free to reopen the issue, if you have any suggestions on how to
improve the situation.
"""
from importlib import import_module
from django.conf import settings
from django.http import JsonResponse
from django.views.generic import View
from easycart.cart import CartException
__all__ = [
'AddItem',
'RemoveItem',
'ChangeItemQuantity',
'EmptyCart',
]
cart_module, cart_class = settings.EASYCART_CART_CLASS.rsplit('.', 1)
Cart = getattr(import_module(cart_module), cart_class)
class CartView(View):
"""Base class for views operating the cart."""
action = None
"""Attribute of the cart object, which will be called to perform
some action on the cart.
"""
required_params = ()
"""Iterable of parameters, which MUST be present in the post data."""
optional_params = {}
"""Dictionary of parameters, which MAY be present in the post data.
Parameters serve as keys. Associated values will be used as fallbacks
in case the parameter is not in the post data.
"""
def post(self, request):
# Extract parameters from the post data
params = {}
for param in self.required_params:
try:
params[param] = request.POST[param]
except KeyError:
return JsonResponse({
'error': 'MissingRequestParam',
'param': param,
})
for param, fallback in self.optional_params.items():
params[param] = request.POST.get(param, fallback)
# Perform an action on the cart using these parameters
cart = Cart(request)
action = getattr(cart, self.action)
try:
action(**params)
except CartException as exc:
return JsonResponse(dict({'error': exc.__class__.__name__},
**exc.kwargs))
return cart.encode()
class AddItem(CartView):
"""Add an item to the cart.
This view expects `request.POST` to contain:
+------------+----------------------------------------------------+
| key | value |
+============+====================================================+
| `pk` | the primary key of an item to add |
+------------+----------------------------------------------------+
| `quantity` | a quantity that should be associated with the item |
+------------+----------------------------------------------------+
The `quantity` parameter is optional (defaults to 1).
"""
action = 'add'
required_params = ('pk',)
optional_params = {'quantity': 1}
class ChangeItemQuantity(CartView):
"""Change the quantity associated with an item.
This view expects `request.POST` to contain:
+------------+----------------------------------------------------+
| key | value |
+============+====================================================+
| `pk` | the primary key of an item |
+------------+----------------------------------------------------+
| `quantity` | a new quantity to associate with the item |
+------------+----------------------------------------------------+
"""
action = 'change_quantity'
required_params = ('pk', 'quantity')
class RemoveItem(CartView):
"""Remove an item from the cart.
Expects `request.POST` to contain key *pk*. The associated value
should be the primary key of an item you wish to remove.
"""
action = 'remove'
required_params = ('pk',)
class EmptyCart(CartView):
"""Remove all items from the cart."""
action = 'empty'
|
from alsaaudio import Mixer
from datetime import datetime
import fcntl
import i3ipc
import re
import socket
import struct
import subprocess
from time import time
import gi
gi.require_version('Playerctl', '1.0')
from gi.repository import Playerctl
import sys
class Base:
is_callback_block = False
display = True
def __init__(self, icon='', interval=1, margin=0, padding=0,
foreground=None, background=None, swap=False):
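        # Wrap '{value}' in bar formatting directives (lemonbar-style markup is
        # assumed here): %{F..}/%{B..} set colours, %{R} swaps them; the
        # placeholder is substituted later via format_map(SafeDict(...)).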
self.interval = interval
template = '{value}'
# Set icon
if icon:
icon_str = self.format_icon(icon)
template = f'{icon_str} {template}'
# Set padding
if padding:
template = self.__set_spacing(padding, template)
# Setting colors
if foreground:
template = self.__set_foreground(template, foreground)
if background:
template = self.__set_background(template, background)
if swap:
template = '%{{R}}' + template + '%{{R}}'
# Set margin
if margin:
template = self.__set_spacing(margin, template)
self.template = template
self.output = ''
@staticmethod
def __set_spacing(spacing, o):
if type(spacing) == list:
l = ' '*spacing[0]
r = ' '*spacing[1]
else:
l = ' '*spacing
r = ' '*spacing
return l + o + r
@staticmethod
def __set_foreground(string, color):
return f'%{{F{color}}}{string}%{{F-}}'
@staticmethod
def __set_background(string, color):
return f'%{{B{color}}}{string}%{{B-}}'
@staticmethod
def format_icon(icon_obj):
icon_str = ''
if type(icon_obj) == str:
icon_str = icon_obj
elif type(icon_obj) == dict:
icon_str = icon_obj['str']
if icon_obj.get('foreground'):
icon_str = Base.__set_foreground(icon_str,
icon_obj['foreground'])
if icon_obj.get('background'):
icon_str = Base.__set_background(icon_str,
icon_obj['background'])
return icon_str
def __call__(self):
raise NotImplementedError('Function needs to be implemented')
class SafeDict(dict):
''' Dict that leaves missing format keys as is '''
def __missing__(self, key):
return '{' + key + '}'
class Widget(Base):
def __init__(self, **kwds):
super().__init__(**kwds)
self.prevtime = 0
self.refresh = False
def __call__(self):
if self.interval == -1:
# Only call update once
if self.prevtime != -1:
self.update()
self.prevtime = -1
elif (time() >= self.prevtime + self.interval) or self.refresh:
self.update()
self.prevtime = time()
self.refresh = False
if self.display:
            return self.template.format_map(SafeDict(value=self.output))
return ''
def update(self):
raise NotImplementedError('Function needs to be implemented')
class Raw(Base):
def __init__(self, text='', **kwds):
super().__init__(**kwds)
self.output = text
def __call__(self):
        return self.template.format_map(SafeDict(value=self.output))
class Align(Raw):
def __init__(self, align, **kwds):
super().__init__(**kwds)
self.output = '%{{{}}}'.format(align)
class Clock(Widget):
def __init__(self, layout='%d %b %Y %H:%M:%S', **kwds):
super().__init__(**kwds)
self.layout = layout
def update(self):
self.output = datetime.today().strftime(self.layout)
class Volume(Widget):
def update(self):
m = Mixer()
self.output = '{}%'.format(m.getvolume()[0])
class WorkspacesDots(Widget):
i3 = i3ipc.Connection()
def __init__(self, underline=None,
icons={'empty': 'o', 'nonempty': '0',
'visible': 'x'}, spacing=0, **kwds):
super().__init__(**kwds)
self.icons = {}
for k, icon in icons.items():
self.icons[k] = self.format_icon(icon)
self.underline = underline
self.spacing = spacing
def update(self):
out = [self.icons['empty'] for __ in range(10)]
for workspace in self.i3.get_workspaces():
ind = int(workspace['num']) - 1
if ind < 0:
ind = 9
if workspace['visible']:
out[ind] = self.icons['visible']
else:
out[ind] = self.icons['nonempty']
if workspace['focused']:
out[ind] = '%{!u}' + out[ind] + '%{!u}'
self.output = (' '*self.spacing).join(out)
if self.underline:
self.output = '%{{U{}}}'.format(self.underline)\
+ self.output + '%{U-}'
class Memory(Widget):
def __init__(self, percentage=False, interval=5, **kwds):
super().__init__(interval=interval, **kwds)
self.percentage = percentage
def update(self):
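        # used memory = MemTotal - (MemFree + Buffers + Cached), parsed from /proc/meminfo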
with open('/proc/meminfo', 'r') as mem:
total = 0
available = 0
for line in mem:
line_split = line.split()
if line_split[0] == 'MemTotal:':
total = int(line_split[1])
elif line_split[0] in ['MemFree:', 'Buffers:', 'Cached:']:
available += int(line_split[1])
used_mb = round((total-available)/1024)
used_perc = round((available/total)*100)
self.output = used_perc if self.percentage else used_mb
self.output = str(self.output) + 'M'
class IPAddress(Widget):
def __init__(self, interface='eth0', interval=900, **kwds):
super().__init__(interval=interval, **kwds)
self.interface = interface.encode('utf-8')
def update(self):
def get_ip_address(ifname):
# Props to 'Martin Konecny' on SO
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
self.output = get_ip_address(self.interface)
class Ping(Widget):
def __init__(self, host='8.8.8.8', interval=5, **kwds):
super().__init__(interval=interval, **kwds)
self.host = host
def update(self):
ping = subprocess.Popen('ping -c1 -W1 {}'.format(self.host),
shell=True, stdout=subprocess.PIPE
).stdout.read()
        reg = re.search(r'\d\d\.\d{3}/(\d\d\.\d{3})/\d\d\.\d{3}',
                        ping.decode())
if reg:
self.output = round(float(reg.groups()[0]))
else:
self.output = 0
self.output = str(self.output) + 'ms'
class Music(Widget):
player = Playerctl.Player()
def update(self):
if self.player.get_property('status') == 'Playing':
self.display = True
self.output = '{} - {}'.format(self.player.get_artist(),
self.player.get_title())
else:
self.display = False
class Battery(Widget):
''' Load from sys class '''
def __init__(self, power_supply='BAT0',
icons={'charging': 'c', 'discharging': 'd'}, **kwds):
super().__init__(**kwds)
self.power_supply = power_supply
self.icons = icons
def update(self):
with open('/sys/class/power_supply/{}/status'\
.format(self.power_supply), 'r') as f:
charging = 'Charging' in f.read()
with open('/sys/class/power_supply/{}/capacity'\
.format(self.power_supply), 'r') as f:
capacity = f.read().strip()
if charging:
self.output = '{} '.format(self.icons['charging'])
else:
num_of_icons = len(self.icons['discharging'])
ind = round(int(capacity)/100 * (num_of_icons-1))
self.output = '{} '.format(self.icons['discharging'][ind])
self.output += '{}%'.format(capacity)
class Wifi(Widget):
def __init__(self, interface='wlan0', **kwds):
super().__init__(**kwds)
self.interface = interface
def update(self):
try:
ssid = subprocess.check_output(['iw', 'dev', self.interface, 'info'])
for l in ssid.split(b'\n\t'):
if l.startswith(b'ssid'):
self.output = l[5:].decode()
except subprocess.CalledProcessError:
self.output = 'Interface N/A'
|
from colusa.etr import Extractor, register_extractor
@register_extractor('//xp123.com')
class XP123Extractor(Extractor):
def _find_main_content(self):
return self.bs.find('article', attrs={'class': 'post'})
def cleanup(self):
self.remove_tag(self.main_content, 'header', attrs={'class': 'entry-header'})
self.remove_tag(self.main_content, 'section', attrs={'class': 'yikes-mailchimp-container'})
self.remove_tag(self.main_content, 'footer', attrs={'class': 'entry-meta'})
|
import unittest
import time
from .lrucache import LRUCache, CacheData
class LRUCacheTest(unittest.TestCase):
def test_get1(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
value = cache.get(0)
self.assertEqual(value['mydata'], 0)
self.assertEqual(count, 1)
self.assertEqual(len(cache._cache), 1)
self.assertEqual(cache._head.key, 0)
self.assertEqual(cache._cache[0].prev, None)
self.assertEqual(cache._cache[0].next, None)
def test_get2(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
for x in range(2):
value = cache.get(x)
self.assertEqual(value['mydata'], x)
self.assertEqual(len(cache._cache), 2)
self.assertEqual(count, 2)
self.assertEqual(cache._head.key, 1)
self.assertEqual(cache._cache[1].prev, None)
self.assertEqual(cache._cache[1].next.key, 0)
self.assertEqual(cache._cache[0].prev.key, 1)
self.assertEqual(cache._cache[0].next, None)
def test_evict(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
for x in range(100):
value = cache.get(x)
self.assertEqual(value['mydata'], x)
self.assertEqual(len(cache._cache), 100)
self.assertEqual(count, 100)
for x in range(100):
self.assertEqual(cache.get(x)['mydata'], x)
self.assertEqual(count, 100)
self.assertEqual(cache._head.key, 99)
self.assertEqual(cache._head.prev, None)
self.assertEqual(cache._cache[0].next, None)
self.assertEqual(cache._head.key, 99)
cache._evict(49) # evict from 49 to 0, 50-99 are newer
self.assertEqual(count, 100)
self.assertEqual(cache._head.key, 99)
self.assertEqual(cache._tail.key, 50)
self.assertEqual(len(cache._cache), 50)
self.assertEqual(cache._cache[50].prev.key, 51)
self.assertEqual(cache._cache[50].next, None)
cache._evict(cache._head.key)
self.assertEqual(len(cache._cache), 0)
self.assertEqual(cache._head, None)
self.assertEqual(cache._tail, None)
def test_promote(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
for x in range(10):
cache.get(x)
cache._promote(cache._cache[5])
self.assertEqual(cache._head.key, 5)
self.assertEqual(cache._cache[5].prev, None)
self.assertEqual(cache._cache[5].next.key, 9)
self.assertEqual(cache._cache[9].prev.key, 5)
self.assertEqual(cache._cache[9].next.key, 8)
self.assertEqual(cache._cache[8].prev.key, 9)
self.assertEqual(cache._cache[8].next.key, 7)
self.assertEqual(cache._cache[7].prev.key, 8)
self.assertEqual(cache._cache[7].next.key, 6)
self.assertEqual(cache._cache[6].prev.key, 7)
self.assertEqual(cache._cache[6].next.key, 4)
self.assertEqual(cache._cache[4].prev.key, 6)
self.assertEqual(cache._cache[4].next.key, 3)
self.assertEqual(cache._cache[0].prev.key, 1)
self.assertEqual(cache._cache[0].next, None)
cache._promote(cache._cache[0])
self.assertEqual(cache._cache[0].prev, None)
self.assertEqual(cache._cache[0].next.key, 5)
self.assertEqual(cache._cache[1].prev.key, 2)
self.assertEqual(cache._cache[1].next, None)
cache._promote(cache._cache[0])
self.assertEqual(cache._cache[0].prev, None)
self.assertEqual(cache._cache[0].next.key, 5)
self.assertEqual(cache._cache[5].prev.key, 0)
self.assertEqual(cache._cache[5].next.key, 9)
cache._promote(cache._cache[5])
self.assertEqual(cache._cache[5].prev, None)
self.assertEqual(cache._cache[5].next.key, 0)
self.assertEqual(cache._cache[0].prev.key, 5)
self.assertEqual(cache._cache[0].next.key, 9)
self.assertEqual(cache._cache[9].prev.key, 0)
self.assertEqual(cache._cache[9].next.key, 8)
def test_store_promote(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
cache._store(10, 10)
self.assertEqual(cache._head.key, 10)
self.assertEqual(cache._cache[10].prev, None)
self.assertEqual(cache._cache[10].next, None)
self.assertNotEqual(cache._cache[10].stamp, 0)
prevStamp = cache._cache[10].stamp
cache._promote(cache._cache[10])
self.assertEqual(cache._head.key, 10)
self.assertEqual(cache._cache[10].prev, None)
self.assertEqual(cache._cache[10].next, None)
self.assertGreater(cache._cache[10].stamp, prevStamp)
def test_shrink(self):
def dummyLoadCB(key):
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5)
cache._shrink(10)
self.assertEqual(len(cache._cache), 0)
for x in range(10):
cache.get(x)
self.assertEqual(len(cache._cache), 10)
cache._shrink(2)
self.assertEqual(len(cache._cache), 2)
self.assertEqual(cache._tail.key, 8)
self.assertEqual(cache._tail.next, None)
cache._shrink(0)
self.assertEqual(len(cache._cache), 0)
self.assertEqual(cache._head, None)
self.assertEqual(cache._tail, None)
def test_sizelimit(self):
def dummyLoadCB(key):
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.5, 2)
for x in range(10):
cache.get(x)
self.assertEqual(len(cache._cache), 2)
self.assertEqual(cache._tail.key, 8)
self.assertEqual(cache._tail.next, None)
def test_timeout(self):
count = 0
def dummyLoadCB(key):
nonlocal count
count += 1
return {"mydata": key}
cache = LRUCache(dummyLoadCB, 0.1, 2)
for x in range(3):
cache.get(x)
self.assertEqual(count, 3)
time.sleep(0.1)
for x in range(3):
cache.get(x)
self.assertEqual(count, 6)
time.sleep(0.05)
cache.get(1)
time.sleep(0.05)
cache.get(1)
time.sleep(0.05)
cache.get(1)
self.assertEqual(count, 6)
for x in range(3):
cache.get(x)
self.assertEqual(count, 8)
|
import json
from gevent import monkey
monkey.patch_all()
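# patch the stdlib (sockets in particular) before importing requests, so the
# gevent pool below can issue the HTTP requests concurrently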
import base64
import requests
import time
from gevent.pool import Pool
import argparse
parser = argparse.ArgumentParser(description='Inference Server: Client')
parser.add_argument('--url', type=str, default='http://localhost:8000/predict', help='test server')
parser.add_argument('--file', type=str, default='dog.jpg', help='test image file')
parser.add_argument('--n', type=int, default=1, help='request number')
args = parser.parse_args()
def req(json_data):
st = time.time()
resp = requests.post(args.url, json = json_data)
et = time.time()
print(f'req time: {et-st}, {resp.json()}')
def main():
# Initialize image path
with open(args.file, 'rb') as f:
image_data = base64.b64encode(f.read())
    image_str = image_data.decode('utf-8')
json_data = {
'salt_uuid': '550e8400-e29b-41d4-a716-446655440000',
'image_data': image_str
}
# with open('input.json', 'w') as f:
# json.dump(json_data, f)
req_numbers = [json_data] * args.n
pool = Pool()
st = time.time()
pool.map(req, req_numbers)
print(f'total time: {time.time() - st}')
return
if __name__ == '__main__':
main()
|
import argparse
import math
import numpy as np
import os
import pandas as pd
import sys
import yaml
## valid workflow id and minimum versions supported
VALID_WORKFLOWS = {
162: 16.67, ## 'NerveMarking1'
}
TASK_KEY_DISK_BOX = (162, 'T1')
TASK_KEY_MARK_FOVEA = (162, 'T3')
TASK_KEY_CUP_BOUNDARY = (162, 'T4')
TASK_KEY_DISK_BOUNDARY = (162, 'T5')
TASK_KEY_MARK_NOTCH_HAEMORRHAGE = (162, 'T6')
CSV_KEY_ORDER = ['created_at', 'user_name', 'expert', 'subject_id', 'subject_filename']
def df_xcols(df, cols):
xs = np.concatenate((cols, df.columns.values))
_, idx = np.unique(xs, return_index=True)
return df[xs[np.sort(idx)]]
def row_ukey(row):
expert_val = 1 if str(row['expert']) == 'expert' else 0
return {'user_name': row['user_name'], 'created_at': row['created_at'], 'expert': expert_val}
def row_skey(row):
## NB: assumes only one subject per classification
sub_dat = parse_field(row['subject_data'])
sub_key = next(iter(sub_dat.keys()))
## handle multiple versions
fnme_key = 'Filename' if 'Filename' in sub_dat[sub_key] else 'filename'
return {'subject_id': sub_key, 'subject_filename': sub_dat[sub_key][fnme_key]}
def parse_field(field):
return yaml.safe_load(field) if type(field) is str else field
def parse_point_array(point_list):
return pd.DataFrame(point_list).values
def is_task_key(task_key, workflow_id, annotation_row, skip_empty_value=True):
if workflow_id != task_key[0]:
return False
if ('task' in annotation_row) and ('task_label' in annotation_row):
if annotation_row['task'] == task_key[1]:
if not skip_empty_value:
return True
else:
arv = annotation_row['value']
return (not arv is None) and (len(annotation_row['value']) > 0)
return False
else:
print('No task, task_label with value in annotation: %s'%str(annotation_row))
return False
def push_keys_and_dict_onto_list(ukey, skey, rdict, the_list):
rdict.update(ukey)
rdict.update(skey)
the_list.append(rdict)
return the_list
def poly_area(x,y):
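    ## shoelace formula for the area of a simple polygon given its vertex coordinates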
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def nan2value(x, v):
return x if not math.isnan(x) else v
def calc_f2d_mu_scale(df):
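    ## pixels-to-microns scale, assuming a nominal fovea-to-disc-centre distance of 4500 microns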
return 4500./np.sqrt((df['fovea_center_x'].values - df['disk_center_x'].values)**2 + (df['fovea_center_y'].values - df['disk_center_y'].values)**2)
class BaseAccumulator(object):
def setup(self, df): pass
def handle_row(self, rkey, skey, row): pass
def finish(self, df): pass
class DataFrameAccumulator(BaseAccumulator):
def __init__(self):
self.row_list = []
self.stat_df = None
def dataframe(self):
if self.stat_df is None and len(self.row_list)>0:
self.stat_df = df_xcols(pd.DataFrame(self.row_list), CSV_KEY_ORDER)
return self.stat_df
class AccumulateWorkflows(BaseAccumulator):
def finish(self, df):
print('Workflow stats:')
print(df.groupby(['workflow_id', 'workflow_name', 'workflow_version'])['user_name'].count())
class AccumulateTasks(BaseAccumulator):
def setup(self, df):
self.task_dict = {}
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
for x in annotations:
if ('task' in x) and ('task_label' in x):
k = (workflow_id, x['task'], x['task_label'])
if k not in self.task_dict: self.task_dict[k] = 0
self.task_dict[k] += 1
else:
print('No task, task_label in annotation of row: %s|%s item dump: %s'%(rkey, skey, str(x)))
def finish(self, df):
print('Task label stats:')
for k, v in iter(self.task_dict.items()):
print("%s: %i"%(k,v))
class AccumulateOpticNerveBox(DataFrameAccumulator):
def __init__(self, out_file=None):
super(AccumulateOpticNerveBox, self).__init__()
self.out_file = out_file
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
for x in annotations:
if is_task_key(TASK_KEY_DISK_BOX, workflow_id, x):
dat = x['value']
## handle multiple versions
if type(dat) is list:
dat = dat[0]
rdict = {'nerve_box_height': dat['height'],
'nerve_box_width': dat['width'],
'nerve_box_center_x': dat['x']+0.5*dat['width'],
'nerve_box_center_y': dat['y']+0.5*dat['height'],}
push_keys_and_dict_onto_list(rkey, skey, rdict, self.row_list)
def finish(self, df):
stat_df = self.dataframe()
grouped = stat_df.groupby(['subject_id'], as_index=False)
tmp = pd.merge(grouped['nerve_box_center_x'].agg({'n': len}),
grouped[['nerve_box_center_x', 'nerve_box_center_y', 'nerve_box_width', 'nerve_box_height']].agg(np.mean))
print('Optic Nerve Box statistics: ')
print(tmp)
print('Count Optic Nerve Box rows: %i'%len(stat_df))
print('Mean Optic Nerve Box height/width: %.4f'%np.mean(stat_df['nerve_box_height'].values/stat_df['nerve_box_width'].values))
if self.out_file:
stat_df.to_csv(self.out_file, index=False)
class AccumulateFoveaMarks(DataFrameAccumulator):
def __init__(self, out_file=None):
super(AccumulateFoveaMarks, self).__init__()
self.out_file = out_file
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
for x in annotations:
if is_task_key(TASK_KEY_MARK_FOVEA, workflow_id, x):
dat = x['value'][0]
rdict = {'fovea_center_x': dat['x'],
'fovea_center_y': dat['y'],}
push_keys_and_dict_onto_list(rkey, skey, rdict, self.row_list)
def finish(self, df):
stat_df = self.dataframe()
grouped = stat_df.groupby(['subject_id'], as_index=False)
tmp = pd.merge(grouped['fovea_center_x'].agg({'n': len}),
grouped[['fovea_center_x', 'fovea_center_y']].agg(np.mean))
print('Fovea mark statistics: ')
print(tmp)
if self.out_file:
stat_df.to_csv(self.out_file, index=False)
class AccumulateCupDiskBoundaryBox(DataFrameAccumulator):
def __init__(self, out_file=None):
super(AccumulateCupDiskBoundaryBox, self).__init__()
self.out_file = out_file
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
valid_disk = valid_cup = False
for x in annotations:
if is_task_key(TASK_KEY_DISK_BOUNDARY, workflow_id, x):
dat = x['value']
if not 'points' in dat[-1]:
print('WARNING: skipping disk boundary as no points field: %s'%str(x))
else:
dat = dat[-1]
assert(dat['tool'] == 0)
points = parse_point_array(dat['points'])
disk_area = poly_area(points[:,1], points[:,0])
disk_min = np.min(points, axis=0)
disk_max = np.max(points, axis=0)
valid_disk = True
if is_task_key(TASK_KEY_CUP_BOUNDARY, workflow_id, x):
dat = x['value']
if not 'points' in dat[-1]:
print('WARNING: skipping cup boundary as no points field: %s'%str(x))
else:
dat = dat[-1]
assert(dat['tool'] == 0)
points = parse_point_array(dat['points'])
cup_area = poly_area(points[:,1], points[:,0])
cup_min = np.min(points, axis=0)
cup_max = np.max(points, axis=0)
valid_cup = True
if valid_cup and valid_disk:
rdict = {
'disk_center_x': 0.5*(disk_min[0]+disk_max[0]),
'disk_center_y': 0.5*(disk_min[1]+disk_max[1]),
'disk_width': disk_max[0] - disk_min[0],
'disk_height': disk_max[1] - disk_min[1],
'disk_area': disk_area,
'cup_center_x': 0.5*(cup_min[0]+cup_max[0]),
'cup_center_y': 0.5*(cup_min[1]+cup_max[1]),
'cup_width': cup_max[0] - cup_min[0],
'cup_height': cup_max[1] - cup_min[1],
'cup_area': cup_area,
}
rdict['cdr_vertical'] = rdict['cup_height']/rdict['disk_height']
rdict['cdr_horizontal'] = rdict['cup_width']/rdict['disk_width']
rdict['cdr_area'] = math.sqrt(rdict['cup_area']/rdict['disk_area'])
rdict['nerve_cd_area'] = rdict['disk_area'] - rdict['cup_area']
push_keys_and_dict_onto_list(rkey, skey, rdict, self.row_list)
def finish(self, df):
stat_df = self.dataframe()
grouped = stat_df.groupby(['subject_id'], as_index=False)
tmp = pd.merge(grouped['disk_center_x'].agg({'n': len}),
grouped[['cdr_vertical', 'cdr_horizontal', 'cdr_area', 'nerve_cd_area']].agg(np.mean))
print('CupDiskBoundaryBox statistics: ')
print(tmp)
if self.out_file:
stat_df.to_csv(self.out_file, index=False)
class AccumulateNotchHaemorrhageMarks(DataFrameAccumulator):
def __init__(self, out_file=None):
super(AccumulateNotchHaemorrhageMarks, self).__init__()
self.out_file = out_file
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
for x in annotations:
if is_task_key(TASK_KEY_MARK_NOTCH_HAEMORRHAGE, workflow_id, x, skip_empty_value=False):
for mark in x['value']:
rdict = { 'mark_id': mark['tool'],
'mark_label': mark['tool_label'],
'mark_center_x': mark['x'],
'mark_center_y': mark['y'], }
push_keys_and_dict_onto_list(rkey, skey, rdict, self.row_list)
if len(x['value']) == 0:
rdict = { 'mark_id': -1,
'mark_label': 'No_Notch_Or_Haemorrhage',
'mark_center_x': -1,
'mark_center_y': -1, }
push_keys_and_dict_onto_list(rkey, skey, rdict, self.row_list)
def dataframe(self):
NOTCH_ID = 0
HAEMORRHAGE_ID = 1
df = super(AccumulateNotchHaemorrhageMarks, self).dataframe()
if df is not None:
grouped = df.groupby(CSV_KEY_ORDER, as_index=False)
return grouped['mark_id'].agg({
'n_notch': lambda xs: np.sum(xs == NOTCH_ID),
'n_heamorrhage': lambda xs: np.sum(xs == HAEMORRHAGE_ID),
})
else:
return None
def finish(self, df):
stat_df = super(AccumulateNotchHaemorrhageMarks, self).dataframe()
if stat_df is not None:
grouped = stat_df.groupby(['subject_id', 'mark_id', 'mark_label'], as_index=False)
tmp = grouped['mark_center_x'].agg({'n': len})
print('Notch/Haemorrhage mark statistics: ')
print(tmp)
if self.out_file:
stat_df.to_csv(self.out_file, index=False)
else:
print('No Notch/Haemorrhage mark records processed!')
class PrintSubjectInfo(BaseAccumulator):
def handle_row(self, rkey, skey, row):
subject = parse_field(row['subject_data'])
print(yaml.dump(subject))
class PrintRowsForTaskKey(BaseAccumulator):
def __init__(self, tkey):
self.tkey = tkey
def handle_row(self, rkey, skey, row):
workflow_id = row['workflow_id']
annotations = parse_field(row['annotations'])
for x in annotations:
if is_task_key(self.tkey, workflow_id, x):
print('rkey: "%s"'%str(rkey))
print(yaml.dump(x))
def ParseExpertCSV(args):
expert_set = set()
if args.expert_csv is not None:
if args.verbose: print('Reading expert set from: '+args.expert_csv)
df = pd.read_csv(args.expert_csv)
expert_set = set(df['user_name'].values)
return expert_set
def main(args):
expert_set = ParseExpertCSV(args)
for fnme in args.file:
if args.verbose: print('Processing: '+fnme)
df = pd.read_csv(fnme, nrows=args.nrows)
if args.verbose: df.info()
if args.process_rows is not None:
sa = [min(int(n), len(df)-1) for n in args.process_rows.split(',')]
print('Taking rows %d:%d'%(sa[0], sa[1]))
df = df[sa[0]:sa[1]]
## filter to valid workflows
df = df.loc[df['workflow_id'].isin(VALID_WORKFLOWS.keys())]
min_id = df['workflow_id'].replace(VALID_WORKFLOWS)
df = df.loc[df['workflow_version'] >= min_id]
df = df.reset_index(drop=True)
outdir_fn = lambda x: os.path.join(args.outpath, x)
accum_fovea = AccumulateFoveaMarks(outdir_fn('fovea_data.csv'))
accum_optic_box = AccumulateOpticNerveBox(outdir_fn('optic_nerve_box_data.csv'))
accum_cup_disk = AccumulateCupDiskBoundaryBox(outdir_fn('cup_disk_data.csv'))
accum_notch_haem = AccumulateNotchHaemorrhageMarks(outdir_fn('notch_haemorrhage_marks.csv'))
df_accumulators = [
accum_fovea,
accum_optic_box,
accum_cup_disk,
accum_notch_haem,
]
accumulators = df_accumulators + [
AccumulateTasks(),
AccumulateWorkflows(),
##PrintSubjectInfo(),
##PrintRowsForTaskKey(TASK_KEY_MARK_FOVEA),
##PrintRowsForTaskKey(TASK_KEY_DISK_BOUNDARY),
##PrintRowsForTaskKey(TASK_KEY_CUP_BOUNDARY),
##PrintRowsForTaskKey(TASK_KEY_MARK_NOTCH_HAEMORRHAGE),
]
for a in accumulators:
a.setup(df)
for idx, row in df.iterrows():
if args.verbose: print(row)
rkey = row_ukey(row)
skey = row_skey(row)
if args.verbose: print(' Processing row: %s|%s'%(str(rkey), str(skey)))
if args.dump_annotations: print(yaml.dump(parse_field(row['annotations'])))
if rkey['user_name'] in expert_set: rkey['expert'] = 1
for a in accumulators:
a.handle_row(rkey, skey, row)
if (idx+1) % 100 == 0: print('Handled %d rows'%(idx+1))
for a in accumulators:
a.finish(df)
## create single dataframe and calculate mu_scale
merged_df = df_accumulators[0].dataframe()
for dfa in df_accumulators[1:]:
tmp = dfa.dataframe()
if tmp is not None:
merged_df = pd.merge(merged_df, tmp, how='outer')
merged_df['f2d_mu_scale'] = calc_f2d_mu_scale(merged_df)
merged_df['nerve_cd_area_mu2'] = merged_df['nerve_cd_area']*(merged_df['f2d_mu_scale']**2)
merged_df = df_xcols(merged_df, CSV_KEY_ORDER)
merged_df.to_csv(outdir_fn('all_data.csv'), index=False)
## create aggregated cdr ratios by subject
def group_and_describe(df, group_key, trgt_nme):
tmp = df.groupby(group_key)
if len(tmp) == 0: return pd.DataFrame()
tmp = tmp[trgt_nme].describe().unstack()
tmp.columns = [trgt_nme+'_'+n for n in tmp.columns.values]
return tmp
def fn(merged_df, trgt_nme):
sub_key = ['subject_id', 'subject_filename']
tmp_df = merged_df[sub_key+['expert', trgt_nme]]
ok = np.isfinite(tmp_df[trgt_nme].values)
normal_df = group_and_describe(tmp_df[(tmp_df.expert==0) & ok], sub_key, trgt_nme)
expert_df = group_and_describe(tmp_df[(tmp_df.expert>0) & ok], sub_key, trgt_nme)
if len(expert_df) == 0: return normal_df
if len(normal_df) == 0: return expert_df
return normal_df.join(expert_df, how='outer', rsuffix='_expert')
tmp = fn(merged_df, 'cdr_horizontal')
tmp.to_csv(outdir_fn('cdr_horizontal_aggregate.csv'))
tmp = fn(merged_df, 'cdr_vertical')
tmp.to_csv(outdir_fn('cdr_vertical_aggregate.csv'))
print('Total classifications by normal/expert:')
print(merged_df.groupby('expert')['expert'].count())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tool to read Zooniverse input and convert to objects')
parser.add_argument('file', nargs='+', help='files to process')
parser.add_argument('-o', '--outpath', help='output path', default='')
parser.add_argument('-e', '--expert_csv', help='csv (name, user_name) listing expert users', default=None)
parser.add_argument('-v', '--verbose', help='verbose output', default=False, action='store_true')
    parser.add_argument('--process_rows', help='range of rows to process (all if not present, otherwise comma separated, e.g. 34,50)', default=None)
parser.add_argument('--nrows', help='number of rows to read from the CSV file (all if not specified)', default=None, type=int)
parser.add_argument('--dump_annotations', help='dump every parsed annotation field', default=False, action='store_true')
args = parser.parse_args()
if args.verbose:
print(args)
main(args)
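# --- Usage sketch (illustrative, not part of the original script) ---
# A typical invocation; the file names below are placeholders and the script
# name depends on how this module is saved:
#
#   python parse_zooniverse_classifications.py classifications.csv \
#       -o output/ -e expert_users.csv --nrows 5000 -v
#
# Flags map to the argparse definitions above (-o output path, -e expert csv,
# --nrows row limit, -v verbose).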
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import time
import redis
from ...utils import s, d
class EventStore(object):
def __init__(self):
pass
def add(self, event, pk, ts=None):
raise NotImplementedError
def replay(self, event, ts=0, end_ts=None, with_ts=False):
raise NotImplementedError
def query(self, event, pk, ts=None):
raise NotImplementedError
def clear(self, event, ts=None):
raise NotImplementedError
class RedisEventStore(EventStore):
"""EventStore based on redis.
    The event store uses the namespace and event name as the key and stores
    primary keys in a redis sorted set, with the event timestamp as the score.
**General Usage**
Init event store with redis_dsn::
event_store = RedisEventStore("redis://localhost/", "store")
    You can also pass a function as namespace; it will receive the timestamp as
    its arg. This can be used to separate event stores by hour, day or
    week etc.::
event_store = RedisEventStore(
"redis://localhost/", lambda ts: "store:%s" % d(ts, "%Y%m%d"))
    Add an event with::
event_store.add("test_write", 1)
    Or add an event with a timestamp passed in::
event_store.add("test_write", 2, ts=1024)
Clear all records of an event within a namespace::
event_store.clear("test_write")
**Events Replay**
    One important feature of event sourcing is replay: it can report what has
    changed along with the latest update timestamp of each event.
Replay all records of an event within a namespace::
event_store.replay("test_write")
Or replay all records since timestamp::
# all events since timestamp 1024
event_store.replay("test_write", ts=1024)
# all events between timestamp 1024 and now
event_store.replay("test_write", ts=1024, end_ts=time.time())
    You can also replay all events with their latest update time::
event_store.replay("test_write", with_ts=True)
**Events Query**
    You can query the last change timestamp of an event with the query api.
    Query records within the current namespace::
event_store.query("test_write", 1)
    The return value is either an int timestamp, or None if the record does
    not exist.
    Pass a timestamp to query events within another namespace (if you separate
    the event store namespace by day, you may want to query an event that
    happened yesterday)::
event_store.query("test_write", 1, ts=some_value)
.. note::
        The redis event store class is compatible with twemproxy.
:param redis_dsn: the redis instance uri
    :param namespace: namespace func for the event key; the func should accept
        an event timestamp and return the namespace. A str may also be passed,
        in which case the same namespace is used for all timestamps.
:param ttl: expiration time for events stored, default to 3 days.
:param socket_timeout: redis socket timeout
:param kwargs: kwargs to be passed to redis instance init func.
"""
LUA_TIME = "return tonumber(redis.call('TIME')[1])"
LUA_ZADD = ' '.join("""
local score = redis.call('ZSCORE', KEYS[1], ARGV[2])
if score and tonumber(ARGV[1]) <= tonumber(score) then
return 0
else
redis.call('ZADD', KEYS[1], ARGV[1], ARGV[2])
return 1
end
""".split())
def __init__(self, redis_dsn, namespace=None, ttl=3600*24*3,
socket_timeout=1, **kwargs):
super(RedisEventStore, self).__init__()
self.r = redis.StrictRedis.from_url(
redis_dsn, socket_timeout=socket_timeout, **kwargs)
self.ttl = ttl
self.logger = logging.getLogger("meepo.redis_es")
if namespace is None:
self.namespace = lambda ts: "meepo:redis_es:%s" % d(ts, "%Y%m%d")
elif isinstance(namespace, str):
self.namespace = lambda ts: namespace
elif callable(namespace):
self.namespace = namespace
def _keygen(self, event, ts=None):
"""Generate redis key for event at timestamp.
:param event: event name
:param ts: timestamp, default to current timestamp if left as None
"""
return "%s:%s" % (self.namespace(ts or time.time()), event)
def _time(self):
"""Redis lua func to get timestamp from redis server, use this func to
prevent time inconsistent across servers.
"""
return self.r.eval(self.LUA_TIME, 1, 1)
def _zadd(self, key, pk, ts=None, ttl=None):
"""Redis lua func to add an event to the corresponding sorted set.
:param key: the key to be stored in redis server
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
"""
return self.r.eval(self.LUA_ZADD, 1, key, ts or self._time(), pk)
def add(self, event, pk, ts=None, ttl=None):
"""Add an event to event store.
        All events are stored in a sorted set in redis with the timestamp as
        the rank score.
:param event: the event to be added, format should be ``table_action``
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
:return: bool
"""
key = self._keygen(event, ts)
try:
self._zadd(key, pk, ts, ttl)
return True
except redis.ConnectionError as e:
            # connection error typically happens when the redis server can't
            # be reached or times out; the error is silenced with an error log
            # and False is returned.
self.logger.error(
"redis event store failed with connection error %r" % e)
return False
def replay(self, event, ts=0, end_ts=None, with_ts=False):
"""Replay events based on timestamp.
        If you split namespaces by ts, the replay will only return events
        within the same namespace.
:param event: event name
:param ts: replay events after ts, default from 0.
:param end_ts: replay events to ts, default to "+inf".
:param with_ts: return timestamp with events, default to False.
:return: list of pks when with_ts set to False, list of (pk, ts) tuples
when with_ts is True.
"""
key = self._keygen(event, ts)
end_ts = end_ts if end_ts else "+inf"
elements = self.r.zrangebyscore(key, ts, end_ts, withscores=with_ts)
if not with_ts:
return [s(e) for e in elements]
else:
return [(s(e[0]), int(e[1])) for e in elements]
def query(self, event, pk, ts=None):
"""Query the last update timestamp of an event pk.
You can pass a timestamp to only look for events later than that
within the same namespace.
:param event: the event name.
:param pk: the pk value for query.
:param ts: query event pk after ts, default to None which will query
all span of current namespace.
"""
key = self._keygen(event, ts)
pk_ts = self.r.zscore(key, pk)
return int(pk_ts) if pk_ts else None
def clear(self, event, ts=None):
"""Clear all stored record of event.
:param event: event name to be cleared.
:param ts: timestamp used locate the namespace
"""
return self.r.delete(self._keygen(event, ts))
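# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal walk-through of the API above, assuming a Redis server reachable
# at redis://localhost:6379/0; the event name "test_write" and the pks are
# examples only:
#
#   store = RedisEventStore("redis://localhost:6379/0", namespace="demo")
#   store.add("test_write", 1)                       # pk 1, server timestamp
#   store.add("test_write", 2, ts=1024)              # pk 2, explicit timestamp
#   store.replay("test_write")                       # -> pks ordered by timestamp
#   store.replay("test_write", with_ts=True)         # -> [(pk, ts), ...]
#   store.query("test_write", 1)                     # -> last update ts or None
#   store.clear("test_write")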
|
class MessageErrors(Exception):
def __init__(self, message):
self.message = message
class MessageBodyEmpty(MessageErrors):
pass
|
from dsmpy.dataset import Dataset
from dsmpy import rootdsm_psv, rootdsm_sh
if __name__ == '__main__':
parameter_files = [
rootdsm_psv + 'test2.inf',
rootdsm_psv + 'test3.inf']
dataset = Dataset.dataset_from_files(parameter_files)
counts, displacements = dataset.get_chunks_station(2)
counts_eq, displacements_eq = dataset.get_chunks_eq(2)
parameter_files_sh = [rootdsm_sh + 'AK135_SH.inf']
dataset_sh = Dataset.dataset_from_files(parameter_files_sh, mode=1)
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import pyaudio
import wave
class Record():
    @staticmethod
    def record(wav_output_filename):
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 6
WAVE_OUTPUT_FILENAME = wav_output_filename
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("Recording...")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("Done recording!")
print("*******************************************************************\n")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
import dataclasses
import typing
import time
import itertools
import numpy as np
from numpy import linalg as LA
import pandas as pd
from sklearn.cluster import KMeans
from .example import Example, Vector
from .cluster import Cluster
def minDist(clusters, centers, item):
    assert type(clusters) is list and len(clusters) > 0, f'Expected clusters to be non empty list. Got {type(clusters)} => {clusters}'
    assert centers is not None and len(centers) > 0, f'Expected centers to be non empty numpy array. Got {type(centers)} => {centers}'
    assert item is not None and len(item) > 0, f'Expected item to be non empty numpy array. Got {type(item)} => {item}'
    assert type(clusters[0]) is Cluster, f'Expected clusters to be a non empty Cluster list'
try:
        assert hasattr(centers, 'shape'), f'Expected centers to have shape. Got centers={centers}.'
        assert hasattr(item, 'shape'), f'Expected item to have shape. Got item={item}.'
assert centers.shape is not None, f'Expected centers to have shape. Got centers={centers}.'
assert item.shape is not None, f'Expected item to have shape. Got item={item}.'
assert centers.shape[1] == item.shape[0], f'Expected centers and item to have equivalent shape. Got centers={centers.shape}, item={item.shape}.'
except Exception as ex:
print('\n\nMin Dist ERROR', centers, item, '\n\n')
raise Exception('Min Dist ERROR') from ex
dists = LA.norm(centers - item, axis=1)
d = dists.min()
cl = clusters[ dists.tolist().index(d) ]
return d, cl
def mkCenters(clusters):
return np.array([cl.center for cl in clusters])
def clustering(unknownBuffer, label=None, MAX_K_CLUSTERS=100, REPR_TRESHOLD=20):
df = pd.DataFrame(unknownBuffer).drop_duplicates()
if len(df) == 0:
return []
n_samples = len(df)
    n_clusters = min(MAX_K_CLUSTERS, n_samples // REPR_TRESHOLD)
    # clamp to the valid KMeans range [1, n_samples]
    n_clusters = max(1, min(n_clusters, n_samples))
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(df)
newClusters = [Cluster(center=centroid, label=label, n=0, maxDistance=0, latest=0) for centroid in kmeans.cluster_centers_]
return newClusters
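# --- Illustrative sketch (not part of the original module) ---
# How the helpers above fit together; the feature vectors and the label below
# are made up:
#
#   model = clustering([[0.1, 0.2], [0.9, 0.8], [0.11, 0.19]], label='normal')
#   centers = mkCenters(model)
#   d, nearest = minDist(model, centers, np.array([0.15, 0.25]))
#   # d is the Euclidean distance to the closest center, nearest is that Cluster.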
def minasOnline(exampleSource, inClusters=[]):
RADIUS_FACTOR = 1.1
EXTENTION_FACTOR = 3
BUFF_FULL = 100
MAX_K_CLUSTERS = 100
REPR_TRESHOLD = 20
CLEANUP_WINDOW = 100
#
unknownBuffer = []
clusters=[cl for cl in inClusters]
centers = mkCenters(clusters)
sleepClusters = []
counter = 0
noveltyIndex = 0
sentinel = object()
while True:
example = next(exampleSource, sentinel)
if example is sentinel:
yield 'done'
return
example = Example(item=example.item)
counter += 1
example.timestamp = time.time_ns()
example.n = counter
        # recompute centers on each step since the cluster list may have changed
        d, cl = minDist(clusters, mkCenters(clusters), example.item)
if (d / max(1.0, cl.maxDistance)) <= RADIUS_FACTOR:
cl.maxDistance = max(cl.maxDistance, d)
cl.latest = counter
cl.n += 1
yield f"[CLASSIFIED] {example.n}: {cl.label}"
else:
unknownBuffer.append(example)
yield f"[UNKNOWN] {example.n}: {example.item}"
if len(unknownBuffer) > BUFF_FULL:
if len(sleepClusters) > 0:
yield f'[recurenceDetection] unk={len(unknownBuffer)}, sleep={len(sleepClusters)}'
                for sleepExample in list(unknownBuffer):
                    d, cl = minDist(sleepClusters, mkCenters(sleepClusters), sleepExample.item)
if (d / max(1.0, cl.maxDistance)) <= RADIUS_FACTOR:
cl.maxDistance = max(cl.maxDistance, d)
cl.latest = counter
unknownBuffer.remove(sleepExample)
yield f"[CLASSIFIED] {sleepExample.n}: {cl.label}"
if cl in sleepClusters:
clusters.append(cl)
sleepClusters.remove(cl)
yield f"[Recurence] {cl.label}"
if len(unknownBuffer) % (BUFF_FULL // 10) == 0:
yield '[noveltyDetection]'
newClusters = clustering([ ex.item for ex in unknownBuffer ])
temp_examples = {cl: [] for cl in newClusters}
for sleepExample in unknownBuffer:
                    d, cl = minDist(newClusters, mkCenters(newClusters), sleepExample.item)
cl.maxDistance = max(cl.maxDistance, d)
cl.latest = counter
cl.n += 1
temp_examples[cl].append((sleepExample, d))
for ncl in newClusters:
if ncl.n < 2: continue
distances = [ d for ex, d in temp_examples[ncl] ]
if len(distances) == 0: continue
                    distCl2Cl, nearCl2Cl = minDist(clusters + sleepClusters, mkCenters(clusters + sleepClusters), ncl.center)
#
mean = sum(distances) / len(distances)
devianceSqrSum = sum([(d - mean) **2 for d in distances])
var = devianceSqrSum / len(distances)
stdDevDistance = var **0.5
silhouetteFn = lambda a, b: (b - a) / max([a, b])
silhouette = silhouetteFn(stdDevDistance, distCl2Cl)
if silhouette < 0: continue
#
sameLabel = [ cl for cl in clusters + sleepClusters if cl.label == nearCl2Cl.label ]
sameLabelDists = [ sum((cl1.center - cl2.center) ** 2) ** (1/2) for cl1, cl2 in itertools.combinations(sameLabel, 2) ]
#
                    if distCl2Cl / max(1.0, nearCl2Cl.maxDistance) < EXTENTION_FACTOR or (len(sameLabelDists) > 0 and distCl2Cl / max(sameLabelDists) < 2):
yield f'Extention {nearCl2Cl.label}'
ncl.label = nearCl2Cl.label
else:
label = 'Novelty {}'.format(noveltyIndex)
ncl.label = label
yield label
noveltyIndex += 1
clusters.append(ncl)
for ex, d in temp_examples[ncl]:
if ex in unknownBuffer:
yield f"[CLASSIFIED] {ex.n}: {ncl.label}"
unknownBuffer.remove(ex)
        if counter % CLEANUP_WINDOW == 0:
            yield '[cleanup]'
            # drop unknown examples older than the cleanup window
            for ex in list(unknownBuffer):
                if counter - ex.n > 3 * CLEANUP_WINDOW:
                    unknownBuffer.remove(ex)
            # move clusters that have not been updated recently to the sleep set
            for cl in list(clusters):
                if counter - cl.latest > 2 * CLEANUP_WINDOW:
                    sleepClusters.append(cl)
                    clusters.remove(cl)
if len(clusters) == 0:
yield f'[fallback] {len(sleepClusters)} => clusters'
# fallback
clusters.extend(sleepClusters)
sleepClusters.clear()
#
#
#
#
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]
def minasOffline(examplesDf):
RADIUS_FACTOR = 1.1
MAX_K_CLUSTERS = 50
REPR_TRESHOLD = 5
#
clusters = []
groupSize = MAX_K_CLUSTERS * REPR_TRESHOLD
for label, group in examplesDf.groupby('label'):
group = list(group['item'])
# print('grouped', label, len(group), 'groupSize', groupSize)
for chunk in chunks(group, groupSize):
unknownBuffer = chunk
# print('grouped', label, len(group), len(unknownBuffer))
newClusters = clustering(unknownBuffer, label=label)
temp_examples = {cl: [] for cl in newClusters}
for sleepExample in unknownBuffer:
centers = mkCenters(newClusters)
d, cl = minDist(newClusters, centers, sleepExample)
cl.maxDistance = max(cl.maxDistance, d)
cl.n += 1
temp_examples[cl].append((sleepExample, d))
for ncl in newClusters:
                if ncl.n < 2 or ncl.maxDistance <= 0:
continue
#
clusters.append(ncl)
#
#
return clusters
#
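# --- Illustrative sketch (not part of the original module) ---
# A typical offline -> online flow, assuming a pandas DataFrame with 'item' and
# 'label' columns for training and an iterable of Example-like objects (each
# exposing an .item vector) for the stream:
#
#   model = minasOffline(training_df)
#   for message in minasOnline(iter(stream_examples), inClusters=model):
#       print(message)   # yields [CLASSIFIED]/[UNKNOWN]/novelty messages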
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock of Google Maps APIs.
This application is intended for load testing _your_ applications, by providing
you a way to query a _mock_ of some of the Google Maps APIs, which you need to
run on _your_ own AppEngine instance.
See the app = ... block at the end for supported APIs. Adding more APIs (e.g.
Elevation, Places, etc.) should be pretty straightforward. Each endpoint (e.g.
/maps/api/geocode/json) returns a randomly picked response from the data
directory; from there you can serve either dummy responses or copies from the
original API. You should always include the most typical error responses
(OVER_QUERY_LIMIT and ZERO_RESULTS at least) to test how your application
reacts to them.
"""
import os
import random
import webapp2
DATA_ROOT_PATH = 'data'
def ListdirFullpath(directory):
"""Like os.listdir but returns full paths.
Source: http://stackoverflow.com/questions/120656/directory-listing-in-python
Args:
directory: A string with a directory name.
Returns:
A list of strings with the full path of every file in that directory.
"""
return [os.path.join(directory, filename)
for filename in os.listdir(directory)]
class GenericMapsApiResponse(webapp2.RequestHandler):
"""Base class that returns generic Maps API responses.
You need to override the following methods to actually return some
sensible content: GetContent() GetContentType().
"""
def get(self): # pylint: disable=g-bad-name
self.response.headers['content-type'] = self.GetContentType()
# Common headers from the Google Maps APIs as of June 2013.
self.response.headers['access-control-allow-origin'] = '*'
self.response.headers['cache-control'] = 'public, max-age=86400'
self.response.headers['vary'] = 'Accept-Language'
self.response.headers['x-xss-protection'] = '1; mode=block'
self.response.write(self.GetContent())
def GetContent(self):
return ''
def GetContentType(self):
return 'text/plain'
class RandomHttpResponse(GenericMapsApiResponse):
"""Returns random plain-text responses.
  Implements GetContent() to return the content of a file picked at random
  from the directories derived from GetContentTypePath() and GetApiShortName().
  Subclasses need to override GetContentTypePath(), GetApiShortName() and
  GetContentType().
"""
def GetContentPath(self):
return os.path.join(DATA_ROOT_PATH,
self.GetContentTypePath(),
self.GetApiShortName())
def GetErrorsPath(self):
return os.path.join(DATA_ROOT_PATH,
self.GetContentTypePath(),
'errors')
  def GetContent(self):
    files = (ListdirFullpath(self.GetContentPath()) +
             ListdirFullpath(self.GetErrorsPath()))
    # Use a context manager so the file handle is closed promptly.
    with open(random.choice(files), 'r') as fd:
      return fd.read()
class JsonApiResponse(RandomHttpResponse):
"""Templated JSON response."""
def GetContentTypePath(self):
return 'json'
def GetContentType(self):
return 'application/json; charset=UTF-8'
class XmlApiResponse(RandomHttpResponse):
"""Templated XML response."""
def GetContentTypePath(self):
return 'xml'
def GetContentType(self):
return 'application/xml; charset=UTF-8'
class GeocodingApiResponse(object):
"""Helper class to return static values through inheritance."""
def GetApiShortName(self):
return 'geocoding'
class GeocodingApiJsonResponse(JsonApiResponse, GeocodingApiResponse):
"""Mock JSON response from the Google Maps Geocoding API V3."""
pass
class GeocodingApiXmlResponse(XmlApiResponse, GeocodingApiResponse):
"""Mock XML response from the Google Maps Geocoding API V3."""
pass
class DirectionsApiResponse(object):
"""Helper class to return static values through inheritance."""
def GetApiShortName(self):
return 'directions'
class DirectionsApiJsonResponse(JsonApiResponse, DirectionsApiResponse):
"""Mock JSON response from the Google Maps Directions API V3."""
pass
class DirectionsApiXmlResponse(XmlApiResponse, DirectionsApiResponse):
"""Mock XML response from the Google Maps Directions API V3."""
pass
class MainPage(webapp2.RequestHandler):
def get(self): # pylint: disable=g-bad-name
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, webapp2 World!')
app = webapp2.WSGIApplication([
('/maps/api/geocode/json', GeocodingApiJsonResponse),
('/maps/api/geocode/xml', GeocodingApiXmlResponse),
('/maps/api/directions/json', DirectionsApiJsonResponse),
('/maps/api/directions/xml', DirectionsApiXmlResponse),
], debug=True)
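# --- Illustrative note (not part of the original module) ---
# Based on GetContentPath()/GetErrorsPath() above, responses are served from a
# layout along these lines (the file names are arbitrary examples):
#
#   data/json/geocoding/some_response.json
#   data/json/directions/some_response.json
#   data/json/errors/over_query_limit.json
#   data/xml/geocoding/some_response.xml
#   data/xml/errors/zero_results.xml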
|
while True:
try:
e = str(input()).split()
d = e[0]
l = e[1]
p = e[2]
g = ''
if d == l == p or d != l != p != d: g = 'e'
else:
if d == l == 'papel':
if p == 'pedra': g = 'e'
elif p == 'tesoura': g = 'p'
elif d == l == 'pedra':
if p == 'papel': g = 'p'
elif p == 'tesoura': g = 'e'
elif d == l == 'tesoura':
if p == 'pedra': g = 'p'
elif p == 'papel': g = 'e'
elif l == p == 'papel':
if d == 'pedra': g = 'e'
elif d == 'tesoura': g = 'd'
elif l == p == 'pedra':
if d == 'papel': g = 'd'
elif d == 'tesoura': g = 'e'
elif l == p == 'tesoura':
if d == 'pedra': g = 'd'
elif d == 'papel': g = 'e'
elif d == p == 'papel':
if l == 'pedra': g = 'e'
elif l == 'tesoura': g = 'l'
elif d == p == 'pedra':
if l == 'papel': g = 'l'
elif l == 'tesoura': g = 'e'
elif d == p == 'tesoura':
if l == 'pedra': g = 'l'
elif l == 'papel': g = 'e'
if g == 'e': print('Putz vei, o Leo ta demorando muito pra jogar...')
elif g == 'd': print('Os atributos dos monstros vao ser inteligencia, sabedoria...')
elif g == 'l': print('Iron Maiden\'s gonna get you, no matter how far!')
elif g == 'p': print('Urano perdeu algo muito precioso...')
elif g == '3': print('3 iguais ou diferentes')
except EOFError: break
|
#!/usr/bin/env python3
import fitparse
import sys
import time
import os
start = time.time()
fitfile = fitparse.FitFile( sys.argv[1] )
records = []
laps = []
for record in fitfile.get_messages( 'record' ):
records.append( record )
for lap in fitfile.get_messages( 'lap' ):
laps.append( lap )
print( 'record: {} messages'.format( len( records) ) )
print( 'laps: {} messages'.format( len( laps) ) )
end = time.time()
print( "| python | {} | {} | {:.3f} seconds | [python-fitparse](https://github.com/dtcooper/python-fitparse) |".format( os.path.basename(sys.argv[0]), sys.argv[1], end-start ) )
|
import pytest
from verta.endpoint.resources import Resources
@pytest.mark.parametrize("data", [3, 64, 0.25])
def test_cpu_milli(data):
Resources(cpu=data)
@pytest.mark.parametrize("data", [-12, 0])
def test_cpu_milli_negative(data):
with pytest.raises(ValueError):
Resources(cpu=data)
@pytest.mark.parametrize("data", ["T", "0.5"])
def test_cpu_milli_negative_type(data):
with pytest.raises(TypeError):
Resources(cpu=data)
@pytest.mark.parametrize("data", ['128974848', '129e6', '129M', '123Mi'])
def test_memory(data):
Resources(memory=data)
@pytest.mark.parametrize("data", ['12M3M', 'e12M3M', 'G', '123e6.3Gi', '123.3', '-5'])
def test_memory_negative(data):
with pytest.raises(ValueError):
Resources(memory=data)
@pytest.mark.parametrize("data", [12.2, 4])
def test_memory_negative_type(data):
with pytest.raises(TypeError):
Resources(memory=data)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 08:10:00 2021
@author: Swapnanil Sharma, https://github.com/swapnanilsharma
"""
__version__ = 0.1
|
__author__ = "Thomas Bell"
__version__ = (0, 2, 0)
__version_info__ = ".".join(map(str, __version__))
APP_NAME = "saving-place"
APP_AUTHOR = "/u/isurvived12"
APP_VERSION = __version_info__
USER_AGENT = "desktop:{}:v{} (by {})".format(
APP_NAME, APP_VERSION, APP_AUTHOR)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
import numpy as np
from collections import deque, namedtuple
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
import math
from itertools import count
import gc
from agent import Agent
from dqn_model import DQN
from dueling_dqn_model import DuelingDQN
from crnn_model import CrnnDQN
import time
from torch.autograd import Variable
import json
import uuid
from humanize import naturaltime
"""
you can import any package and define any extra function as you need
"""
torch.manual_seed(595)
np.random.seed(595)
random.seed(595)
class JsonEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32,
np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
elif isinstance(obj, set):
return list(obj)
else:
try:
return obj.default()
except Exception:
return f'Object not serializable - {obj}'
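# --- Usage sketch (illustrative, not part of the original code) ---
# JsonEncoder lets numpy scalars and arrays pass through json.dumps, e.g.:
#
#   payload = {'reward': np.float32(1.5), 'q_values': np.zeros(3)}
#   json.dumps(payload, cls=JsonEncoder)   # -> '{"reward": 1.5, "q_values": [0.0, 0.0, 0.0]}'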
class MetaData(object):
"""
    Metadata for model monitoring and restore purposes.
"""
def __init__(self, fp, args):
self.episode_template = namedtuple('EpisodeData',
(
"episode", "step", "time", "time_elapsed", "ep_len", "buffer_len",
"epsilon",
"reward", "avg_reward", "max_q", "max_avg_q", "loss", "avg_loss", "mode",
"lr"))
self.step_template = namedtuple('StepData', ("step", "epsilon", "reward", "max_q", "loss", "lr"))
self.fp = fp
self.episode_data = None
self.step_data = None
self.args = args
if self.args.tb_summary:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter('/'.join(self.fp.name.split('/')[:-1]) + '/tb_logs/')
def update_step(self, *args):
self.step_data = self.step_template(*args)
if self.args.tb_summary:
self.writer.add_scalar('step/epsilon', self.step_data.epsilon, self.step_data.step)
self.writer.add_scalar('step/learning_rate', self.step_data.lr, self.step_data.step)
self.writer.add_scalar('step/reward', self.step_data.reward, self.step_data.step)
self.writer.add_scalar('step/max_q', self.step_data.max_q, self.step_data.step)
self.writer.add_scalar('step/loss', self.step_data.loss, self.step_data.step)
def update_episode(self, *args):
"""
Update metadata
:param args: args
"""
self.episode_data = self.episode_template(*args)
if self.episode_data.episode % self.args.disp_freq == 0:
print(
f"E: {self.episode_data.episode} | M: {self.episode_data.buffer_len} | Step: {self.episode_data.step} "
f"| T: {self.episode_data.time:.2f} | Len: {self.episode_data.ep_len} | EPS: {self.episode_data.epsilon:.5f} "
f"| LR: {self.episode_data.lr:.7f} | R: {self.episode_data.reward} | AR: {self.episode_data.avg_reward:.3f} "
f"| MAQ:{self.episode_data.max_avg_q:.2f} "
f"| L: {self.episode_data.loss:.2f} | AL: {self.episode_data.avg_loss:.4f} | Mode: {self.episode_data.mode} "
f"| ET: {naturaltime(self.episode_data.time_elapsed)}")
if self.args.tb_summary:
self.writer.add_scalar('episode/epsilon', self.episode_data.epsilon, self.episode_data.episode)
self.writer.add_scalar('episode/steps', self.episode_data.step, self.episode_data.episode)
self.writer.add_scalar('episode/learning_rate', self.episode_data.lr, self.episode_data.episode)
self.writer.add_scalar('episode/avg_reward', self.episode_data.avg_reward, self.episode_data.episode)
self.writer.add_scalar('episode/avg_max_q', self.episode_data.max_avg_q, self.episode_data.episode)
self.writer.add_scalar('episode/avg_loss', self.episode_data.avg_loss, self.episode_data.episode)
self.fp.write(self.episode_data._asdict().values().__str__().replace('odict_values([', '').replace('])', '\n'))
def load(self, f):
"""
Load Metadata
:param f: File Pointer
:return:
"""
        self.episode_data = self.episode_template(*json.load(f).values())
def dump(self, f):
"""
JSONify metadata
:param f: file pointer
"""
json.dump(self.episode_data._asdict(), f, cls=JsonEncoder, indent=2)
class NaivePrioritizedBuffer(object):
def __init__(self, capacity, args, prob_alpha=0.6):
self.prob_alpha = prob_alpha
self.capacity = capacity
self.memory = []
self.pos = 0
self.priorities = np.zeros((capacity,), dtype=np.float32)
self.transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward', 'done'))
self.args = args
def push(self, *args):
max_prio = self.priorities.max() if self.memory else 1.0
if len(self.memory) < self.capacity:
self.memory.append(self.transition(*args))
else:
self.memory[self.pos] = self.transition(*args)
self.priorities[self.pos] = max_prio
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size, beta=0.4):
if len(self.memory) == self.capacity:
prios = self.priorities
else:
prios = self.priorities[:self.pos]
probs = prios ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(len(self.memory), batch_size, p=probs)
samples = [self.memory[idx] for idx in indices]
total = len(self.memory)
weights = (total * probs[indices]) ** (-beta)
weights /= weights.max()
weights = np.array(weights, dtype=np.float32)
return [*zip(*samples), indices,
weights] # [*map(lambda x: Variable(torch.cat(x, 0)).to(self.args.device), zip(*samples)), indices, weights]
def update_priorities(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities[idx] = prio
def __len__(self):
return len(self.memory)
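# --- Usage sketch (illustrative, not part of the original code) ---
# Rough shape of how the prioritized buffer is driven; 'args' only needs to be
# an object the buffer can hold on to, and the TD errors are whatever the agent
# computes per sample:
#
#   buf = NaivePrioritizedBuffer(capacity=10000, args=args)
#   buf.push(state, action, next_state, reward, done)
#   states, actions, next_states, rewards, dones, idxs, weights = buf.sample(32, beta=0.4)
#   buf.update_priorities(idxs, td_errors + 1e-5)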
class ReplayBuffer(object):
""" Facilitates memory replay. """
def __init__(self, capacity, args):
self.capacity = capacity
self.memory = []
self.idx = 0
self.args = args
self.transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward', 'done'))
def push(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.idx] = self.transition(*args)
self.idx = (self.idx + 1) % self.capacity
def sample(self, bsz):
batch = random.sample(self.memory, bsz)
return [*zip(*batch)]
class Agent_DQN(Agent):
def __init__(self, env, args):
"""
Initialize everything you need here.
For example:
paramters for neural network
initialize Q net and target Q net
parameters for repaly buffer
parameters for q-learning; decaying epsilon-greedy
...
"""
super(Agent_DQN, self).__init__(env)
###########################
# YOUR IMPLEMENTATION HERE #
# Declare variables
self.exp_id = uuid.uuid4().__str__().replace('-', '_')
self.args = args
self.env = env
self.eps_threshold = None
self.nA = env.action_space.n
self.action_list = np.arange(self.nA)
self.reward_list = deque(maxlen=args.window) # np.zeros(args.window, np.float32)
self.max_q_list = deque(maxlen=args.window) # np.zeros(args.window, np.float32)
self.loss_list = deque(maxlen=args.window) # np.zeros(args.window, np.float32)
self.probability_list = np.zeros(env.action_space.n, np.float32)
self.cur_eps = self.args.eps
self.t = 0
self.ep_len = 0
self.mode = None
if self.args.use_pri_buffer:
self.replay_buffer = NaivePrioritizedBuffer(capacity=self.args.capacity, args=self.args)
else:
self.replay_buffer = ReplayBuffer(capacity=self.args.capacity, args=self.args)
self.position = 0
self.args.save_dir += f'/{self.exp_id}/'
os.system(f"mkdir -p {self.args.save_dir}")
self.meta = MetaData(fp=open(os.path.join(self.args.save_dir, 'result.csv'), 'w'), args=self.args)
self.eps_delta = (self.args.eps - self.args.eps_min) / self.args.eps_decay_window
self.beta_by_frame = lambda frame_idx: min(1.0, args.pri_beta_start + frame_idx * (1.0 - args.pri_beta_start) / args.pri_beta_decay)
# Create Policy and Target Networks
if self.args.use_dueling:
print("Using dueling dqn . . .")
self.policy_net = DuelingDQN(env, self.args).to(self.args.device)
self.target_net = DuelingDQN(env, self.args).to(self.args.device)
elif self.args.use_crnn:
print("Using dueling crnn . . .")
self.policy_net = CrnnDQN(env).to(self.args.device)
self.target_net = CrnnDQN(env).to(self.args.device)
else:
self.policy_net = DQN(env, self.args).to(self.args.device)
self.target_net = DQN(env, self.args).to(self.args.device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=self.args.lr, eps=self.args.optimizer_eps)
if self.args.lr_scheduler:
print("Enabling LR Decay . . .")
self.scheduler = optim.lr_scheduler.ExponentialLR(optimizer=self.optimizer, gamma=self.args.lr_decay)
self.cur_lr = self.optimizer.param_groups[0]['lr']
# Compute Huber loss
self.loss = F.smooth_l1_loss
# todo: Support for Multiprocessing. Bug in pytorch - https://github.com/pytorch/examples/issues/370
self.policy_net.share_memory()
self.target_net.share_memory()
# Set defaults for networks
self.policy_net.train()
self.target_net.eval()
self.target_net.load_state_dict(self.policy_net.state_dict())
if args.test_dqn:
# you can load your model here
###########################
# YOUR IMPLEMENTATION HERE #
print('loading trained model')
self.load_model()
if args.use_pri_buffer:
print('Using priority buffer . . .')
if args.use_double_dqn:
print('Using double dqn . . .')
if args.use_bnorm:
print("Using batch normalization . . .")
print("Arguments: \n", json.dumps(vars(self.args), indent=2), '\n')
def init_game_setting(self):
pass
def make_action(self, observation, test=True):
"""
Return predicted action of your agent
Input:
observation: np.array
stack 4 last preprocessed frames, shape: (84, 84, 4)
Return:
action: int
the predicted action from trained model
"""
###########################
# YOUR IMPLEMENTATION HERE #
with torch.no_grad():
if self.args.test_dqn:
q, argq = self.policy_net(Variable(self.channel_first(observation))).data.cpu().max(1)
return self.action_list[argq]
# Fill up probability list equal for all actions
self.probability_list.fill(self.cur_eps / self.nA)
# Fetch q from the model prediction
q, argq = self.policy_net(Variable(self.channel_first(observation))).data.cpu().max(1)
# Increase the probability for the selected best action
self.probability_list[argq[0].item()] += 1 - self.cur_eps
# Use random choice to decide between a random action / best action
action = torch.tensor([np.random.choice(self.action_list, p=self.probability_list)])
###########################
return action.item(), q.item()
def optimize_model(self):
"""
Function to perform optimization on DL Network
:return: Loss
"""
# Return if initial buffer is not filled.
if len(self.replay_buffer.memory) < self.args.mem_init_size:
return 0
if self.args.use_pri_buffer:
batch_state, batch_action, batch_next_state, batch_reward, batch_done, indices, weights = self.replay_buffer.sample(
self.args.batch_size, beta=self.beta_by_frame(self.t))
else:
batch_state, batch_action, batch_next_state, batch_reward, batch_done = self.replay_buffer.sample(
self.args.batch_size)
batch_state = Variable(self.channel_first(torch.tensor(np.array(batch_state), dtype=torch.float32)))
batch_action = Variable(torch.tensor(np.array(batch_action), dtype=torch.long))
batch_next_state = Variable(self.channel_first(torch.tensor(np.array(batch_next_state), dtype=torch.float32)))
batch_reward = Variable(torch.tensor(np.array(batch_reward), dtype=torch.float32))
batch_done = Variable(torch.tensor(np.array(batch_done), dtype=torch.float32))
policy_max_q = self.policy_net(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1)
if self.args.use_double_dqn:
policy_ns_max_q = self.policy_net(batch_next_state)
next_q_value = self.target_net(batch_next_state).gather(1, torch.max(policy_ns_max_q, 1)[1].unsqueeze(
1)).squeeze(1)
target_max_q = next_q_value * self.args.gamma * (1 - batch_done)
else:
target_max_q = self.target_net(batch_next_state).detach().max(1)[0].squeeze(0) * self.args.gamma * (
1 - batch_done)
# Compute Huber loss
if self.args.use_pri_buffer:
loss = (policy_max_q - (batch_reward + target_max_q.detach())).pow(2) * Variable(
torch.tensor(weights, dtype=torch.float32))
prios = loss + 1e-5
loss = loss.mean()
else:
loss = self.loss(policy_max_q, batch_reward + target_max_q)
# Optimize the model
self.optimizer.zero_grad()
loss.backward()
# Clip gradients between -1 and 1
for param in self.policy_net.parameters():
param.grad.data.clamp_(-1, 1)
if self.args.use_pri_buffer:
self.replay_buffer.update_priorities(indices, prios.data.cpu().numpy())
self.optimizer.step()
return loss.cpu().detach().numpy()
def train(self):
"""
Implement your training algorithm here
"""
###########################
# YOUR IMPLEMENTATION HERE #
def train_fn():
self.t = 1
self.mode = "Random"
train_start = time.time()
if not self.args.load_dir == '':
self.load_model()
for i_episode in range(1, self.args.max_episodes + 1):
# Initialize the environment and state
start_time = time.time()
state = self.env.reset()
self.reward_list.append(0)
self.loss_list.append(0)
self.max_q_list.append(0)
self.ep_len = 0
done = False
# Save Model
self.save_model(i_episode)
# Collect garbage
self.collect_garbage(i_episode)
# Run the game
while not done:
# Update the target network, copying all weights and biases in DQN
if self.t % self.args.target_update == 0:
print("Updating target network . . .")
self.target_net.load_state_dict(self.policy_net.state_dict())
# Select and perform an action
self.cur_eps = max(self.args.eps_min, self.cur_eps - self.eps_delta)
if self.cur_eps == self.args.eps_min:
self.mode = 'Exploit'
else:
self.mode = "Explore"
action, q = self.make_action(state)
next_state, reward, done, _ = self.env.step(action)
self.reward_list[-1] += reward
self.max_q_list[-1] = max(self.max_q_list[-1], q)
# Store the transition in memory
self.replay_buffer.push(state, action, next_state, reward, done)
self.meta.update_step(self.t, self.cur_eps, self.reward_list[-1], self.max_q_list[-1],
self.loss_list[-1], self.cur_lr)
# Increment step and Episode Length
self.t += 1
self.ep_len += 1
# Move to the next state
state = next_state
# Perform one step of the optimization (on the target network)
if self.ep_len % self.args.learn_freq == 0:
loss = self.optimize_model()
self.loss_list[-1] += loss
self.loss_list[-1] /= self.ep_len
# Decay Step:
if self.args.lr_scheduler:
self.cur_lr = self.scheduler.get_lr()[0]
if i_episode % self.args.lr_decay_step == 0 and self.cur_lr > self.args.lr_min:
self.scheduler.step(i_episode)
# Update meta
self.meta.update_episode(i_episode, self.t, time.time() - start_time, time.time() - train_start,
self.ep_len, len(self.replay_buffer.memory), self.cur_eps,
self.reward_list[-1], np.mean(self.reward_list),
self.max_q_list[-1], np.mean(self.max_q_list),
self.loss_list[-1], np.mean(self.loss_list),
self.mode, self.cur_lr)
import multiprocessing as mp
processes = []
for rank in range(4):
p = mp.Process(target=train_fn)
p.start()
processes.append(p)
for p in processes:
p.join()
###########################
|
if __name__ == "__main__":
    # Create the variable "n_times_de_futebol" to hold the number of teams
    n_times_de_futebol = int(input("Digite o numero de times de futebol \n"))
    # Create the variable "ranking_de_gols_de_cada_time" to hold the number
    # of goals of each team
    ranking_de_gols_de_cada_time = []
    # Create i to bound the loop
    i = 1
    # Loop while i is less than or equal to the number of teams
    while i <= n_times_de_futebol:
        # Read the current team's goal count
        ranking_do_time_atual_do_loop = int(input("Digite o "
                                                  "ranking de gols do time {i} \n".format(i=i)))
        # Append the current team's goal count to the list
        ranking_de_gols_de_cada_time.append(ranking_do_time_atual_do_loop)
        # Advance the loop counter
        i = i + 1
    # Create the variable "k_novos_times_futebol" to hold the number of new
    # teams to be created from the old teams
    k_novos_times_futebol = int(input("Digite a quantidade de novos times a serem criados\n"))
    # The new teams must be at most half of the total so there are enough teams to pair
    metade_dos_times = n_times_de_futebol // 2
    # If the requested number of new teams is more than half
    if k_novos_times_futebol > metade_dos_times:
        # Keep asking while the number is more than half
        while k_novos_times_futebol > metade_dos_times:
            # Replace the value of "k_novos_times_futebol" with a value that is
            # at most half of the total teams
            k_novos_times_futebol = int(input("Digite a "
                                              "quantidade de "
                                              "novos times a serem criados"
                                              "tem que ser metade ou menos do times "
                                              "totais\n"))
    # Take the team with the highest goal ranking
    melhor_time = max(ranking_de_gols_de_cada_time)
    # Take the team with the lowest goal ranking
    pior_time = min(ranking_de_gols_de_cada_time)
    # Create i to control the loop
    i = 1
    # Loop while i is less than or equal to the number of new teams
    while i <= k_novos_times_futebol:
        # Print the new team's number and the numbers of the teams that form it
        print("O time {i} deve ser formado pelos times"
              " {melhor_time} e "
              "{pior_time}".format(melhor_time=melhor_time,
                                   pior_time=pior_time,
                                   i=i))
        # Remove the best team already used
        ranking_de_gols_de_cada_time.remove(melhor_time)
        # Remove the worst team already used
        ranking_de_gols_de_cada_time.remove(pior_time)
        # If the goal-ranking list "ranking_de_gols_de_cada_time" is not
        # empty
        if len(ranking_de_gols_de_cada_time) > 0:
            # Update the best team
            melhor_time = max(ranking_de_gols_de_cada_time)
            # Update the worst team
            pior_time = min(ranking_de_gols_de_cada_time)
        # Advance the loop counter
        i = i + 1
|
"""Check access to the source files of virtual datasets
When you read a virtual dataset, HDF5 will skip over source files it can't open,
giving you the virtual dataset's fill value instead.
It's not obvious whether you have a permissions problem, a missing file, or
a genuinely empty part of the dataset.
This script checks all virtual datasets in a file to alert you to any
problems opening the source files.
"""
import argparse
from collections import defaultdict
import h5py
import os
import sys
__version__ = '1.0'
def print_problem(filename, details):
print(" {}:".format(filename))
print(" ", details)
def check_dataset(path, obj):
print("Checking virtual dataset:", path)
files_datasets = defaultdict(list)
n_maps = 0
for vmap in obj.virtual_sources():
n_maps += 1
files_datasets[vmap.file_name].append(vmap.dset_name)
n_ok = 0
for src_path, src_dsets in files_datasets.items():
try:
# stat() gives nicer error messages for missing files, so
# try that first.
os.stat(src_path)
src_file = h5py.File(src_path, 'r')
except Exception as e:
print_problem(src_path, e)
continue
for src_dset in src_dsets:
try:
ds = src_file[src_dset]
except KeyError:
print_problem(src_path, "Missing dataset: {}".format(src_dset))
else:
if isinstance(ds, h5py.Dataset):
n_ok += 1
else:
print_problem(src_path,
"Not a dataset: {}".format(src_dset))
src_file.close()
print(" {}/{} sources accessible".format(n_ok, n_maps))
print()
return n_maps - n_ok # i.e number of inaccessible mappings
def find_virtual_datasets(file: h5py.File):
"""Return a list of 2-tuples: (path in file, dataset)"""
res = []
def visit(path, obj):
if isinstance(obj, h5py.Dataset) and obj.is_virtual:
res.append((path, obj))
file.visititems(visit)
return sorted(res)
def check_file(filename):
n_problems = 0
with h5py.File(filename, 'r') as f:
virtual_dsets = find_virtual_datasets(f)
print(f"Found {len(virtual_dsets)} virtual datasets to check.")
for path, ds in virtual_dsets:
n_problems += check_dataset(path, ds)
if not virtual_dsets:
pass
elif n_problems == 0:
print("All virtual data sources accessible")
else:
print("ERROR: Access problems for virtual data sources")
return n_problems
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('file', help="File containing virtual datasets to check")
args = ap.parse_args(argv)
n_problems = check_file(args.file)
if n_problems > 0:
return 1
if __name__ == '__main__':
sys.exit(main())
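# --- Illustrative sketch (not part of the original script) ---
# One way to produce a file with a virtual dataset to run this checker against,
# using h5py's VirtualSource/VirtualLayout API; the file and dataset names are
# examples only:
#
#   layout = h5py.VirtualLayout(shape=(4, 100), dtype='i4')
#   for i in range(4):
#       layout[i] = h5py.VirtualSource('source_%d.h5' % i, 'data', shape=(100,))
#   with h5py.File('combined.h5', 'w') as f:
#       f.create_virtual_dataset('vdata', layout, fillvalue=-1)
#
#   # then run:  python <this script> combined.h5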
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.core import mfrandom
import random
def get_exp2_syn_nmda_modfile():
x = """
COMMENT
MODIFIED BY MIKE HULL, TO ALLOW FOR STOCHASITIC TRANSMISSION
ENDCOMMENT
VERBATIM
#include <stdlib.h>
ENDVERBATIM
NEURON {
POINT_PROCESS Exp2SynNMDAMorphforge
RANGE tau1, tau2, e, i
NONSPECIFIC_CURRENT i
RANGE g
RANGE gtot
RANGE popening
RANGE voltage_dependancy
RANGE is_vdep_on
RANGE peak_conductance
RANGE eta
RANGE mg2conc
RANGE gamma
RANGE is_conductance_limited_on
RANGE conductance_limit
}
UNITS {
(nA) = (nanoamp)
(mV) = (millivolt)
(uS) = (microsiemens)
}
PARAMETER {
tau1=.1 (ms) <1e-9, 1e9>
tau2 = 10 (ms) <1e-9, 1e9>
e=0 (mV)
popening=1.0 () <0.0, 1.0>
is_vdep_on = 1
peak_conductance = -100000 (uS)
is_conductance_limited_on = -1
conductance_limit = -1
eta = 0.1
mg2conc=0.5
gamma=0.08 (/mV)
}
ASSIGNED {
v (mV)
i (nA)
g (uS)
gtot (uS)
factor
voltage_dependancy
}
STATE {
A (uS)
B (uS)
}
INITIAL {
LOCAL tp
if (tau1/tau2 > .9999) {
tau1 = .9999*tau2
}
A = 0
B = 0
tp = (tau1*tau2)/(tau2 - tau1) * log(tau2/tau1)
factor = -exp(-tp/tau1) + exp(-tp/tau2)
factor = 1/factor
VERBATIM
{
$COMMENT srand($randomseed);
}
ENDVERBATIM
}
FUNCTION vdep_func(Vin(mV), vdep)
{
if(vdep<0.5){
vdep_func = 1.0
}
else {
vdep_func = (1. / (1.+ eta*mg2conc*exp(-gamma*Vin)))
}
}
BREAKPOINT {
SOLVE state METHOD cnexp
voltage_dependancy = vdep_func(v, is_vdep_on)
g = (B - A)
gtot = g*voltage_dependancy
i = gtot*(v - e)
}
DERIVATIVE state {
A' = -A/tau1
B' = -B/tau2
}
NET_RECEIVE(weight (uS)) {
LOCAL clip, sv_max
VERBATIM
float x = ((float) rand()) / RAND_MAX;
if(x < popening)
{
ENDVERBATIM
weight = 1.0
A = A + weight*factor * peak_conductance
B = B + weight*factor * peak_conductance
if(is_conductance_limited_on> 0.5)
{
sv_max = weight*factor * peak_conductance * conductance_limit
if(A>sv_max) {A=sv_max}
if(B>sv_max) {B=sv_max}
}
://clip = weight*factor *3000 * peak_conductance
://if(A>clip) {A=clip}
://if(B>clip) {B=clip}
VERBATIM
}
ENDVERBATIM
}
"""
seed_val = (mfrandom.MFRandom._seed if mfrandom.MFRandom._seed
is not None else 0)
comment_val = ('//' if mfrandom.MFRandom._seed is not None else '')
return x.replace('$randomseed', '%d' % seed_val).replace('$COMMENT'
, comment_val)
|
from constants import Fields as F
from constants import NaN
def get(dic, key):
    # Get value from a dict; if the key does not exist, set it to NaN
if key not in dic:
if key == F.PLANS:
dic[key] = []
else:
dic[key] = NaN
return dic[key]
def process_plan(explain_list):
for e in explain_list:
e[F.MAXIMUM_COSTS] = 0
e[F.MAXIMUM_ROWS] = 0
e[F.MAXIMUM_DURATION] = 0
e[F.TOTAL_COST] = 0
process(e[F.PLAN], e)
def process(plan, explain):
# print(plan[F.NODE_TYPE])
calculatePlannerEstimate(plan, explain)
calculateActuals(plan, explain)
calculateMaximums(plan, explain)
for i in get(plan, F.PLANS):
process(i, explain)
return plan
def calculatePlannerEstimate(node, explain):
node[F.PLANNER_ESTIMATE_FACTOR] = get(node, F.ACTUAL_ROWS) / get(node, F.PLAN_ROWS)
# node[F.PLANNER_ESIMATE_DIRECTION] = EstimateDirection.under
if get(node, F.PLANNER_ESTIMATE_FACTOR) < 1:
# node[PLANNER_ESIMATE_DIRECTION] = EstimateDirection.over
node[F.PLANNER_ESTIMATE_FACTOR] = get(node, F.PLAN_ROWS) / get(node, F.ACTUAL_ROWS)
def calculateActuals(node, explain):
node[F.ACTUAL_DURATION] = get(node, F.ACTUAL_TOTAL_TIME)
node[F.ACTUAL_COST] = get(node, F.TOTAL_COST)
for subplan in get(node, F.PLANS):
if get(subplan, F.NODE_TYPE) != F.CTE_SCAN:
node[F.ACTUAL_DURATION] = get(node, F.ACTUAL_DURATION) - get(subplan, F.ACTUAL_TOTAL_TIME)
node[F.ACTUAL_COST] = get(node, F.ACTUAL_COST) - get(subplan, F.TOTAL_COST)
if get(node, F.ACTUAL_COST) < 0:
node[F.ACTUAL_COST] = 0
node[F.ACTUAL_DURATION] = get(node, F.ACTUAL_DURATION) * get(node, F.ACTUAL_LOOPS)
####**
explain[F.TOTAL_COST] += get(node, F.ACTUAL_COST)
def calculateMaximums(node, explain):
if explain[F.MAXIMUM_ROWS] < get(node, F.ACTUAL_ROWS):
explain[F.MAXIMUM_ROWS] = get(node, F.ACTUAL_ROWS)
if explain[F.MAXIMUM_COSTS] < get(node, F.ACTUAL_COST):
explain[F.MAXIMUM_COSTS] = get(node, F.ACTUAL_COST)
if explain[F.MAXIMUM_DURATION] < get(node, F.ACTUAL_DURATION):
explain[F.MAXIMUM_DURATION] = get(node, F.ACTUAL_DURATION)
|
import json
from calm.dsl.builtins import Project
from calm.dsl.builtins import Provider, Ref, read_local_file
DSL_CONFIG = json.loads(read_local_file(".tests/config.json"))
NTNX_LOCAL_ACCOUNT = DSL_CONFIG["ACCOUNTS"]["NTNX_LOCAL_AZ"]
ACCOUNT_NAME = NTNX_LOCAL_ACCOUNT["NAME"]
SUBNET_NAME = NTNX_LOCAL_ACCOUNT["SUBNETS"][0]["NAME"]
CLUSTER_NAME = NTNX_LOCAL_ACCOUNT["SUBNETS"][0]["CLUSTER"]
VCPUS = 1
STORAGE = 2 # GiB
MEMORY = 1 # GiB
class DSL_PROJECT(Project):
"""Test project"""
providers = [
Provider.Ntnx(
account=Ref.Account(ACCOUNT_NAME),
subnets=[Ref.Subnet(name=SUBNET_NAME, cluster=CLUSTER_NAME)],
)
]
quotas = {"vcpus": VCPUS, "storage": STORAGE, "memory": MEMORY}
|
import format
import parcel_info
import ups
import usps
import yaml
import sys
with open(r'config.yml') as file:
    # The FullLoader parameter handles the conversion from YAML
    # scalar values to the Python dictionary format
CONFIG = yaml.load(file, Loader=yaml.FullLoader)
if len(sys.argv)>1:
tracking_file = sys.argv[1]
else:
tracking_file = CONFIG['TRACKING_FILE']
with open(tracking_file, "r+") as f:
trackingNumbers = f.read().splitlines()
ups_conn = ups.UPSTrackerConnection(CONFIG['UPS_ACCESS_KEY'],
CONFIG['UPS_USER_ID'],
CONFIG['UPS_PASSWORD'])
usps_conn = usps.USPSTrackerConnection(CONFIG['USPS_USER_ID'],
CONFIG['USPS_SOURCE_NAME'])
parcel_object = parcel_info.ParcelInfo(ups_conn, usps_conn)
parcel_object.delay = 2
format.print_tracking(parcel_object, trackingNumbers)
|
from tkinter import *
from src.cliente import Client
from src.servido import Server
def Interfce():
janela = Tk()
janela.title("Socket")
janela.geometry("500x500")
janela.resizable(0, 0)
    # IP or DNS
Label(janela,text="IP").place(x=5,y=4)
    ip = Entry(janela)
ip.place(x=5,y=20)
    # Username
Label(janela,text="Usuário").place(x=155,y=4)
user = Entry(janela)
user.place(x=155,y=20)
    # Password
Label(janela,text="Senha").place(x=305,y=4)
pasw = Entry(janela,show="*")
pasw.place(x=305,y=20)
    # Port
Label(janela,text="Porta").place(x=460,y=4)
port = Entry(janela,width=5)
port.place(x=460,y=20)
    # Output text area
text_area = Text(janela,width=60,height=25)
text_area.place(x=5,y=90)
    # Buttons
try:
btn_client = Button(janela,text="Cliente", command=lambda: text_area.insert('1.0',Client(host=ip.get(), porta=int(port.get()))))
btn_client.place(x=5, y=50)
except:
pass
try:
btn_server = Button(janela,text="Server",command=lambda: Server(host=ip.get(),porta=int(port.get())))
btn_server.place(x=70,y=50)
except:
pass
janela.mainloop()
|
from ..schema.nexusphp import NexusPHP
from ..schema.site_base import SignState, NetworkState, Work
from ..utils.net_utils import NetUtils
class MainClass(NexusPHP):
URL = 'https://pt.hd4fans.org/'
USER_CLASSES = {
'downloaded': [805306368000, 3298534883328],
'share_ratio': [3.05, 4.55],
'days': [280, 700]
}
DATA = {
'fixed': {
'action': 'checkin'
}
}
def build_workflow(self, entry, config):
return [
Work(
url='/',
method='get',
succeed_regex='<span id="checkedin">\\[签到成功\\]</span>',
fail_regex=None,
check_state=('sign_in', SignState.NO_SIGN_IN),
is_base_content=True
),
Work(
url='/checkin.php',
method='post',
data=self.DATA,
succeed_regex=None,
fail_regex=None,
check_state=('network', NetworkState.SUCCEED)
),
Work(
url='/',
method='get',
succeed_regex='<span id="checkedin">\\[签到成功\\]</span>',
fail_regex=None,
check_state=('final', SignState.SUCCEED)
)
]
def build_selector(self):
selector = super(MainClass, self).build_selector()
NetUtils.dict_merge(selector, {
'details': {
'hr': None
}
})
return selector
|
from nonbonded.library.factories.inputs.inputs import InputFactory
__all__ = ["InputFactory"]
|
from flask import Flask, render_template, url_for, request, redirect
app = Flask(__name__)
# Routing for all the HTML pages so the application can redirect us to the right pages from the site
@app.route('/', methods=['POST', 'GET'])
def index():
return render_template('index.html')
@app.route('/Contact', methods=['POST', 'GET'])
def contact():
return render_template('Contact.html')
@app.route('/Qui nous sommes', methods=['POST', 'GET'])
def quiNousSommes():
return render_template('Qui nous sommes.html')
@app.route('/FAQ', methods=['POST', 'GET'])
def faq():
return render_template('FAQ.html')
@app.route('/Nos solutions', methods=['POST', 'GET'])
def nosSolutions():
return render_template('Nos solutions.html')
@app.route('/Nos offres', methods=['POST', 'GET'])
def nosOffres():
return render_template('Nos offres.html')
@app.route('/page docteur', methods=['POST', 'GET'])
def docteur():
return render_template('page docteur.html')
@app.route('/page essential', methods=['POST', 'GET'])
def essential():
return render_template('page essential.html')
@app.route('/page pharmacy', methods=['POST', 'GET'])
def pharmacy():
return render_template('page pharmacy.html')
if __name__ == "__main__":
app.run(debug=True)
|
__all__ = ["decryptor", "encryptor", "characters_keys", "answer", "introduce"]
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login
from django.views.generic import FormView, CreateView
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from .forms import RegistrationForm
from .models import student
applink = "/searcher"
#put the full weblink to forum here
forumlink = ""
boards_list = [
'CBSE Class X',
'CBSE Class XII',
'JEE',
'ICSE',
'WBJEE',
]
def index(request):
items_list = []
for board in boards_list:
dict = {
'header': "Search past %s papers" % board,
'link': "applink" + '/search?',
'content': "Click here to search within %s exam question papers" % board,
}
items_list.append(dict)
context_dict = {
'applink' : applink,
'forumlink' : forumlink,
'itemslist' : items_list,
}
return render(request, 'searcher/index.html', context_dict)
class Login(FormView):
template_name = 'searcher/login.html'
form_class = AuthenticationForm
success_url = '/searcher/'
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
# Sets a test cookie to make sure the user has cookies enabled
request.session.set_test_cookie()
return super(Login, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
auth_login(self.request, form.get_user())
# If the test cookie worked, go ahead and
        # delete it since it's no longer needed
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return super(Login, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(Login, self).get_context_data(**kwargs)
context['applink'] = applink
context['forumlink'] = forumlink
return context
class Register(CreateView):
template_name = 'searcher/register.html'
form_class = RegistrationForm
model = student
success_url = '/searcher/login'
def get_context_data(self, **kwargs):
context = super(Register, self).get_context_data(**kwargs)
context['applink'] = applink
context['forumlink'] = forumlink
return context
|
#
# Script that tries to find to which function given address belongs. (in a messy way :P)
#
import os
import re
import sys
swosppDir = os.getenv('SWOSPP', r'c:\swospp')
swosDir = os.getenv('SWOS', r'c:\games\swos')
hexRe = '[a-fA-F0-9]+'
def getAddress():
if len(sys.argv) < 2:
sys.exit('Usage {} <address>'.format(os.path.basename(sys.argv[0])))
try:
return int(sys.argv[1], 0)
except ValueError:
try:
return int(sys.argv[1], 16)
except:
sys.exit("Don't know what to do with that address.")
def getBuild():
    build = 'rel'
if os.path.isfile(os.path.join(swosppDir, 'bin', 'DEBUG')):
build = 'dbg'
return build
def getMapFilename(build):
return os.path.join(swosppDir, 'var', 'swospp_' + build + '.map')
def parseSwosppMapFile(build, address):
addressRegex = re.compile(r'\s+(?P<address>[a-fA-F0-9]+)\s+(?P<function>\w+)\s+(?P<section>[\w\.]+)\s+(?P<objFile>[\w\.]+)')
with open(getMapFilename(build)) as mapFile:
prevAddress = 0
prevFunction = ''
prevFunctionObjFile = ''
for line in mapFile.readlines():
match = addressRegex.match(line)
if match and match.group('section') == '.text':
functionAddress = int(match.group('address'), 16)
functionName = match.group('function')
functionObjFile = match.group('objFile')
if functionAddress >= address:
if prevAddress == 0:
prevAddress = functionAddress
prevFunction = functionName
prevFunctionObjFile = functionObjFile
return prevAddress, functionAddress, prevFunction, functionName, prevFunctionObjFile, functionObjFile
prevAddress = functionAddress
prevFunction = functionName
prevFunctionObjFile = functionObjFile
return None, None, None, None, None, None
generatedLabelRegex = re.compile(r'(LBL|LVL|LBB|LBE|LFE|LFB|LC|LCOLDB|LHOTB|LCOLDE|LHOTE|L)\d+')
def isGeneratedLabel(label):
return generatedLabelRegex.match(label)
codeRegex = re.compile(r'\s+\d+\s+(?P<address>[a-fA-F0-9]+)\s+[a-fA-F0-9()]+')
funcRegex = re.compile(r'\s+\d+\s+(?P<functionName>[\w.]+):$')
sectionRegex = re.compile(r'\s+\d+\s+\.?section\s+(?P<sectionName>[\w.]+),("[^"]+")?$')
def pinpointFunction(delta, functionName, objFile):
lstFilename = os.path.join(swosppDir, 'var', objFile.replace('.obj', '.lst'))
if not os.path.isfile(lstFilename):
return None
with open(lstFilename) as lstFile:
symbolRegex = re.compile(r'\s+\d+\s+' + functionName + ':$')
startLooking = False
counting = False
currentFunctionName = functionName
currentSection = ''
for line in lstFile.readlines():
sectionMatch = sectionRegex.match(line)
if sectionMatch:
currentSection = sectionMatch.group('sectionName')
if not currentSection.startswith('.text'):
continue
if startLooking:
codeMatch = codeRegex.match(line)
if codeMatch:
startAddress = int(codeMatch.group('address'), 16)
startLooking = False
counting = True
elif counting:
funcMatch = funcRegex.match(line)
if funcMatch and not isGeneratedLabel(funcMatch.group('functionName')):
currentFunctionName = funcMatch.group('functionName')
continue
else:
codeMatch = codeRegex.match(line)
if codeMatch:
address = int(codeMatch.group('address'), 16)
if address - startAddress >= delta:
return currentFunctionName
else:
symMatch = symbolRegex.match(line)
if symMatch:
startLooking = True
return None
def findNegativeOffsetFunction(delta, functionName, functionAddress, objFile):
symbolRegex = re.compile(r'\s+\d+\s+' + functionName + ':$')
lstFilename = os.path.join(swosppDir, 'var', objFile.replace('.obj', '.lst').replace('.cpp', ''))
if not os.path.isfile(lstFilename):
return None
with open(lstFilename) as lstFile:
currentSection = ''
currentFunction = ''
currentOffset = 0
currentFunctionOffset = 0
lineToLabel = {}
offsetToFunctionStart = {}
for line in lstFile.readlines():
sectionMatch = sectionRegex.match(line)
if sectionMatch:
currentSection = sectionMatch.group('sectionName')
if not currentSection.startswith('.text'):
continue
codeMatch = codeRegex.match(line)
if codeMatch:
currentOffset = int(codeMatch.group('address'), 16)
lineToLabel[currentOffset] = currentFunction
offsetToFunctionStart[currentOffset] = currentFunctionOffset
else:
symMatch = symbolRegex.match(line)
if symMatch:
start = currentOffset - delta
while start <= currentOffset:
if start in lineToLabel:
return lineToLabel[start], functionAddress - (currentOffset - offsetToFunctionStart[start])
start += 1
return functionName, functionAddress
else:
funcMatch = funcRegex.match(line)
if funcMatch and not isGeneratedLabel(funcMatch.group('functionName')):
currentFunction = funcMatch.group('functionName')
currentFunctionOffset = currentOffset
return None, None
def getLoadAddress():
loadAddrRegex = re.compile((r'SWOS\+\+.*loaded at 0x(?P<swosppLoadAddress>{})'
'.*SWOS code segment starting at 0x(?P<swosCodeAddress>{})'
'.*data starting at 0x(?P<swosDataAddress>{})').format(hexRe, hexRe, hexRe))
try:
with open(os.path.join(swosDir, 'SWOSPP.LOG')) as logFile:
for line in logFile.readlines():
loadAddrMatch = loadAddrRegex.search(line)
if loadAddrMatch:
return int(loadAddrMatch.group('swosppLoadAddress'), 16), int(loadAddrMatch.group('swosCodeAddress'), 16), \
int(loadAddrMatch.group('swosDataAddress'), 16)
    except OSError:
pass
defaultSwosppLoadAddress = 0x387000
defaultSwosCodeAddress = 0x220000
defaultSwosDataAddress = 0x2c1000
return defaultSwosppLoadAddress, defaultSwosCodeAddress, defaultSwosDataAddress
def getBase(build):
baseRegex = re.compile(r'(?P<base>{}).*\.text.*PUBLIC\s+USE32\s+PARA'.format(hexRe))
try:
with open(os.path.join(swosppDir, 'var', 'swospp_' + build + '.map')) as mapFile:
for line in mapFile.readlines():
baseMatch = baseRegex.search(line)
if baseMatch:
return int(baseMatch.group('base'), 16)
    except OSError:
pass
return 0x401000
def findSwosAddress(address, isCode):
addrRegex = re.compile(r'^\s*000{}:(?P<address>{})\s+(?P<symbol>[\w]+)$'.format(('2', '1')[isCode], hexRe))
closestAddress = -1
smallestDiff = address + 1
section = ('data', 'code')[isCode]
idaOffset = (0xc0000, 0x10000)[isCode]
idaAddress = address + idaOffset
with open(os.path.join(swosppDir, 'mapcvt', 'swos.map')) as mapFile:
for line in mapFile.readlines():
addrMatch = addrRegex.match(line)
if addrMatch:
currentAddress = int(addrMatch.group('address'), 16)
if currentAddress == address:
print(addrMatch.group('symbol'), hex(idaAddress), '[SWOS {}: exact match]'.format(section))
return
elif currentAddress < address and address - currentAddress < smallestDiff:
smallestDiff = address - currentAddress
closestAddress = currentAddress
symbol = addrMatch.group('symbol')
if closestAddress > 0:
print('{}+{} {} [SWOS {}]'.format(symbol, hex(smallestDiff), hex(idaAddress), section))
else:
print('Address not found.')
def findSwosppAddress(build, address):
address >= 0 or sys.exit('Address seems to be below SWOS++ area.')
functionAddress, nextFunctionAddress, functionName, nextFunctionName, objFile, nextObjFile = parseSwosppMapFile(build, address)
functionAddress or sys.exit('Specified address not found!')
if nextFunctionAddress == address:
functionAddress = nextFunctionAddress
functionName = nextFunctionName
objFile = nextObjFile
else:
newFunctionName = pinpointFunction(address - functionAddress, functionName, objFile)
if not newFunctionName and objFile != nextObjFile:
newFunctionName, newFunctionAddress = findNegativeOffsetFunction(nextFunctionAddress - address,
nextFunctionName, nextFunctionAddress, nextObjFile)
if newFunctionName:
functionName = newFunctionName
objFile = nextObjFile
functionAddress = newFunctionAddress
print(hex(functionAddress), '{}{:+#x}'.format(functionName, functionAddress - address), objFile)
def main():
address = getAddress()
build = getBuild()
swosppLoadAddress, swosCodeAddress, swosDataAddress = getLoadAddress()
belongsTo = ''
if address >= swosCodeAddress and address < swosDataAddress:
belongsTo = 'SWOS code'
elif swosppLoadAddress < swosCodeAddress:
if address >= swosDataAddress:
belongsTo = 'SWOS data'
elif address >= swosppLoadAddress:
belongsTo = 'SWOS++'
else:
if address >= swosppLoadAddress:
belongsTo = 'SWOS++'
elif address >= swosDataAddress:
belongsTo = 'SWOS data'
belongsTo or sys.exit('Address seems to be below SWOS/SWOS++ area.')
if belongsTo == 'SWOS code':
address = address - swosCodeAddress
findSwosAddress(address, isCode=True)
elif belongsTo == 'SWOS data':
address = address - swosDataAddress
findSwosAddress(address, isCode=False)
else:
base = getBase(build)
address = address - swosppLoadAddress + base
findSwosppAddress(build, address)
if __name__ == '__main__':
main()
|
# coding=utf-8
import re
def remove_comment(text):
text = re.sub(re.compile("#.*"), "", text)
text = "\n".join(filter(lambda x: x, text.split("\n")))
return text
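# illustrative example (not from the original source):
#   remove_comment("x = 1  # set x\n# only a comment\n\ny = 2")
# returns "x = 1  \ny = 2": inline comments are stripped (their leading spaces
# remain) and comment-only or blank lines are dropped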
|
f = 1.0; print(f.hex())
f = 1.5; print(f.hex())
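# expected output of float.hex() for these two values:
#   0x1.0000000000000p+0
#   0x1.8000000000000p+0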
|
# stdlib
import glob
import json
import os
from pathlib import Path
import platform
import sys
from typing import Any
from typing import Dict
from typing import List
from typing import Union
# third party
from jinja2 import Template
from packaging import version
# this forces the import priority to use site-packages first and current dir last
# this allows us to import torch when calling this file directly since there is a
# subdir here also called torch
del sys.path[0]
sys.path.append("")
# third party
import torch as th # noqa: E402
# syft absolute
from syft.lib.torch import allowlist # noqa: E402
TORCH_VERSION = version.parse(th.__version__.split("+")[0])
py_ver = sys.version_info
PYTHON_VERSION = version.parse(f"{py_ver.major}.{py_ver.minor}")
OS_NAME = platform.system().lower()
# we need a file to keep all the errors in that makes it easy to debug failures
TARGET_PLATFORM = f"{PYTHON_VERSION}_{OS_NAME}"
REPORT_FILE_PATH = os.path.abspath(
Path(__file__) / "../../../.." / f"allowlist_report_{TARGET_PLATFORM}.html"
)
report_path = os.path.abspath((Path(__file__) / "../../../.."))
support_files = glob.glob(os.path.join(report_path, "allowlist_test_support_*.jsonl"))
if len(support_files) < 1:
print("Generate allowlist_test_support files first.")
sys.exit(1)
# complex have been removed for now as they are rare and have some known bugs
# qints have been disabled for now and are added as a separate ticket
dtypes = [
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
"float16",
"bfloat16",
"float32",
"float64",
# "complex32",
# "complex64",
# "complex128",
# "qint8",
# "quint8",
# "qint32",
]
# 1.4.0 has been temporarily disabled and will be re-investigated
torch_versions = ["1.5.0", "1.5.1", "1.6.0"]
# this handles instances where the allow list provides more meta information
def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:
if isinstance(support_dict, str):
return support_dict
else:
return support_dict["return_type"]
BASIC_OPS = list()
BASIC_OPS_RETURN_TYPE = {}
# here we are loading up the true allowlist which means that we are only testing what
# can be used by the end user
for method, return_type_name_or_dict in allowlist.items():
if method.startswith("torch.Tensor."):
return_type = get_return_type(support_dict=return_type_name_or_dict)
method_name = method.split(".")[-1]
BASIC_OPS.append(method_name)
BASIC_OPS_RETURN_TYPE[method_name] = return_type
ops: Dict[str, Any] = {}
# these are all the expected ops
for op in BASIC_OPS:
ops[op] = {"dtypes": {}}
for torch_version in torch_versions:
ops[op]["dtypes"][torch_version] = {}
for dtype in dtypes:
ops[op]["dtypes"][torch_version][dtype] = {
"status": "untested",
"num_pass": 0,
"num_fail": 0,
"num_skip": 0,
"num_not_available": 0,
}
def parse_filename_versions(file_path: str) -> List[str]:
filename = os.path.basename(file_path)
return filename.replace("allowlist_test_support_", "").split("_")
# parse the data by reading every line in and building a dict
for support_file in support_files:
with open(support_file, "r") as f:
versions = parse_filename_versions(support_file)
python_version = versions[0]
torch_version = versions[1]
os_name = versions[2]
for line in f.readlines():
test_run = json.loads(line)
op_name = test_run["op_name"]
dtype = test_run["tensor_type"]
status = test_run["status"]
if op_name not in ops:
print(f"op {op_name} not found in main ops list")
continue
if dtype not in ops[op_name]["dtypes"][torch_version]:
print(f"dtype {dtype} not found in {torch_version} main ops list")
else:
                # here we have many repeat tests for the same op and dtype; for now let's
                # mark them as either skip, pass, or fail where failure takes highest
                # priority, then pass, then skip, meaning a single failure will mark
                # the whole cell, with no failure a single pass will mark it as pass
                # and if nothing but skip, it will be marked skip
                # let's count them for later
if status in ["pass", "fail", "skip", "not_available"]:
key = f"num_{status}"
ops[op_name]["dtypes"][torch_version][dtype][key] += 1
# recalculate a rough ratio
num_pass = ops[op_name]["dtypes"][torch_version][dtype]["num_pass"]
num_fail = ops[op_name]["dtypes"][torch_version][dtype]["num_fail"]
                ratio = (num_pass + 1) / (num_pass + num_fail + 1)
ops[op_name]["dtypes"][torch_version][dtype]["majority"] = (
ratio >= 0.5 and num_pass > 0
)
current_status = ops[op_name]["dtypes"][torch_version][dtype]["status"]
if status == "fail":
# set fail
ops[op_name]["dtypes"][torch_version][dtype]["status"] = "fail"
elif status == "pass" and current_status != "fail":
# set pass
ops[op_name]["dtypes"][torch_version][dtype]["status"] = "pass"
elif status == "not_available" and current_status == "untested":
# set not_available
ops[op_name]["dtypes"][torch_version][dtype][
"status"
] = "not_available"
elif status == "skip" and current_status == "not_available":
# set skip
ops[op_name]["dtypes"][torch_version][dtype]["status"] = "skip"
with open(__file__.replace(".py", ".j2"), "r") as f:
tm = Template(f.read())
report_html = tm.render(dtypes=dtypes, torch_versions=torch_versions, ops=ops)
with open(REPORT_FILE_PATH, "w+") as f:
f.write(report_html)
print("\nPySyft Torch Compatibility Report Created:")
print(REPORT_FILE_PATH)
|
# SVM = Support Vector Machine
import numpy as np
from sklearn import preprocessing, neighbors, svm
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)
X = np.array(df.drop(['class'], axis=1))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
example_measures = np.array([[4, 2, 1, 1, 1, 2, 3, 2, 1], [4, 2, 1, 2, 2, 2, 3, 2, 1]])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
|
#!/usr/bin/env python3
def nl_to_string(n,l,elec):
l_dict = {0:'s',1:'p',2:'d',3:'f'}
return str(n)+l_dict[l]+str(elec)
def print_string(entry_list):
conf_strings = []
for entry in entry_list:
string = nl_to_string(entry['n'],entry['l'],entry['elec'])
conf_strings.append(string)
print('.'.join(conf_strings))
def get_combs(nl):
combs = []
for n in range(1,nl+1):
l = nl - n
if l < n:
combs.append((n,l))
return combs
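# illustrative: get_combs(3) == [(2, 1), (3, 0)], i.e. the 2p and 3s subshells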
def get_entries(N):
entries = []
nl = 1
while N > 0:
combs = get_combs(nl)
for comb in combs:
n = comb[0]
l = comb[1]
elec = 4*l + 2
if elec <= N:
N -= elec
elif elec > N:
elec = N
N = 0
entries.append({'n':n,'l':l,'elec':elec})
if N == 0:
break
nl += 1
return entries
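# illustrative: for N = 10 the entries built above are 1s2, 2s2 and 2p6,
# so print_string() prints "1s2.2s2.2p6"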
if __name__ == '__main__':
N = int(input('Give me the atomic number! '))
conf_entries = get_entries(N)
print_string(conf_entries)
|
# Problem Set 4A
# Name: <your name here>
# Collaborators:
# Time Spent: x:xx
def get_permutations(sequence):
'''
Enumerate all permutations of a given string
sequence (string): an arbitrary string to permute. Assume that it is a
non-empty string.
You MUST use recursion for this part. Non-recursive solutions will not be
accepted.
Returns: a list of all permutations of sequence
Example:
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: depending on your implementation, you may return the permutations in
a different order than what is listed here.
'''
    if len(sequence) == 1:
        output = [sequence]
else:
output = []
for i in get_permutations(sequence[1:]):
for j in range(len(i) + 1):
tmp = list(i)
tmp.insert(j, sequence[0])
tmp = ''.join(tmp)
output.append(tmp)
return output
if __name__ == '__main__':
# #EXAMPLE
# example_input = 'abc'
# print('Input:', example_input)
# print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
# print('Actual Output:', get_permutations(example_input))
# # Put three example test cases here (for your sanity, limit your inputs
# to be three characters or fewer as you will have n! permutations for a
# sequence of length n)
example_input = 'abc'
print('Input:', example_input)
print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
print('Actual Output:', get_permutations(example_input))
|
#!/usr/bin/env python3
import pytest
import create_release as cr
# unit test
def test_version_from_string():
assert cr.version_from_string("") == [0, 0, 0]
assert cr.version_from_string("vffdf") == [0, 0, 0]
assert cr.version_from_string("v1.1.1") == [1, 1, 1]
assert cr.version_from_string("v999.9999.99999") == [999, 9999, 99999]
assert cr.version_from_string("999.9999.99999") == [999, 9999, 99999]
assert cr.version_from_string("r999.9999.99999") == [0, 0, 0]
# unit test
def test_bump_version():
for i in range(200):
for j in range(200):
assert cr.bump_version([i,j,0], 'major')[0] == '{}.{}.{}'.format(i+1, 0, 0)
assert cr.bump_version([1,i,j], 'major')[0] == '2.0.0'
assert cr.bump_version([i,j,0], 'minor')[0] == '{}.{}.{}'.format(i, j + 1, 0)
assert cr.bump_version([2,i,j], 'minor')[0] == '{}.{}.{}'.format(2, i + 1, 0)
assert cr.bump_version([3,i,j], 'patch')[0] == '{}.{}.{}'.format(3, i , j + 1)
# unit test - list of tags (valid and invalid versions)
def test_latest_ver_from_list():
assert cr.latest_ver_from_list('my_tag a.b.c gylle') == [0, 0, 0]
assert cr.latest_ver_from_list('v0.0.0 va.b.c gylle v3.2.1') == [3, 2, 1]
assert cr.latest_ver_from_list('v0.0.0 v1.2.3 v3.2.1') == [3, 2, 1]
assert cr.latest_ver_from_list('v3.2.1 v1.2.3 v3.2.0') == [3, 2, 1]
assert cr.latest_ver_from_list('v1.2.40 v1.2.30 v1.2.10') == [1, 2, 40]
assert cr.latest_ver_from_list('v1.4.10 v1.3.10 v1.2.80') == [1, 4, 10]
assert cr.latest_ver_from_list('v200.3.10 v300.4.10 v100.2.80') == [300, 4, 10]
#
if __name__ == '__main__':
print("to run the unit tests type")
print("> pytest unit_test.py")
|
from django.urls import path
from .views import base
urlpatterns = [
path('', base, name='base'),
]
|
import pytest
import time
from datetime import date, datetime
from unittest.mock import MagicMock, Mock, PropertyMock, patch
from hubbypy.hubbypy.hub_api import HubSpot
from hubbypy.hubbypy.contact_properties import (
AccessorProperty,
BaseUserProperty,
ConstantProperty,
FunctionProperty,
UserPropertyManager
)
hs_user_property_manager = UserPropertyManager(
groups=[
{
'name': 'your_org',
'displayName': 'Your API Data'
}
]
)
class SimpleCache:
def __init__(self):
self._cache = {}
def set(self, key, value):
self._cache[key] = value
def get(self, key):
return self._cache.get(key)
def test_bool_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
)
assert active_user.hs_type == 'enumeration'
assert active_user.field_type == 'booleancheckbox'
def test_bool_native_type_to_hs_type_get_dict():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
)
_dict = active_user.get_dict()
assert 'Yes' in [o['label'] for o in _dict['options']]
def test_date_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='date',
)
assert active_user.hs_type == 'date'
assert active_user.field_type == 'date'
def test_datetime_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='datetime',
)
assert active_user.hs_type == 'datetime'
assert active_user.field_type == 'date'
def test_varchar_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='varchar',
)
assert active_user.hs_type == 'string'
assert active_user.field_type == 'text'
def test_textarea_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='textarea',
)
assert active_user.hs_type == 'string'
assert active_user.field_type == 'textarea'
def test_number_native_type_to_hs_type():
active_user = BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='number',
)
assert active_user.hs_type == 'number'
assert active_user.field_type == 'number'
def test_cannot_use_type_other_than_those_listed():
with pytest.raises(KeyError):
BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='numberolog',
)
def test_adding_two_properties_with_same_name_raises_error():
property_manager = UserPropertyManager(
groups=[
{
'name': 'your_org',
'displayName': 'Your API Data'
}
]
)
property_manager.add_prop(
BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
)
)
with pytest.raises(ValueError) as err:
property_manager.add_prop(
BaseUserProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
)
)
assert 'Manager already contains' in str(err.value)
def test_get_value_user_accessor():
user = Mock()
user.is_active = 'yes'
active_user = AccessorProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
accessor='is_active'
)
assert active_user._get_value(user) == 'yes'
def test_get_value_user_accessor_date():
user = Mock()
now = datetime.now()
user.joined_date = now
active_user = AccessorProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='date',
accessor='joined_date'
)
expected = int(time.mktime(now.date().timetuple()) * 1000)
assert active_user.get_formatted_value(user) == expected
def test_get_value_user_accessor_date_time():
user = Mock()
now = datetime.now()
user.joined_date = now
active_user = AccessorProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='datetime',
accessor='joined_date'
)
expected = int(time.mktime(now.timetuple()) * 1e3 + now.microsecond / 1e3)
assert active_user.get_formatted_value(user) == expected
def test_get_value_func_property():
user = Mock()
func = MagicMock(return_value='test call')
active_user = FunctionProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
func=func,
send_user=False
)
assert active_user._get_value(user) == 'test call'
assert func.called
def test_get_formatted_value_user_accessor_boolean():
user = Mock()
user.is_active = True
active_user = AccessorProperty(
name='some_org_is_active',
label='Active Account User',
group_name='some_org',
native_type='bool',
accessor='is_active'
)
assert active_user.get_formatted_value(user) == 'true'
def test_nested_user_accessor():
user = Mock()
user.company = Mock()
user.company.name = 'Test Account'
company_name = AccessorProperty(
name='some_org_company_name',
label='User Account Name',
group_name='some_org',
native_type='varchar',
accessor='company.name'
)
assert company_name.get_formatted_value(user) == 'Test Account'
def test_constant_property():
user = Mock()
company_name = ConstantProperty(
name='some_org_company_name',
label='User Account Name',
group_name='some_org',
native_type='varchar',
value='Some Company'
)
assert company_name.get_formatted_value(user) == 'Some Company'
def test_request_queueing():
with patch('hubbypy.hubbypy.hub_api.HubSpot.client',
new_callable=PropertyMock) as mock_client:
client = Mock()
client.request = MagicMock(return_value=True)
mock_client.return_value = client
cache = SimpleCache()
test_hubspot = HubSpot(
api_key='testing',
user_property_manager=hs_user_property_manager,
cache_backend=cache
)
test_hubspot.request('post', 'www.test.com')
test_hubspot.request('post', 'www.test.com')
test_hubspot.request('post', 'www.test.com')
assert len(cache.get(test_hubspot.cache_key)) == 3
assert test_hubspot.client.request.called
def test_request_queueing_sleeping():
with patch('hubbypy.hubbypy.hub_api.time.sleep', return_value=None) as sleeper:
with patch('hubbypy.hubbypy.hub_api.HubSpot.client',
new_callable=PropertyMock) as mock_client:
client = Mock()
client.request = MagicMock(return_value=True)
mock_client.return_value = client
cache = SimpleCache()
test_hubspot = HubSpot(
api_key='testing',
user_property_manager=hs_user_property_manager,
cache_backend=cache
)
cache.set(test_hubspot.cache_key, None)
for _ in range(12):
test_hubspot.request('post', 'www.test.com')
assert len(cache.get(test_hubspot.cache_key)) == 12
assert sleeper.call_count == 4
def test_old_requests_cleared_from_cache():
with patch('hubbypy.hubbypy.hub_api.time.sleep', return_value=None) as sleeper:
with patch('hubbypy.hubbypy.hub_api.HubSpot.client',
new_callable=PropertyMock) as mock_client:
client = Mock()
client.request = MagicMock(return_value=True)
mock_client.return_value = client
cache = SimpleCache()
test_hubspot = HubSpot(
api_key='testing',
user_property_manager=hs_user_property_manager,
cache_backend=cache
)
now = time.time()
old_time_stamps = [now - 11 for _ in range(11)]
cache.set(test_hubspot.cache_key, old_time_stamps)
test_hubspot.request('post', 'www.test.com')
assert len(cache.get(test_hubspot.cache_key)) == 1
assert sleeper.call_count == 0
|
from .models import MixerBlock, MlpBlock, MlpMixer # noqa
__version__ = "0.0.1"
|
from rtl.tasks.divide import divide
from dummy_data import KWARGS, CONTENTS
def test_divide():
KWARGS = {
'operations': [
{
'a': 'a',
'b': 'b',
'column': 'c1'
},
{
'a': 'a',
'b': 2,
'column': 'c2'
}
]
}
r = divide(KWARGS, CONTENTS)
assert r['c1'][2] == 1
assert r['c2'][2] == 2.0
|
from __future__ import annotations
import pytest
import coredis
@pytest.fixture
def s(redis_cluster_server):
cluster = coredis.RedisCluster(
startup_nodes=[{"host": "localhost", "port": 7000}], decode_responses=True
)
assert cluster.connection_pool.nodes.slots == {}
assert cluster.connection_pool.nodes.nodes == {}
yield cluster
cluster.connection_pool.disconnect()
@pytest.fixture
def sr(redis_cluster_server):
cluster = coredis.RedisCluster(
startup_nodes=[{"host": "localhost", "port": 7000}],
reinitialize_steps=1,
decode_responses=True,
)
yield cluster
cluster.connection_pool.disconnect()
@pytest.fixture
def ro(redis_cluster_server):
cluster = coredis.RedisCluster(
startup_nodes=[{"host": "localhost", "port": 7000}],
readonly=True,
decode_responses=True,
)
yield cluster
cluster.connection_pool.disconnect()
@pytest.fixture(autouse=True)
def cluster(redis_cluster_server):
pass
|
import os
import datetime
import logging
import sqlite3
import pytest
from utils import setup_mdb_dir, all_book_info, load_db_from_sql_file, TESTS_DIR
from manga_db.manga_db import MangaDB
from manga_db.manga import Book
from manga_db.ext_info import ExternalInfo
from manga_db.constants import LANG_IDS
@pytest.mark.parametrize("title_eng, title_foreign, expected", [
("English", "Foreign", "English / Foreign"),
("English", None, "English"),
(None, "Foreign", "Foreign")])
def test_build_title(title_eng, title_foreign, expected):
assert Book.build_title(title_eng, title_foreign) == expected
def test_fetch_extinfo(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=16)
assert b.ext_infos == []
db_con = memdb
ei_rows_man = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows_man[0])
ei2 = ExternalInfo(mdb, b, **ei_rows_man[1])
assert b._fetch_external_infos() == [ei1, ei2]
def test_fetch_assoc_col(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=14)
tags = ["Ahegao", "Anal", "Collar", "Large Breasts", "Maid", "Mind Break",
"Mind Control", "Nakadashi", "Office Lady", "Pantyhose", "Rape", "Stockings",
"X-ray"]
assert sorted(b._fetch_associated_column("tag")) == sorted(tags)
assert b._fetch_associated_column("character") == []
assert b._fetch_associated_column("artist") == ["Fan no Hitori"]
def test_upd_assoc_col(monkeypatch, setup_mdb_dir):
# update_assoc_columns/get_assoc_cols
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
    # pass last_change kwarg so it doesn't get auto-set and count as a change
b = Book(mdb, in_db=False, id=12, last_change=datetime.date.today())
ei_row = db_con.execute("SELECT * FROM ExternalInfo WHERE id = 12").fetchone()
ei = ExternalInfo(mdb, b, **ei_row)
tags = ("Anal;Femdom;Large Breasts;Nakadashi;Straight Shota;Big Ass;Short Hair;Hat"
";Royalty;Dark Skin;Huge Penis;Big Areola;Defloration;Double Penetration;"
"Elder Sister;Tall Girl".split(";"))
artists = ["Kaneda Asou"]
category = ["Doujinshi"]
groups = ["Dokumushi Shokeitai"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == groups
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == []
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == []
assert assoc_cols["ext_infos"] == [ei]
# upd
# changes
b.tag = ["delchange1", "delchange"]
b.category = ["testcat"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == groups
assert b.list == lists
assert b.character == []
assert b.collection == []
assert b.parody == []
assert b.ext_infos == [ei]
b = Book(mdb, in_db=False, id=16, last_change=datetime.date.today())
ei_rows = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows[0])
ei2 = ExternalInfo(mdb, b, **ei_rows[1])
tags = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;Layer Cake;Selfcest".split(";"))
artists = ["bariun"]
category = ["Doujinshi"]
characters = ["Akira Kurusu", "Futaba Sakura"]
parodies = ["Persona 5 / ペルソナ5"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == []
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == characters
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == parodies
assert assoc_cols["ext_infos"] == [ei1, ei2]
# upd
# changes
b.groups = ["delchange1", "delchange"]
b.artist = ["tartist"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == []
assert b.list == lists
assert b.character == characters
assert b.collection == []
assert b.parody == parodies
assert b.ext_infos == [ei1, ei2]
def test_diff(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
# not testing change_str
b1_data = dict(
id=None,
title_eng="Same",
title_foreign="Different1",
language_id=1,
pages=25,
status_id=1,
my_rating=4.3,
category=["Manga"],
collection=["Diff collection1"],
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=0
)
b1 = Book(mdb, **b1_data)
b2_data = dict(
id=None,
title_eng="Same",
title_foreign="Different2",
language_id=1,
pages=27,
status_id=1,
my_rating=None,
category=["Manga"],
collection=["Diff collection2"],
groups=["Artistgroup"],
artist=["Diff", "Diff2", "Diff3"],
parody=["Blabla"],
character=["Char1", "Char5", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 4, 3),
note=None,
favorite=1
)
b2 = Book(mdb, **b2_data)
changes, change_str = b1.diff(b2)
changes_expected = dict(
title_foreign="Different2",
pages=27,
my_rating=None,
# added removed
collection=({"Diff collection2"}, {"Diff collection1"}),
artist=({"Diff", "Diff3"}, {"Diff1"}),
character=({"Char5"}, {"Char2"}),
last_change=datetime.date(2018, 4, 3),
favorite=1
)
assert changes == changes_expected
def test_add_rem_assoc(monkeypatch, setup_mdb_dir):
# _add/_remove assoc col
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
b = mdb.get_book(5)
tag_before = b.tag.copy()
tag_change = ["Test1", "Test2", "Blabla"]
# _add_associated_column_values doesnt commit
with mdb.db_con:
b._add_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
with mdb.db_con:
b._remove_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";") == tag_before
def test_static_db_methods(monkeypatch, setup_mdb_dir):
# static db methods
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
tag_before = "Large Breasts;Nakadashi;Blowjob;Threesome;Bikini;Group Sex;Swimsuit".split(";")
tag_change = ["Test1", "Test2", "Blabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";") == tag_before
# load book so its in id_map and make sure add_remove_assoc also sets attr on book
b = mdb.get_book(16)
tag_before = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;"
"Layer Cake;Selfcest".split(";"))
tag_change = ["Test3", "Test4", "Blablabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
# also set attr on book
assert b.tag[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";") == tag_before
# also set attr on book
assert b.tag == tag_before
Book.set_favorite_id(mdb, 2, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 2").fetchone()
assert 1 == fav[0]
b = mdb.get_book(7)
Book.set_favorite_id(mdb, 7, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 7").fetchone()
assert 1 == fav[0]
# also set on book
assert b.favorite == 1
Book.rate_book_id(mdb, 3, 3.5)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 3").fetchone()
assert 3.5 == rat[0]
b = mdb.get_book(8)
Book.rate_book_id(mdb, 8, 4.25)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 8").fetchone()
assert 4.25 == rat[0]
# also set on book
assert b.my_rating == 4.25
def test_remove_book(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
import shutil
# copy cover
os.makedirs(os.path.join(tmpdir, "thumbs"))
cover_path = os.path.join(tmpdir, "thumbs", "16")
shutil.copyfile(os.path.join(tmpdir, os.pardir, "book_test_files", "16"), cover_path)
db_con = memdb
# book removed and all ext infos
b = mdb.get_book(16)
b.remove()
assert b._in_db is False
# deleted from id map
with pytest.raises(KeyError):
mdb.id_map[b.key]
b_row = db_con.execute("SELECT id FROM Books WHERE id = 16").fetchall()
assert not b_row
ei_rows = db_con.execute("SELECT id FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
assert not ei_rows
# cover deleted
assert not os.path.exists(cover_path)
def test_remove_extinfo(monkeypatch, setup_mdb_dir, caplog):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = mdb.get_book(16)
caplog.clear()
assert b.remove_ext_info(99) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.ERROR, "No external info with id 99 found!")
]
assert b.remove_ext_info(18) == "https://www.tsumino.com/entry/43454"
assert len(b.ext_infos) == 1
assert b.ext_infos[0].id == 16
assert b.remove_ext_info(16)
assert not b.ext_infos
caplog.clear()
assert b.remove_ext_info(4939) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.WARNING, "No external infos on book with id 16 or not"
" fetched from DB yet!")
]
def test_save_book(monkeypatch, setup_mdb_dir, caplog):
# save: _add _update
# incl! _update_assoc_cols -> "
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# _add
ei_data = dict(
id=None,
book_id=None,
url="http://test1.com",
id_onpage='1111',
imported_from=1,
upload_date=datetime.date(2018, 4, 13),
uploader="Uploader",
censor_id=1,
rating=4.19,
ratings=165,
favorites=300,
downloaded=None,
last_update=None,
outdated=None,
)
b1_data = dict(
id=None,
title_eng="Add1",
title_foreign="Foreign1",
language_id=1,
pages=25,
chapter_status="Vol. 2 Ch. 14",
read_status=13,
status_id=1,
my_rating=None,
category=["Manga"],
collection=None,
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=None,
cover_timestamp=None,
nsfw=1
)
b1 = Book(mdb, **b1_data)
# since we later check that cover_timestamp gets saved as 0.0 if None
b1_data['cover_timestamp'] = 0.0
ei1 = ExternalInfo(mdb, b1, **ei_data)
ei2 = ExternalInfo(mdb, b1, **ei_data)
# will outdate extinfo 8
ei2.id_onpage = '43506'
b1.ext_infos = [ei1, ei2]
assert b1._in_db is False
bid, outdated = b1.save()
assert bid == 18
assert b1.id == 18
# in_db + id_map, committed reset
assert b1._in_db is True
assert mdb.id_map[b1.key] is b1
assert not b1._committed_state
book_info_db = all_book_info(db_con, 18, include_id=True)
assert len(book_info_db) == 2
# fav set correctly
assert book_info_db[0]["favorite"] == 0
assert b1.favorite == 0
compare_cols_row_book_data(b1, book_info_db[0], b1_data, special={"favorite": 0})
# outdated, list of ext info ids that outdated others
assert outdated == [20]
# extinfo saved
eis = db_con.execute("SELECT id, book_id, id_onpage FROM ExternalInfo "
"WHERE id > 18").fetchall()
assert len(eis) == 2
assert eis[0]["book_id"] == 18
assert eis[1]["book_id"] == 18
assert eis[0]["id_onpage"] == '1111'
assert eis[1]["id_onpage"] == '43506'
# add book with new lang
b2 = Book(mdb, title_eng="Test2", favorite=1, pages=11, status_id=1, nsfw=0)
b2.language = "Krababbl"
bid, _ = b2.save()
assert bid == 19
assert b2.id == 19
# /2 since we have double indirection id->name name->id
expected_lang_id = len(LANG_IDS) / 2 + 1
assert b2.language_id == expected_lang_id
lang = db_con.execute("SELECT id FROM Languages WHERE name = 'Krababbl'").fetchall()
assert lang
assert lang[0][0] == expected_lang_id
brow = db_con.execute("SELECT title_eng, favorite FROM Books WHERE id = 19").fetchone()
assert brow[0] == "Test2"
assert brow["favorite"] == 1
assert b2.favorite == 1
assert b2._in_db is True
assert not b2._committed_state
assert mdb.id_map[b2.key] is b2
# _update
bu1 = Book(mdb, id=None, title_eng="Kangofu-san ni Kintama Sakusei Saremashita",
title_foreign="看護婦さんにキンタマ搾精されました", in_db=False)
bu1.in_db = True
# test not updating when block_update kwarg is true
caplog.clear()
assert bu1.save(block_update=True) == (None, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG,
f"Book was found in DB(id 15) but saving was blocked due to "
"block_update option!")
]
bu2 = mdb.get_book(11)
    # don't do anything if no changes
caplog.clear()
assert not bu2._committed_state
assert bu2.save() == (11, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG, "No changes to save for book with id 11")
]
assert not bu2._committed_state
before = bu2.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu2, col) if getattr(bu2, col) else None
for col in bu2.ASSOCIATED_COLUMNS})
bu2.language = "adlalad"
change = {
"title_eng": "Altered",
"language_id": 3,
"my_rating": 4.75,
"favorite": 1,
# removed and added
"tag": ("Large Breasts;Test33;Nakadashi;Ahegao;Gender Bender;Dark Skin;Elf;Body Swap"
";Bondage;Filming;Test Tag".split(";")),
# added
"artist": ["Taniguchi-san", "Newartist"],
# same
"category": ["Manga"],
# none added
"character": ["Char111", "Char222"]
}
bu2.update_from_dict(change)
before.update(change)
bid, _ = bu2.save()
book_info_db = all_book_info(db_con, 11, include_id=True)
compare_cols_row_book_data(bu2, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu2._committed_state
# last_change
assert bu2.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
bu3 = mdb.get_book(7)
assert not bu3._committed_state
before = bu3.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu3, col) if getattr(bu3, col) else None
for col in bu3.ASSOCIATED_COLUMNS})
change = {
"title_foreign": "ForeignAltered",
"pages": 13,
"note": "Note blabla",
# set None
"tag": None,
# set None
"artist": None,
# changed
"category": ["Manga"],
# none added
"collection": ["Col1", "Col2"],
"groups": ["Grp1", "Grp2", "Senpenbankashiki"]
}
bu3.update_from_dict(change)
before.update(change)
bid, _ = bu3.save()
book_info_db = all_book_info(db_con, 7, include_id=True)
compare_cols_row_book_data(bu3, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu3._committed_state
# last_change
assert bu3.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
assoc_concat = {
"tag": "tags", "artist": "artists", "category": "categories", "character": "characters",
"collection": "collections", "groups": "groups", "list": "lists", "parody": "parodies"
}
def compare_cols_row_book_data(book, row, data, special=None):
if special is None:
special = {}
for col in Book.COLUMNS:
row_val = row[col]
data_val = data[col]
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert getattr(book, col) == special[col]
elif data_val is None:
# use is comparison for None
assert row_val is None
assert getattr(book, col) is None
else:
assert row_val == data_val
assert getattr(book, col) == data_val
for col in Book.ASSOCIATED_COLUMNS:
if col == "ext_infos":
continue
# look up plural of col to get name of concat assoc col
col_assoc_concat = assoc_concat[col]
row_val = row[col_assoc_concat]
if row_val is not None:
# row_val is concatted values
# need sorted to compare (or use set)
row_val = sorted(row_val.split(";")) if ";" in row_val else [row_val]
# need sorted to compare (or use set)
data_val = sorted(data[col]) if data[col] else None
book_val = getattr(book, col)
book_val = sorted(book_val) if book_val else book_val
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert book_val == special[col]
elif data_val is None:
# assoc col doesnt return None only empty trackable
assert row_val is None
assert book_val == []
else:
assert row_val == data_val
assert book_val == data_val
|
from adapt.intent import IntentBuilder
from mycroft import MycroftSkill, intent_handler
from mycroft.tts.espeak_tts import ESpeak
from os.path import join, dirname
class StephenHawkingTributeSkill(MycroftSkill):
def __init__(self):
super().__init__()
try:
self.espeak = ESpeak("en-uk", {"voice": "m1"})
        except Exception:
self.espeak = None
def initialize(self):
if self.espeak:
self.espeak.init(self.bus)
def hawking_speak(self, utterance):
if self.espeak:
self.espeak.execute(utterance)
else:
self.speak(utterance)
@intent_handler(IntentBuilder("StephenHawkingQuote").require(
'StephenHawking').require(
'quote'))
def handle_quote(self, message):
utterance = self.dialog_renderer.render("quote", {})
self.log.info("speak: " + utterance)
self.hawking_speak(utterance)
self.gui.clear()
self.gui.show_image(join(dirname(__file__), "ui", "hawking.jpg"),
caption=utterance, fill='PreserveAspectFit')
@intent_handler(IntentBuilder("StephenHawkingBirth").require(
'StephenHawking').require(
'birth'))
def handle_birth(self, message):
utterance = self.dialog_renderer.render("birth", {})
        self.speak(utterance)
self.gui.clear()
self.gui.show_image(join(dirname(__file__), "ui", "young_hawking.jpg"),
caption=utterance, fill='PreserveAspectFit')
@intent_handler(IntentBuilder("StephenHawkingDeath").require(
'StephenHawking').require(
'death'))
def handle_death(self, message):
utterance = self.dialog_renderer.render("death", {})
        self.speak(utterance)
self.gui.clear()
self.gui.show_image(join(dirname(__file__), "ui", "hawking.jpg"),
caption=utterance, fill='PreserveAspectFit')
def create_skill():
return StephenHawkingTributeSkill()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Let's create a set
aset1 = {'one', 'two', 'three', 'four', 'five'}
print(aset1) # Notice that the printed order is different (sets are unordered)
# We can create a set from anything iterable
alist = ['one', 'two', 'three']
aset2 = set(alist)
# We can add a new element to the set
aset1.add('six')
print(aset1)
# We can add an existing element but it has no effect
aset1.add('six')
print(aset1)
print('one' in aset1) # Testing existence
print(aset1.isdisjoint(aset2)) # Testing whether intersection is empty
print(aset2 <= aset1) # Testing whether aset2 is subset of aset1
print(aset2.issubset(aset1)) # The same as above
print(aset2 < aset1) # Testing whether aset2 is proper subset of aset1
print(aset1 | aset2) # Union of sets
print(aset1.union(aset2)) # As above
print(aset1 & aset2) # Intersection of sets
print(aset1.intersection(aset2)) # As above
print(aset1 - aset2) # Difference of sets
print(aset1.difference(aset2)) # As above
print(aset1 ^ aset2) # Symmetric difference (elements in either the set or other but not both)
print(aset1.symmetric_difference(aset2)) # As above
aset1.remove('six') # Removing an element
print(aset1)
aset1.discard('six') # As above but generates no error in the case the element is not present
print(aset1)
print(aset1.pop()) # Removes and returns an arbitrary element
# frozensets works as regular sets but cannot be modified after creation
afset = frozenset(alist)
# Uncomment the following line to get an exception (no add() method)
# afset.add('six')
# Let's create several dicts
adict1 = {'one': 1, 'two': 2, 'three': 3}
# All the below creations create the same dictionary as above
adict2 = dict(one=1, two=2, three=3)
adict3 = dict([('two', 2), ('one', 1), ('three', 3)])
adict4 = dict(zip(['one', 'two', 'three'], [1, 2, 3])) # the builtin function zip() zips several lists into a list of tuples
print(adict1['one']) # dicts can be indexed by the keys
adict1['four'] = 4 # We can add new elements.
print(adict1)
adict1['five'] = 6 # Oh, we have made a mistake here. Let's correct it.
adict1['five'] = 5
print(adict1) # See that each key is just once in the dict (i.e., the keys behave as a set)
print('one' in adict1) # Testing existence of a key in the dict
print(adict1.keys()) # Keys in the dict
print(adict1.values()) # Values in the dict
print(adict1.items()) # Tuples (key, value)
# All the three methods return objects that look like list, but they are view objects.
# They provide a dynamic view on the dictionary’s entries, which means that when
# the dictionary changes, the view reflects these changes.
|
a = int(input())
b = int(input())
c = int(input())
d = int(input())
if a < b < c < d:
print("Fish Rising")
elif a > b > c > d:
print("Fish Diving")
elif a == b == c == d:
print("Fish At Constant Depth")
else:
print("No Fish")
|
import pandas as pd
import os.path
local = True
data_set_url = '/nethome/kkrishnan8/multi-sensor-data/data/raw/PEACHData/'
if local:
data_set_url = '/Users/koushikkrishnan/Documents/CODE/multi-sensor-data/data/raw/PEACHData/'
user_profiles = pd.read_csv(os.path.join(data_set_url, 'User Profiles/up_explicit_data.csv'))
body_temp = pd.read_csv(os.path.join(data_set_url, 'IoT/iot_bodytemp_data.csv'), parse_dates=['DateTime'])
heart_rate = pd.read_csv(os.path.join(data_set_url, 'IoT/iot_heartrate_data.csv'), parse_dates=['DateTime'])
sleep_quality = pd.read_csv(os.path.join(data_set_url, 'IoT/iot_sleepquality_data.csv'), parse_dates=['DateTime'])
avg_body_temp_by_user = body_temp.groupby(['UserId']).mean()
increased_heart_rate = heart_rate[['UserId', 'Increased_Heart_Rate']]
resting_heart_rate = heart_rate[['UserId', 'User_Resting_Heart_Rate']]
avg_increased_heart_rate = increased_heart_rate.groupby(['UserId']).mean()
avg_resting_heart_rate = resting_heart_rate.groupby(['UserId']).mean()
avg_body_temp_by_user.plot()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
return 'user_{0}/{1}'.format(instance.user.id, filename)
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE)
@receiver(post_save, sender= User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
class Child(models.Model):
    user = models.ForeignKey(Profile, default=0, on_delete=models.CASCADE)
name = models.CharField(max_length=30,null=True)
age = models.IntegerField(blank=False,null=True)
details = models.TextField(null=True)
image = models.ImageField(upload_to=user_directory_path)
def __str__(self):
        return self.user.user.username
|
#!/usr/bin/env python3
import sys
from PyQt5.QtCore import QUrl, QCoreApplication
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView
from PyQt5.QtWidgets import QApplication, QMainWindow,QPushButton, QWidget
import threading
x_size = 1366
y_size = 768
class EditorWindow(QMainWindow):
def __init__(self):
super().__init__()
self.resize(x_size, y_size)
self.setup_ui()
def setup_ui(self):
self.setMinimumSize(x_size-50, y_size)
self.setWindowTitle('ESPBlocks')
self.btn = QPushButton("Run", self)
self.btn.resize(50, 50)
self.btn.move(x_size-50, 0)
self.show()
class WebView(QWebEngineView):
def clear_button(self):
import time
while self.not_clear:
time.sleep(1)
self.page().runJavaScript('$("#command-download").remove();$("#script-name").remove();', self.get_clear_callback)
def bind_event(self, event):
self._event = event
self.not_clear = True
# print(res)
def get_code_callback(self, result):
print(result)
self._event(result)
def get_clear_callback(self, result):
if result:
self.not_clear = False
def get_code(self):
self.page().runJavaScript("($(editor)[0].getCode());", self.get_code_callback)
# print(res)
class BlocklyThread(threading.Thread):
def __init__(self, event, name="BlocklyThread"):
super().__init__()
self.name = name
self.event = event
self.start()
def run(self):
app = QApplication([])
layout = QVBoxLayout()
editor_window = EditorWindow()
browser = WebView(editor_window)
browser.bind_event(self.event)
editor_window.btn.clicked.connect(lambda: browser.get_code())
layout.addWidget(browser)
browser.resize(x_size-50, y_size)
url = 'https://micropython.top/editor-zh-hans.html#'
browser.load(QUrl(url))
browser.show()
threading.Thread(target=browser.clear_button).start()
app.exec_()
# browser.close()
# browser.destroy()
# editor_window.close()
if __name__ == "__main__":
# app = QApplication([])
def print_code(text):
print("====\n{}\n====".format(text))
blockly_thread = BlocklyThread(print_code)
|
from netmiko import ConnectHandler
from getpass import getpass
ios_devices = [
{
"host": "x",
"username": "x",
"password": getpass(),
"device_type": "cisco_nxos",
#"session_log": "my_session.txt",
}
# ,
#{
#"host": "x",
#"username": "x",
#"password": getpass(),
#"device_type": "cisco_nxos",
    #}
]
for device in ios_devices:
net_connect = ConnectHandler(**device)
print(net_connect.find_prompt())
    output = net_connect.send_command("show version")
    print(output)
    # Close the SSH session to the device once we are done with it
    net_connect.disconnect()
|
from os import listdir
from os.path import isfile, join
import re
import random
import os
import sys, getopt
def get_filenames(data_dir, regex):
# Create list with the training filenames
return [f for f in listdir(data_dir) if test_file(data_dir, f, regex)]
def test_file(data_dir, file, regex):
# Match file with regex
match = re.search(regex, file)
# Check if file exists and matches regex
if isfile(join(data_dir, file)) and match:
return True
else:
return False
def get_shuffled_fns_path():
# Get HOME environment var
home = os.environ['HOME']
# Get DL Caching lib path
dl_caching_dir = home + "/dlcaching"
# Create DL caching dir if necessary
if not os.path.exists(dl_caching_dir):
print("DL Caching directory " + dl_caching_dir + " created.")
os.makedirs(dl_caching_dir)
return dl_caching_dir + "/shuffled_filenames.txt"
def shuffle(data_dir, regex, num_epochs):
# Get filenames list
filenames = get_filenames(data_dir, regex)
# Check if filenames list is empty
    if not filenames:
        print("No training filenames found in " + data_dir)
        return
# Open out file
shuffled_filenames_path = get_shuffled_fns_path()
out_file = open(shuffled_filenames_path, "w")
# Repeat for the number of epochs
for i in range(num_epochs):
# Shuffle filenames
random.shuffle(filenames)
# Write filename
for f in filenames:
file_path = data_dir + "/" + f
out_file.write("%s\n" % file_path)
print("Shuffled filenames file " + shuffled_filenames_path + " created.")
# Close out file
out_file.close()
def main(argv):
data_dir = ''
regex = ''
epochs = 0
try:
opts, args = getopt.getopt(argv, "hd:r:e:", ["data_dir=", "regex=", "epochs="])
except getopt.GetoptError:
print('shuffle_filenames.py -d <data_dir> -r <regex> -e <num_epochs>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('shuffle_filenames.py -d <data_dir> -r <regex> -e <num_epochs>')
sys.exit()
elif opt in ("-d", "--data_dir"):
data_dir = arg
elif opt in ("-r", "--regex"):
regex = arg
elif opt in ("-e", "--epochs"):
epochs = int(arg)
shuffle(data_dir, regex, epochs)
if __name__ == "__main__":
main(sys.argv[1:])
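# A hypothetical invocation (the directory, pattern and epoch count below are
# placeholders, not values taken from this script):
#   python shuffle_filenames.py -d /data/train -r "\.tfrecord$" -e 5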
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Félix Chénier
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Identify cycles and time-normalize data.
"""
__author__ = "Félix Chénier"
__copyright__ = "Copyright (C) 2020 Félix Chénier"
__email__ = "chenier.felix@uqam.ca"
__license__ = "Apache 2.0"
import numpy as np
from kineticstoolkit.timeseries import TimeSeries, TimeSeriesEvent
from kineticstoolkit.decorators import directory
import warnings
from typing import List, Dict, Tuple, Sequence, Optional
def detect_cycles(ts: TimeSeries,
data_key: str, *,
event_names: Sequence[str] = ['phase1', 'phase2'],
thresholds: Sequence[float] = [0., 1.],
directions: Sequence[str] = ['rising', 'falling'],
min_durations: Sequence[float] = [0., 0.],
max_durations: Sequence[float] = [np.Inf, np.Inf],
min_peak_heights: Sequence[float] = [-np.Inf, -np.Inf],
max_peak_heights: Sequence[float] = [np.Inf, np.Inf],
**kwargs,
) -> TimeSeries:
"""
Detect cycles in a TimeSeries based on a dual threshold approach.
This function detects biphasic cycles and identifies the transitions as
new events in the output TimeSeries. These new events are named:
    - `event_names[0]`:
        corresponds to the start of phase 1
    - `event_names[1]`:
        corresponds to the start of phase 2
- '_':
corresponds to the end of the cycle.
Warning
-------
This function, which has been introduced in 0.4, is still experimental and
may change signature or behaviour in the future.
Parameters
----------
ts
TimeSeries to analyze.
data_key
Name of the data key to analyze in the TimeSeries.
event_names
Optional. Name of the events to add in the output TimeSeries. Default
is ['phase1', 'phase2'].
thresholds
Optional. Value to cross to register phase changes. Default is
[0., 1.].
directions
Optional. Direction to cross thresholds to register phase changes.
Either ['rising', 'falling'] or ['falling', 'rising']. Default is
['rising', 'falling'].
min_durations
Optional. Minimal phase durations in seconds. Default is [0., 0.].
max_durations
Optional. Maximal phase durations in seconds. Default is
[np.Inf, np.Inf]
min_peak_heights
Optional. Minimal peak values to be reached in both phases. Default is
[-np.Inf, -np.Inf].
max_peak_heights
Optional. Maximal peak values to be reached in both phases. Default is
[np.Inf, np.Inf].
Returns
-------
TimeSeries
A copy of ts with the events added.
"""
# lowercase directions[0] once
directions[0] = directions[0].lower() # type: ignore
if directions[0] != 'rising' and directions[0] != 'falling':
raise ValueError("directions[0] must be 'rising' or 'falling'")
# Find the pushes
time = ts.time
data = ts.data[data_key]
events = []
is_phase1 = True
for i in range(time.shape[0]):
if directions[0] == 'rising':
crossing1 = data[i] >= thresholds[0]
crossing2 = data[i] <= thresholds[1]
else:
crossing1 = data[i] <= thresholds[0]
crossing2 = data[i] >= thresholds[1]
if is_phase1 and crossing1:
is_phase1 = False
events.append(TimeSeriesEvent(time[i], event_names[0]))
elif (not is_phase1) and crossing2:
is_phase1 = True
events.append(TimeSeriesEvent(time[i], event_names[1]))
# Ensure that we start with event_name1 and that it's not on time0
while (events[0].name != event_names[0]) or (events[0].time == time[0]):
events = events[1:]
# Remove cycles where criteria are not reached.
valid_events = []
for i_event in range(0, len(events) - 1, 2):
time1 = events[i_event].time
time2 = events[i_event + 1].time
try:
time3 = events[i_event + 2].time
except IndexError:
time3 = np.Inf
sub_ts1 = ts.get_ts_between_times(time1, time2, inclusive=True)
sub_ts2 = ts.get_ts_between_times(time1, time3, inclusive=True)
if directions[0] == 'rising':
the_peak1 = np.max(sub_ts1.data[data_key])
the_peak2 = np.min(sub_ts2.data[data_key])
else:
the_peak1 = np.min(sub_ts1.data[data_key])
the_peak2 = np.max(sub_ts2.data[data_key])
if (time2 - time1 >= min_durations[0] and
time2 - time1 <= max_durations[0] and
time3 - time2 >= min_durations[1] and
time3 - time2 <= max_durations[1] and
the_peak1 >= min_peak_heights[0] and
the_peak1 <= max_peak_heights[0] and
the_peak2 >= min_peak_heights[1] and
the_peak2 <= max_peak_heights[1]):
# Save it.
valid_events.append(events[i_event])
valid_events.append(events[i_event + 1])
if not np.isinf(time3):
valid_events.append(TimeSeriesEvent(time3, '_'))
# Form the output timeseries
tsout = ts.copy()
for event in valid_events:
tsout = tsout.add_event(event.time, event.name)
tsout.sort_events()
return tsout
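# A minimal usage sketch for detect_cycles (the signal below is made up for
# illustration and is not part of this module):
#
#     ts = TimeSeries(time=np.arange(0, 10, 0.01))
#     ts.data['force'] = np.sin(2 * np.pi * ts.time)
#     ts = detect_cycles(ts, 'force',
#                        event_names=['push', 'recovery'],
#                        thresholds=[0.2, -0.2])
#     # ts now holds 'push', 'recovery' and '_' events delimiting each valid cycle.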
def time_normalize(
ts: TimeSeries, /,
event_name1: str,
event_name2: str, *,
n_points: int = 100,
span: Optional[Sequence[int]] = None,
) -> TimeSeries:
"""
Time-normalize cycles in a TimeSeries.
This method time-normalizes the TimeSeries at each cycle defined by
event_name1 and event_name2 on n_points. The time-normalized cycles are
put end to end. For example, for a TimeSeries that contains three
cycles, a time normalization with 100 points will give a TimeSeries
of length 300. The TimeSeries' events are also time-normalized, including
event_name1 but with event_name2 renamed as '_'.
Parameters
----------
ts
The TimeSeries to analyze.
event_name1
        The event name that corresponds to the beginning of a cycle.
    event_name2
        The event name that corresponds to the end of a cycle.
n_points
Optional. The number of points of the output TimeSeries.
span
Optional. Specifies which normalized points to include in the output
TimeSeries. See note below.
Returns
-------
TimeSeries
A new TimeSeries where each cycle has been time-normalized.
Notes
-----
The span argument is experimental and has been introduced in version 0.4.
Use it to define which normalized points to include in the output
TimeSeries. For example, to normalize in percents and to include only data
from 10 to 90% of each cycle, assign 100 to n_points and [10, 90] to span.
The resulting TimeSeries will then be expressed in percents and wrap each
80 points. It is also possible to include pre-cycle or post-cycle data.
For example, to normalize in percents and to include 20% pre-cycle and 15%
post-cycle, assign 100 to n_points and [-20, 15] to span. The resulting
TimeSeries will then wrap each 135 points with the cycles starting at 20,
155, etc. and ending at 119, 254, etc. For each cycle, events outside the
0-100% spans are ignored.
"""
# Optional span
if span is None:
span = [0, n_points]
# Find the final number of cycles
if len(ts.events) < 2:
raise(ValueError('No cycle can be defined from these event names.'))
i_cycle = 0
# Initialize the destination TimeSeries
dest_ts = ts.copy()
dest_ts.events = []
if n_points == 100:
dest_ts.time_info['Unit'] = '%'
else:
dest_ts.time_info['Unit'] = f"1/{n_points}"
dest_data = {} # type: Dict[str, List[np.ndarray]]
dest_data_shape = {} # type: Dict[str, Tuple[int, ...]]
# Go through all cycles
while True:
# Get the begin time for this cycle
begin_time = ts.get_event_time(event_name1, i_cycle)
# Get the end time for this cycle
end_cycle = 0
end_time = ts.get_event_time(event_name2, end_cycle)
while end_time <= begin_time:
end_cycle += 1
end_time = ts.get_event_time(event_name2, end_cycle)
# We are done. Quit the loop.
if np.isnan(begin_time) or np.isnan(end_time):
break
# Get the extended begin and end times considering relative_span
extended_begin_time = (begin_time +
span[0] / n_points *
(end_time - begin_time))
extended_end_time = (begin_time +
span[1] / n_points *
(end_time - begin_time))
# Extract this cycle
subts = ts.get_ts_between_times(
extended_begin_time, extended_end_time, inclusive=True)
subts_backup = subts.copy()
if subts.time.shape[0] == 0:
            raise ValueError("The TimeSeries has no data in this cycle's time span.")
# Resample this cycle on span + 1
# (and get the first points after)
try:
subts = subts.resample(
np.linspace(extended_begin_time,
extended_end_time,
span[1] - span[0] + 1))
except ValueError:
subts = subts_backup # In case the try messed with subts
subts = subts.resample(
np.linspace(extended_begin_time,
extended_end_time,
span[1] - span[0] + 1),
fill_value='extrapolate')
warnings.warn(f"Cycle {i_cycle} has been extrapolated.")
# Keep only the first points (the last one belongs to the next cycle)
subts = subts.get_ts_between_indexes(
0, span[1] - span[0] - 1, inclusive=True)
# Keep only the events in the unextended span
events = []
for event in subts.events:
if event.time >= begin_time and event.time < end_time:
events.append(event)
subts.events = events
subts = subts.sort_events()
# Separate start/end events from the other
start_end_events = []
other_events = []
for event in subts.events:
if event.name == event_name1 or event.name == event_name2:
start_end_events.append(event)
else:
other_events.append(event)
# Add event_name1 at the beginning and end (duplicates will be
# cancelled at the end)
dest_ts = dest_ts.add_event(
-span[0] + i_cycle * (span[1] - span[0]),
event_name1
)
dest_ts = dest_ts.add_event(
-span[0] + n_points + i_cycle * (span[1] - span[0]),
'_'
)
# Add the other events
def time_to_normalized_time(time):
"""Resample the events times."""
return ((time - extended_begin_time) /
(extended_end_time - extended_begin_time) *
(span[1] - span[0]) + i_cycle * (span[1] - span[0]))
for i_event, event in enumerate(other_events):
# Resample
new_time = time_to_normalized_time(event.time)
dest_ts = dest_ts.add_event(new_time, event.name)
# Add this cycle to dest_time and dest_data
for key in subts.data:
if key not in dest_data:
dest_data[key] = []
dest_data_shape[key] = ts.data[key].shape
dest_data[key].append(subts.data[key])
i_cycle += 1
n_cycles = i_cycle
# Put back dest_time and dest_data in dest_ts
dest_ts.time = 1.0 * np.arange(n_cycles * (span[1] - span[0]))
for key in ts.data:
# Stack the data into a [cycle, percent, values] shape
temp = np.array(dest_data[key])
# Reshape to put all cycles end to end
new_shape = list(dest_data_shape[key])
new_shape[0] = n_cycles * (span[1] - span[0])
dest_ts.data[key] = np.reshape(temp, new_shape)
dest_ts = dest_ts.sort_events()
return dest_ts
def stack(ts: TimeSeries, /, n_points: int = 100) -> Dict[str, np.ndarray]:
"""
Stack time-normalized TimeSeries' data into a dict of arrays.
    This method returns the data of a time-normalized TimeSeries as a dict
where each key corresponds to a TimeSeries' data, and contains a numpy
array where the first dimension is the cycle, the second dimension is the
percentage of cycle, and the other dimensions are the data itself.
Parameters
----------
ts
The time-normalized TimeSeries.
n_points
Optional. The number of points the TimeSeries has been time-normalized
on.
Returns
-------
Dict[str, np.ndarray]
See Also
--------
kineticstoolkit.cycles.unstack
"""
if np.mod(len(ts.time), n_points) != 0:
raise(ValueError(
'It seems that this TimeSeries is not time-normalized.'))
data = dict()
for key in ts.data.keys():
current_shape = ts.data[key].shape
new_shape = [-1, n_points]
for i in range(1, len(current_shape)):
new_shape.append(ts.data[key].shape[i])
data[key] = ts.data[key].reshape(new_shape, order='C')
return data
def unstack(data: Dict[str, np.ndarray], /) -> TimeSeries:
"""
Unstack time-normalized data from a dict of arrays to a TimeSeries.
    This method creates a time-normalized TimeSeries by putting each cycle
from the provided data dictionary end to end.
Parameters
----------
data
A dict where each key contains a numpy array where the first dimension
is the cycle, the second dimension is the percentage of cycle, and
the other dimensions are the data itself.
Returns
-------
TimeSeries
See Also
--------
ktk.cycles.stack
"""
ts = TimeSeries()
for key in data.keys():
current_shape = data[key].shape
n_cycles = current_shape[0]
n_points = current_shape[1]
ts.data[key] = data[key].reshape([n_cycles * n_points], order='C')
ts.time = np.arange(n_cycles * n_points)
ts.time_info['Unit'] = ''
return ts
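# A minimal round-trip sketch for stack/unstack (the array below is arbitrary
# illustration data, not part of this module):
#
#     data = {'angle': np.random.rand(5, 100)}  # 5 cycles of 100 points each
#     ts = unstack(data)                        # TimeSeries of length 5 * 100
#     recovered = stack(ts, n_points=100)       # back to a (5, 100) array per key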
# The stack_events function is working but commented for now, since I could not
# figure an obvious, undiscutable way to represent its output (use lists,
# TimeSeriesEvents, numpy arrays?). It's also unclear for me how to integrate
# with the standard stack function and its unstack counterpart.
#
# def stack_events(
# ts: TimeSeries, /,
# n_points: int = 100) -> Dict[str, np.ndarray]:
# """
# Stack time-normalized TimeSeries' events into a dict of arrays.
# This methods returns the a dictionary where each key corresponds to an
# event name, and contains a 2d numpy array that contains the event's
# normalized time, with the first dimension being the cycle and the second
# dimension being the occurrence of this event during this cycle.
# Warning
# -------
# This function is currently experimental and may change signature and
# behaviour in the future.
# Parameters
# ----------
# ts
# The time-normalized TimeSeries.
# n_points
# Optional. The number of points the TimeSeries has been
# time-normalized on.
# Returns
# -------
# Dict[str, np.ndarray]
# Example
# -------
# >>> import kineticstoolkit.lab as ktk
# >>> # Create a TimeSeries with different time-normalized events
# >>> ts = ktk.TimeSeries(time=np.arange(400)) # 4 cycles of 100%
# >>> ts = ts.add_event(9, 'event1') # event1 at 9% of cycle 0
# >>> ts = ts.add_event(110, 'event1') # event1 at 10% of cycle 1
# >>> ts = ts.add_event(312, 'event1') # event1 at 12% of cycle 3
# >>> ts = ts.add_event(382, 'event1') # 2nd occ. event1 at 82% of cycle 3
# >>> # Stack these events
# >>> events = ktk.cycles.stack_events(ts)
# >>> events['event1']
# [[9.0], [10.0], [], [12.0, 82.0]]
# """
# ts = ts.copy()
# ts.sort_events()
# n_cycles = int(ts.time.shape[0] / n_points)
# out = {} # type: Dict[str, np.ndarray]
# # Init
# for event in ts.events:
# if event.name not in out:
# out[event.name] = [[] for i in range(n_cycles)]
# for event in ts.events:
# event_cycle = int(event.time / n_points)
# out[event.name][event_cycle].append(np.mod(event.time, n_points))
# return out
def most_repeatable_cycles(data: np.ndarray, /) -> List[int]:
"""
Get the indexes of the most repeatable cycles in TimeSeries or array.
This function returns an ordered list of the most repeatable to the least
repeatable cycles.
    It works by recursively discarding the cycle that maximizes the
root-mean-square error between the cycle and the average of every
remaining cycle, until there are only two cycles remaining. The function
returns a list that is the reverse order of cycle removal: first the two
last cycles, then the last-removed cycle, and so on. If two cycles are as
equivalently repeatable, they are returned in order of appearance.
Note
----
Cycles that include at least one NaN are excluded.
Parameters
----------
data
Stacked time-normalized data to analyze, in the shape
(n_cycles, n_points).
Returns
-------
List[int]
List of indexes corresponding to the cycles in most to least
repeatable order.
Example
-------
>>> import kineticstoolkit.lab as ktk
>>> import numpy as np
>>> # Create a data sample with four different cycles, the most different
    >>> # being cycle 2 (cos instead of sin), then cycle 0.
>>> x = np.arange(0, 10, 0.1)
>>> data = np.array([[np.sin(x)], \
[np.sin(x) + 0.14], \
[np.cos(x) + 0.14], \
[np.sin(x) + 0.15]])
>>> ktk.cycles.most_repeatable_cycles(data)
[1, 3, 0, 2]
"""
data = data.copy()
n_cycles = data.shape[0]
out_cycles = [] # type: List[int]
done_cycles = [] # type: List[int] # Like out_cycles but includes NaNs
# Exclude cycles with nans: put nans for all data of this cycle
for i_cycle in range(n_cycles-1, -1, -1):
if np.isnan(np.sum(data[i_cycle])):
data[i_cycle] = np.nan
done_cycles.append(i_cycle)
# Iteratively remove the cycle that is the most different from the
# mean of the remaining cycles.
while len(done_cycles) < n_cycles - 2:
current_mean_cycle = np.nanmean(data, axis=0)
rms = np.zeros(n_cycles)
for i_curve in range(n_cycles-1, -1, -1):
rms[i_curve] = np.sqrt(np.mean(np.sum(
(data[i_curve] - current_mean_cycle) ** 2)))
i_cycle = np.nanargmax(rms)
out_cycles.append(i_cycle)
done_cycles.append(i_cycle)
data[i_cycle] = np.nan
# Find the two remaining cycles
set_all = set(range(n_cycles))
set_done = set(done_cycles)
remain = sorted(list(set_all - set_done))
if len(remain) > 1:
out_cycles.append(remain[1])
if len(remain) > 0:
out_cycles.append(remain[0])
return out_cycles[-1::-1]
module_locals = locals()
def __dir__():
return directory(module_locals)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
import argparse
import os
import time
from shutil import copyfile
import numpy as np
import torch
import torch.optim as optim
from torchvision.utils import make_grid, save_image
from datasets import UnpairDataset, denorm
from models import FUnIEUpGenerator, FUnIEUpDiscriminator
from torch.utils.tensorboard import SummaryWriter
from utils import AverageMeter, ProgressMeter
class Trainer(object):
def __init__(self, train_loader, valid_loader, lr, epochs, gen_dstd2ehcd_resume, gen_ehcd2dstd_resume, dis_dstd_resume, dis_ehcd_resume, save_path, is_cuda):
self.train_loader = train_loader
self.valid_loader = valid_loader
self.start_epoch = 0
self.epochs = epochs
self.save_path = save_path
os.makedirs(f"{self.save_path}/viz", exist_ok=True)
self.writer = SummaryWriter(log_dir=self.save_path)
self.is_cuda = is_cuda
self.print_freq = 20
self.best_gen_loss = 1e6
self.gen_dstd2ehcd = FUnIEUpGenerator()
self.gen_ehcd2dstd = FUnIEUpGenerator()
self.dis_dstd = FUnIEUpDiscriminator()
self.dis_ehcd = FUnIEUpDiscriminator()
if gen_dstd2ehcd_resume and gen_ehcd2dstd_resume and dis_dstd_resume and dis_ehcd_resume:
self.load(gen_dstd2ehcd_resume, gen_ehcd2dstd_resume,
dis_dstd_resume, dis_ehcd_resume)
if self.is_cuda:
self.gen_dstd2ehcd.cuda()
self.gen_ehcd2dstd.cuda()
self.dis_dstd.cuda()
self.dis_ehcd.cuda()
self.mse = torch.nn.MSELoss()
self.mae = torch.nn.L1Loss()
dis_params = list(self.dis_dstd.parameters()) + \
list(self.dis_ehcd.parameters())
gen_params = list(self.gen_dstd2ehcd.parameters()) + \
list(self.gen_ehcd2dstd.parameters())
self.dis_optimizer = optim.Adam(
filter(lambda p: p.requires_grad, dis_params), lr)
self.gen_optimizer = optim.Adam(
filter(lambda p: p.requires_grad, gen_params), lr)
def train(self):
for e in range(self.start_epoch, self.epochs):
self.epoch = e
_, _ = self.train_epoch()
valid_gen_loss, _ = self.validate()
# Save models
self.save(valid_gen_loss)
self.writer.close()
def train_epoch(self):
self.gen_dstd2ehcd.train()
self.gen_ehcd2dstd.train()
self.dis_dstd.train()
self.dis_ehcd.train()
batch_time = AverageMeter("Time", "3.3f")
gen_losses = AverageMeter("Generator Loss")
dis_losses = AverageMeter("Discriminator Loss")
progress = ProgressMeter(len(self.train_loader), [
batch_time, gen_losses, dis_losses], prefix="Train: ")
end = time.time()
for batch_idx, (dstd_images, ehcd_images) in enumerate(self.train_loader):
bs = dstd_images.size(0)
valid = torch.ones((bs, 16, 16))
fake = torch.zeros((bs, 16, 16))
if self.is_cuda:
dstd_images = dstd_images.cuda()
ehcd_images = ehcd_images.cuda()
valid = valid.cuda()
fake = fake.cuda()
# Train the discriminator using real samples
valid_dstd = self.dis_dstd(dstd_images)
valid_ehcd = self.dis_ehcd(ehcd_images)
d_loss_real = self.mse(valid, valid_dstd) + self.mse(valid, valid_ehcd)
self.dis_optimizer.zero_grad()
d_loss_real.backward()
self.dis_optimizer.step()
# Train the discriminator using fake samples
valid_dstd = self.dis_dstd(self.gen_ehcd2dstd(ehcd_images))
valid_ehcd = self.dis_ehcd(self.gen_dstd2ehcd(dstd_images))
d_loss_fake = self.mse(fake, valid_dstd) + self.mse(fake, valid_ehcd)
self.dis_optimizer.zero_grad()
d_loss_fake.backward()
self.dis_optimizer.step()
# Train the generator using dstd->ehcd->dstd cycle
fake_ehcd = self.gen_dstd2ehcd(dstd_images)
valid_ehcd = self.dis_ehcd(fake_ehcd)
recn_dstd = self.gen_ehcd2dstd(fake_ehcd)
g_loss_dstd = self.mae(valid, valid_ehcd) + \
10 * self.mae(dstd_images, recn_dstd)
self.gen_optimizer.zero_grad()
g_loss_dstd.backward()
self.gen_optimizer.step()
# Train the generator using ehcd->dstd->ehcd cycle
fake_dstd = self.gen_ehcd2dstd(ehcd_images)
valid_dstd = self.dis_dstd(fake_dstd)
recn_ehcd = self.gen_dstd2ehcd(fake_dstd)
g_loss_ehcd = self.mae(valid, valid_dstd) + \
10 * self.mae(ehcd_images, recn_ehcd)
self.gen_optimizer.zero_grad()
g_loss_ehcd.backward()
self.gen_optimizer.step()
# Total loss
d_loss = d_loss_real + d_loss_fake
g_loss = g_loss_dstd + g_loss_ehcd
# Update
dis_losses.update(d_loss.item(), bs)
gen_losses.update(g_loss.item(), bs)
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % self.print_freq == 0:
progress.display(batch_idx)
# Write stats to tensorboard
self.writer.add_scalar("Generator Loss/Train",
gen_losses.avg, self.epoch)
self.writer.add_scalar("Discriminator Loss/Train",
dis_losses.avg, self.epoch)
return gen_losses.avg, dis_losses.avg
def validate(self):
self.gen_dstd2ehcd.eval()
self.gen_ehcd2dstd.eval()
self.dis_dstd.eval()
self.dis_ehcd.eval()
batch_time = AverageMeter("Time", "3.3f")
gen_losses = AverageMeter("Generator Loss")
dis_losses = AverageMeter("Discriminator Loss")
progress = ProgressMeter(len(self.valid_loader), [
batch_time, gen_losses, dis_losses], prefix="Valid: ")
with torch.no_grad():
end = time.time()
for batch_idx, (dstd_images, ehcd_images) in enumerate(self.valid_loader):
bs = dstd_images.size(0)
valid = torch.ones((bs, 16, 16))
fake = torch.zeros((bs, 16, 16))
if self.is_cuda:
dstd_images = dstd_images.cuda()
ehcd_images = ehcd_images.cuda()
valid = valid.cuda()
fake = fake.cuda()
# Train the discriminator using real samples
valid_dstd = self.dis_dstd(dstd_images)
valid_ehcd = self.dis_ehcd(ehcd_images)
d_loss_real = self.mse(valid, valid_dstd) + \
self.mse(valid, valid_ehcd)
# Train the discriminator using fake samples
fake_dstd = self.gen_ehcd2dstd(ehcd_images)
fake_ehcd = self.gen_dstd2ehcd(dstd_images)
valid_dstd = self.dis_dstd(fake_dstd)
valid_ehcd = self.dis_ehcd(fake_ehcd)
d_loss_fake = self.mse(fake, valid_dstd) + \
self.mse(fake, valid_ehcd)
# Train the generator using dstd->ehcd->dstd cycle
valid_ehcd = self.dis_ehcd(fake_ehcd)
recn_dstd = self.gen_ehcd2dstd(fake_ehcd)
g_loss_dstd = self.mse(valid, valid_ehcd) + \
self.mse(dstd_images, recn_dstd)
# Train the generator using ehcd->dstd->ehcd cycle
valid_dstd = self.dis_dstd(fake_dstd)
recn_ehcd = self.gen_dstd2ehcd(fake_dstd)
g_loss_ehcd = self.mse(valid, valid_dstd) + \
self.mse(ehcd_images, recn_ehcd)
# Total loss
d_loss = d_loss_real + d_loss_fake
g_loss = g_loss_dstd + g_loss_ehcd
# Update
dis_losses.update(d_loss.item(), bs)
gen_losses.update(g_loss.item(), bs)
batch_time.update(time.time() - end)
end = time.time()
# Vis
if batch_idx == 0:
fake_ehcd_grid = denorm(
make_grid(fake_ehcd.data)).div_(255.)
fake_dstd_grid = denorm(
make_grid(fake_dstd.data)).div_(255.)
recn_ehcd_grid = denorm(
make_grid(recn_ehcd.data)).div_(255.)
recn_dstd_grid = denorm(
make_grid(recn_dstd.data)).div_(255.)
save_image(
fake_ehcd_grid, f"{self.save_path}/viz/fake_ehcd_{self.epoch}.png")
save_image(
fake_dstd_grid, f"{self.save_path}/viz/fake_dstd_{self.epoch}.png")
save_image(
recn_ehcd_grid, f"{self.save_path}/viz/recn_ehcd_{self.epoch}.png")
save_image(
recn_dstd_grid, f"{self.save_path}/viz/recn_dstd_{self.epoch}.png")
self.writer.add_image(
"Viz/Fake Distort", fake_ehcd_grid, self.epoch)
self.writer.add_image(
"Viz/Fake Enhance", fake_dstd_grid, self.epoch)
self.writer.add_image(
"Viz/Recn Distort", recn_ehcd_grid, self.epoch)
self.writer.add_image(
"Viz/Recn Enhance", recn_dstd_grid, self.epoch)
if batch_idx % self.print_freq == 0:
progress.display(batch_idx)
# Write stats to tensorboard
self.writer.add_scalar("Generator Loss/Validation",
gen_losses.avg, self.epoch)
self.writer.add_scalar("Discriminator Loss/Validation",
dis_losses.avg, self.epoch)
return gen_losses.avg, dis_losses.avg
def save_model(self, model_type, model, model_content, is_best):
model_path = f"{self.save_path}/{self.epoch}_{model_type}.pth.tar"
model_content["state_dict"] = model.state_dict()
torch.save(model_content, model_path)
print(f">>> Save '{model_type}' model to {model_path}")
if is_best:
best_path = f"{self.save_path}/best_{model_type}.pth.tar"
copyfile(model_path, best_path)
def load_model(self, model_type, model, model_path, device):
ckpt = torch.load(model_path, map_location=device)
epoch = ckpt["epoch"]
model.load_state_dict(ckpt["state_dict"])
print(
f">>> Load '{model_type}' model at epoch {epoch} from {model_path}")
return epoch, ckpt["best_loss"]
def save(self, loss):
# Check if the current model is the best
is_best = loss < self.best_gen_loss
self.best_gen_loss = min(self.best_gen_loss, loss)
# Prepare model info to be saved
model_content = {"best_loss": loss, "epoch": self.epoch}
# Save generator and discriminator
self.save_model("gen_dstd2ehcd", self.gen_dstd2ehcd, model_content, is_best)
self.save_model("gen_ehcd2dstd", self.gen_ehcd2dstd, model_content, is_best)
self.save_model("dis_dstd", self.dis_dstd, model_content, is_best)
self.save_model("dis_ehcd", self.dis_ehcd, model_content, is_best)
def load(self, gen_dstd2ehcd_resume, gen_ehcd2dstd_resume, dis_dstd_resume, dis_ehcd_resume):
device = "cuda:0" if self.is_cuda else "cpu"
gen_dstd2ehcd_epoch, best_loss = self.load_model(
"gen_dstd2ehcd", self.gen_dstd2ehcd, gen_dstd2ehcd_resume, device)
gen_ehcd2dstd_epoch, _ = self.load_model(
"gen_ehcd2dstd", self.gen_ehcd2dstd, gen_ehcd2dstd_resume, device)
dis_dstd_epoch, _ = self.load_model(
"dis_dstd", self.dis_dstd, dis_dstd_resume, device)
dis_dst_epoch, _ = self.load_model(
"dis_ehcd", self.dis_ehcd, dis_ehcd_resume, device)
assert gen_dstd2ehcd_epoch == gen_ehcd2dstd_epoch == dis_dstd_epoch == dis_dst_epoch
self.start_epoch = gen_dstd2ehcd_epoch + 1
if __name__ == "__main__":
# Set seed
np.random.seed(77)
torch.manual_seed(77)
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.manual_seed(77)
parser = argparse.ArgumentParser(description="PyTorch FUnIE-GAN Training")
parser.add_argument("-d", "--data", default="", type=str, metavar="PATH",
help="path to data (default: none)")
parser.add_argument("-j", "--workers", default=4, type=int, metavar="N",
help="number of data loading workers (default: 4)")
parser.add_argument("--epochs", default=90, type=int, metavar="N",
help="number of total epochs to run")
parser.add_argument("-b", "--batch-size", default=256, type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel")
parser.add_argument("--lr", "--learning-rate", default=0.1, type=float,
metavar="LR", help="initial learning rate")
parser.add_argument("--gen-dstd2ehcd-resume", default="", type=str, metavar="PATH",
help="path to latest dstd2ehcd generator checkpoint (default: none)")
parser.add_argument("--gen-ehcd2dstd-resume", default="", type=str, metavar="PATH",
help="path to latest ehcd2dstd generator checkpoint (default: none)")
parser.add_argument("--dis-dstd-resume", default="", type=str, metavar="PATH",
help="path to latest dstd discriminator checkpoint (default: none)")
parser.add_argument("--dis-ehcd-resume", default="", type=str, metavar="PATH",
help="path to latest ehcd discriminator checkpoint (default: none)")
parser.add_argument("--save-path", default="", type=str, metavar="PATH",
help="path to save results (default: none)")
args = parser.parse_args()
# Build data loaders
train_set = UnpairDataset(args.data, (256, 256), "train")
valid_set = UnpairDataset(args.data, (256, 256), "valid")
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
valid_loader = torch.utils.data.DataLoader(
valid_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
# Create trainer
trainer = Trainer(train_loader, valid_loader, args.lr, args.epochs, args.gen_dstd2ehcd_resume,
args.gen_ehcd2dstd_resume, args.dis_dstd_resume, args.dis_ehcd_resume, args.save_path, is_cuda)
trainer.train()
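# Hypothetical launch command (script name, data path and hyper-parameters below
# are placeholders, not values prescribed by this file):
#   python train_funie_cycle.py -d /path/to/unpaired_data -b 8 --lr 3e-4 \
#       --epochs 200 --save-path ./runs/funie_cyclegan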
|
nome = input("Digite o nome do cliente: ")
dia_v = input("Digite o dia de vencimento: ")
mes_v = input("Digite o mês de vencimento: ")
fatura = input("Digite o valor da fatura: ")
print("Olá,",nome)
print("A sua fatura com vencimento em",dia_v,"de",mes_v,"no valor de R$",fatura,"está fechada.")
|
import os
import astropy.io.fits as fits
from astropy.convolution import Gaussian2DKernel, convolve_fft, convolve
import matplotlib.colors as colors
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
try:
import progressbar
except ImportError:
print('WARNING: progressbar is not present')
from scipy import interpolate
from .parameters import Params, find_parameter_file
from .utils import FWHM_to_sigma, default_cmap, Wm2_to_Tb, Jy_to_Tb
class Line:
_line_file = "lines.fits.gz"
def __init__(self, dir=None, **kwargs):
# Correct path if needed
dir = os.path.normpath(os.path.expanduser(dir))
self.dir = dir
# Search for parameter file
para_file = find_parameter_file(dir)
# Read parameter file
self.P = Params(para_file)
# Read model results
self._read(**kwargs)
def _read(self):
# Read ray-traced image
try:
hdu = fits.open(self.dir + "/" + self._line_file)
self.lines = hdu[0].data
# Read a few keywords in header
self.pixelscale = hdu[0].header['CDELT2'] * 3600.0 # arcsec
self.unit = hdu[0].header['BUNIT']
self.cx = hdu[0].header['CRPIX1']
self.cy = hdu[0].header['CRPIX2']
self.nx = hdu[0].header['NAXIS1']
self.ny = hdu[0].header['NAXIS2']
self.nv = hdu[0].header['NAXIS3']
if self.unit == "JY/PIXEL":
self.is_casa = True
self.restfreq = hdu[0].header['RESTFREQ']
self.freq = [self.restfreq]
self.velocity_type = hdu[0].header['CTYPE3']
if self.velocity_type == "VELO-LSR":
self.CRPIX3 = hdu[0].header['CRPIX3']
self.CRVAL3 = hdu[0].header['CRVAL3']
self.CDELT3 = hdu[0].header['CDELT3']
self.velocity = self.CRVAL3 + self.CDELT3 * (
np.arange(1, self.nv + 1) - self.CRPIX3
) # km/s
else:
raise ValueError("Velocity type is not recognised")
else:
self.is_casa = False
self.cont = hdu[1].data
self.ifreq = hdu[2].data
self.freq = hdu[3].data # frequencies of the transition
self.velocity = hdu[4].data / 1000 # km/s
self.dv = self.velocity[1] - self.velocity[0]
hdu.close()
except OSError:
print('cannot open', self._line_file)
def plot_map(
self,
i=0,
iaz=0,
iTrans=0,
v=None,
iv=None,
insert=False,
substract_cont=False,
moment=None,
psf_FWHM=None,
bmaj=None,
bmin=None,
bpa=None,
plot_beam=None,
axes_unit="arcsec",
conv_method=None,
fmax=None,
fmin=None,
fpeak=None,
dynamic_range=1e3,
color_scale=None,
colorbar=True,
cmap=None,
ax=None,
no_xlabel=False,
no_ylabel=False,
no_xticks=False,
no_yticks=False,
title=None,
limit=None,
limits=None,
Tb=False,
Delta_v=None,
):
# Todo:
# - allow user to change brightness unit : W.m-1, Jy, Tb
# - print molecular info (eg CO J=3-2)
        #  - add continuum subtraction
        # bmin and bmaj in arcsec
if ax is None:
ax = plt.gca()
# -- Selecting channel corresponding to a given velocity
if v is not None:
iv = np.abs(self.velocity - v).argmin()
print("Selecting channel #", iv)
# --- Compute pixel scale and extent of image
if axes_unit.lower() == 'arcsec':
pix_scale = self.pixelscale
xlabel = r'$\Delta$ Ra ["]'
ylabel = r'$\Delta$ Dec ["]'
elif axes_unit.lower() == 'au':
pix_scale = self.pixelscale * self.P.map.distance
xlabel = 'Distance from star [au]'
ylabel = 'Distance from star [au]'
elif axes_unit.lower() == 'pixels' or axes_unit.lower() == 'pixel':
pix_scale = 1
xlabel = r'$\Delta$ x [pix]'
ylabel = r'$\Delta$ y [pix]'
else:
raise ValueError("Unknown unit for axes_units: " + axes_unit)
halfsize = np.asarray(self.lines.shape[-2:]) / 2 * pix_scale
extent = [-halfsize[0], halfsize[0], -halfsize[1], halfsize[1]]
# -- set color map
if cmap is None:
if moment == 1:
cmap = "RdBu"
else:
cmap = default_cmap
# -- beam or psf : psf_FWHM and bmaj and bmin are in arcsec, bpa in deg
i_convolve = False
beam = None
if psf_FWHM is not None:
            # Convert FWHM to a Gaussian sigma, consistent with the bmaj/bmin branch below
            sigma = psf_FWHM / self.pixelscale * FWHM_to_sigma  # in pixels
beam = Gaussian2DKernel(sigma)
i_convolve = True
bmin = psf_FWHM
bmaj = psf_FWHM
bpa = 0
if plot_beam is None:
plot_beam = True
if bmaj is not None:
sigma_x = bmin / self.pixelscale * FWHM_to_sigma # in pixels
sigma_y = bmaj / self.pixelscale * FWHM_to_sigma # in pixels
beam = Gaussian2DKernel(sigma_x, sigma_y, bpa * np.pi / 180)
i_convolve = True
if plot_beam is None:
plot_beam = True
# -- Selecting convolution function
if conv_method is None:
conv_method = convolve_fft
# -- Selection of image to plot
if moment is not None:
im = self.get_moment_map(
i=i,
iaz=iaz,
iTrans=iTrans,
moment=moment,
beam=beam,
conv_method=conv_method,
)
else:
# individual channel
if self.is_casa:
cube = self.lines[:, :, :]
# im = self.lines[iv+1,:,:])
else:
cube = self.lines[i, iaz, iTrans, :, :, :]
# im = self.lines[i,iaz,iTrans,iv,:,:]
            # -- continuum subtraction
if substract_cont:
cube = np.maximum(
cube - self.cont[i, iaz, iTrans, np.newaxis, :, :], 0.0
)
# Convolve spectrally
if Delta_v is not None:
print("Spectral convolution at ", Delta_v, "km/s")
# Creating a Hanning function with 101 points
n_window = 101
w = np.hanning(n_window)
# For each pixel, resampling the spectrum between -FWHM to FWHM
# then integrating over convolution window
v_new = self.velocity[iv] + np.linspace(-1, 1, n_window) * Delta_v
iv_min = int(iv - Delta_v / self.dv - 1)
iv_max = int(iv + Delta_v / self.dv + 2)
print(iv_min, iv_max, iv_max - iv_min)
print(self.velocity[iv_min:iv_max])
print(v_new)
im = np.zeros([self.nx, self.ny])
for j in range(self.ny):
for i in range(self.nx):
f = interpolate.interp1d(
self.velocity[iv_min:iv_max], cube[iv_min:iv_max, i, j]
)
im[i, j] = np.average(f(v_new))
else:
im = cube[iv, :, :]
# -- Convolve image
if i_convolve:
im = conv_method(im, beam)
if plot_beam is None:
plot_beam = True
# -- Conversion to brightness temperature
if Tb:
if self.is_casa:
im = Jy_to_Tb(im, self.freq[iTrans], self.pixelscale)
else:
im = Wm2_to_Tb(im, self.freq[iTrans], self.pixelscale)
im = np.nan_to_num(im)
print("Max Tb=", np.max(im), "K")
        # --- Plot range and color map
_color_scale = 'lin'
if fmax is None:
fmax = im.max()
if fpeak is not None:
fmax = im.max() * fpeak
if fmin is None:
fmin = im.min()
if color_scale is None:
color_scale = _color_scale
if color_scale == 'log':
if fmin <= 0.0:
fmin = fmax / dynamic_range
norm = colors.LogNorm(vmin=fmin, vmax=fmax, clip=True)
elif color_scale == 'lin':
norm = colors.Normalize(vmin=fmin, vmax=fmax, clip=True)
else:
raise ValueError("Unknown color scale: " + color_scale)
# -- Make the plot
ax.cla()
image = ax.imshow(im, norm=norm, extent=extent, origin='lower', cmap=cmap)
if limit is not None:
limits = [-limit, limit, -limit, limit]
if limits is not None:
ax.set_xlim(limits[0], limits[1])
ax.set_ylim(limits[2], limits[3])
if not no_xlabel:
ax.set_xlabel(xlabel)
if not no_ylabel:
ax.set_ylabel(ylabel)
if no_xticks:
ax.get_xaxis().set_visible(False)
if no_yticks:
ax.get_yaxis().set_visible(False)
if title is not None:
ax.set_title(title)
# -- Color bar
unit = self.unit
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = plt.colorbar(image, cax=cax)
formatted_unit = unit.replace("-1", "$^{-1}$").replace("-2", "$^{-2}$")
if moment == 0:
cb.set_label("Flux [" + formatted_unit + "km.s$^{-1}$]")
elif moment == 1:
cb.set_label("Velocity [km.s$^{-1}]$")
elif moment == 2:
cb.set_label("Velocity dispersion [km.s$^{-1}$]")
else:
if Tb:
cb.set_label("T$_\mathrm{b}$ [K]")
else:
cb.set_label("Flux [" + formatted_unit + "]")
# -- Adding velocity
if moment is None:
ax.text(
0.5,
0.1,
f"$\Delta$v={self.velocity[iv]:<4.2f}$\,$km/s",
horizontalalignment='center',
color="white",
transform=ax.transAxes,
)
# --- Adding beam
if plot_beam:
dx = 0.125
dy = 0.125
beam = Ellipse(
ax.transLimits.inverted().transform((dx, dy)),
width=bmin,
height=bmaj,
angle=bpa,
fill=True,
color="grey",
)
ax.add_patch(beam)
return image
def plot_line(
self,
i=0,
iaz=0,
iTrans=0,
psf_FWHM=None,
bmaj=None,
bmin=None,
bpa=None,
plot_beam=False,
plot_cont=True,
):
if self.is_casa:
line = np.sum(self.lines[:, :, :], axis=(1, 2))
ylabel = "Flux [Jy]"
else:
line = np.sum(self.lines[i, iaz, iTrans, :, :, :], axis=(1, 2))
ylabel = "Flux [W.m$^{-2}$]"
plt.plot(self.velocity, line)
if plot_cont:
if self.is_casa:
Fcont = 0.5 * (line[0] + line[-1]) # approx the continuum
else:
Fcont = np.sum(self.cont[i, iaz, iTrans, :, :])
plt.plot([self.velocity[0], self.velocity[-1]], [Fcont, Fcont])
xlabel = "v [m.s$^{-1}$]"
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def get_moment_map(
self, i=0, iaz=0, iTrans=0, moment=0, beam=None, conv_method=None
):
"""
This returns the moment maps in physical units, ie:
- M1 is the average velocity [km/s]
- M2 is the velocity dispersion [km/s]
"""
if self.is_casa:
cube = np.copy(self.lines[:, :, :])
else:
cube = np.copy(self.lines[i, iaz, iTrans, :, :, :])
dv = self.velocity[1] - self.velocity[0]
if beam is None:
M0 = np.sum(cube, axis=0) * dv
else:
if moment == 0:
M0 = np.sum(cube, axis=0) * dv
M0 = conv_method(M0, beam)
            else:  # We need to convolve each channel individually
print(
"Convolving individual channel maps, this may take a bit of time ...."
)
try:
bar = progressbar.ProgressBar(
maxval=self.nv,
widgets=[
progressbar.Bar('=', '[', ']'),
' ',
progressbar.Percentage(),
],
)
bar.start()
except:
pass
for iv in range(self.nv):
try:
bar.update(iv + 1)
except:
pass
channel = np.copy(cube[iv, :, :])
cube[iv, :, :] = conv_method(channel, beam)
M0 = np.sum(cube, axis=0) * dv
try:
bar.finish()
except:
pass
if moment >= 1:
M1 = (
np.sum(cube[:, :, :] * self.velocity[:, np.newaxis, np.newaxis], axis=0)
* dv
/ M0
)
if moment == 2:
M2 = np.sqrt(
np.sum(
cube[:, :, :]
* (self.velocity[:, np.newaxis, np.newaxis] - M1[np.newaxis, :, :])
** 2,
axis=0,
)
* dv
/ M0
)
if moment == 0:
return M0
elif moment == 1:
return M1
elif moment == 2:
return M2
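# A minimal usage sketch for the Line class (the model directory and plotting
# parameters below are placeholders, not defaults of this module):
#
#     line = Line("~/mcfost_model/data_CO")
#     line.plot_map(iTrans=0, v=1.0, Tb=True, psf_FWHM=0.1)  # single channel map
#     line.plot_line(iTrans=0)                               # integrated spectrum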
|
try:
import subprocess32 as sp
except ModuleNotFoundError:
import subprocess as sp
import shlex
def run_command(cmd):
p = sp.run(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
return p.stdout, p.stderr
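# Minimal usage sketch (the command below is an arbitrary example, not taken from
# the original file):
if __name__ == "__main__":
    out, err = run_command("echo hello")
    print(out.decode(), err.decode())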
|
from runners.python import Submission
class MathieuSubmission(Submission):
    def run(self, s):
        variables = dict()
        # Each instruction looks like: "<reg> inc|dec <amount> if <reg> <op> <value>"
        inputs = [line.split() for line in s.split('\n')]
        for line in inputs:
            # Register tested in the condition (right of the "if")
            var = line[4]
            if var not in variables:
                variables[var] = 0
            condition = "variables['" + line[4] + "'] "
            condition += line[5] + " " + line[6]
            if eval(condition):
                # Register modified when the condition holds (left of the "if")
                var = line[0]
                if var not in variables:
                    variables[var] = 0
                execution = "variables['" + line[0] + "']"
                execution += " += " if line[1] == "inc" else " -= "
                execution += line[2]
                exec(execution)
        return max(variables.values())
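# Illustrative input line for run() above (values are hypothetical, but the layout
# matches the indices parsed by the method):
#
#     b inc 5 if a > 1
#
# splits into line[0]='b', line[1]='inc', line[2]='5', line[3]='if',
# line[4]='a', line[5]='>', line[6]='1'.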
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(BASE_DIR))
sys.path.append(os.path.join(BASE_DIR,'contact_graspnet'))
sys.path.append(os.path.join(BASE_DIR, 'pointnet2', 'tf_ops/grouping'))
sys.path.append(os.path.join(BASE_DIR, 'pointnet2', 'utils'))
from PIL import Image
import numpy as np
import copy
import cv2
import glob
import trimesh.transformations as tra
from scipy.spatial import cKDTree
import provider
from scene_renderer import SceneRenderer
def load_scene_contacts(dataset_folder, test_split_only=False, num_test=None, scene_contacts_path='scene_contacts'):
"""
Load contact grasp annotations from acronym scenes
Arguments:
dataset_folder {str} -- folder with acronym data and scene contacts
Keyword Arguments:
test_split_only {bool} -- whether to only return test split scenes (default: {False})
num_test {int} -- how many test scenes to use (default: {None})
        scene_contacts_path {str} -- name of folder with scene contact grasp annotations (default: {'scene_contacts'})
Returns:
list(dicts) -- list of scene annotations dicts with object paths and transforms and grasp contacts and transforms.
"""
scene_contact_paths = sorted(glob.glob(os.path.join(dataset_folder, scene_contacts_path, "*")))
if test_split_only:
scene_contact_paths = scene_contact_paths[-num_test:]
contact_infos = []
for contact_path in scene_contact_paths:
# print(contact_path)
try:
npz = np.load(contact_path, allow_pickle=False)
contact_info = {'scene_contact_points':npz['scene_contact_points'],
'obj_paths':npz['obj_paths'],
'obj_transforms':npz['obj_transforms'],
'obj_scales':npz['obj_scales'],
'grasp_transforms':npz['grasp_transforms']}
contact_infos.append(contact_info)
        except Exception:
            print('corrupt scene contact annotation, ignoring: {}'.format(contact_path))
return contact_infos
def preprocess_pc_for_inference(input_pc, num_point, pc_mean=None, return_mean=False, use_farthest_point=False, convert_to_internal_coords=False):
"""
Various preprocessing of the point cloud (downsampling, centering, coordinate transforms)
Arguments:
input_pc {np.ndarray} -- Nx3 input point cloud
num_point {int} -- downsample to this amount of points
Keyword Arguments:
pc_mean {np.ndarray} -- use 3x1 pre-computed mean of point cloud (default: {None})
return_mean {bool} -- whether to return the point cloud mean (default: {False})
        use_farthest_point {bool} -- use farthest point for downsampling (slow and susceptible to outliers) (default: {False})
convert_to_internal_coords {bool} -- Convert from opencv to internal coordinates (x left, y up, z front) (default: {False})
Returns:
[np.ndarray] -- num_pointx3 preprocessed point cloud
"""
normalize_pc_count = input_pc.shape[0] != num_point
if normalize_pc_count:
pc = regularize_pc_point_count(input_pc, num_point, use_farthest_point=use_farthest_point).copy()
else:
pc = input_pc.copy()
if convert_to_internal_coords:
pc[:,:2] *= -1
if pc_mean is None:
pc_mean = np.mean(pc, 0)
pc -= np.expand_dims(pc_mean, 0)
if return_mean:
return pc, pc_mean
else:
return pc
def inverse_transform(trans):
"""
Computes the inverse of 4x4 transform.
Arguments:
trans {np.ndarray} -- 4x4 transform.
Returns:
[np.ndarray] -- inverse 4x4 transform
"""
rot = trans[:3, :3]
t = trans[:3, 3]
rot = np.transpose(rot)
t = -np.matmul(rot, t)
output = np.zeros((4, 4), dtype=np.float32)
output[3][3] = 1
output[:3, :3] = rot
output[:3, 3] = t
return output
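# Illustrative sanity check (not part of the original file): for a rigid 4x4
# transform T, np.matmul(inverse_transform(T), T) is approximately np.eye(4).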
def distance_by_translation_point(p1, p2):
"""
Gets two nx3 points and computes the distance between point p1 and p2.
"""
return np.sqrt(np.sum(np.square(p1 - p2), axis=-1))
def farthest_points(data, nclusters, dist_func, return_center_indexes=False, return_distances=False, verbose=False):
"""
Performs farthest point sampling on data points.
Args:
data: numpy array of the data points.
nclusters: int, number of clusters.
      dist_func: distance function that is used to compare two data points.
return_center_indexes: bool, If True, returns the indexes of the center of
clusters.
return_distances: bool, If True, return distances of each point from centers.
Returns clusters, [centers, distances]:
clusters: numpy array containing the cluster index for each element in
data.
centers: numpy array containing the integer index of each center.
distances: numpy array of [npoints] that contains the closest distance of
each point to any of the cluster centers.
"""
if nclusters >= data.shape[0]:
if return_center_indexes:
return np.arange(data.shape[0], dtype=np.int32), np.arange(data.shape[0], dtype=np.int32)
return np.arange(data.shape[0], dtype=np.int32)
clusters = np.ones((data.shape[0],), dtype=np.int32) * -1
distances = np.ones((data.shape[0],), dtype=np.float32) * 1e7
centers = []
for iter in range(nclusters):
index = np.argmax(distances)
centers.append(index)
shape = list(data.shape)
for i in range(1, len(shape)):
shape[i] = 1
broadcasted_data = np.tile(np.expand_dims(data[index], 0), shape)
new_distances = dist_func(broadcasted_data, data)
distances = np.minimum(distances, new_distances)
clusters[distances == new_distances] = iter
if verbose:
print('farthest points max distance : {}'.format(np.max(distances)))
if return_center_indexes:
if return_distances:
return clusters, np.asarray(centers, dtype=np.int32), distances
return clusters, np.asarray(centers, dtype=np.int32)
return clusters
def reject_median_outliers(data, m=0.4, z_only=False):
"""
Reject outliers with median absolute distance m
Arguments:
data {[np.ndarray]} -- Numpy array such as point cloud
Keyword Arguments:
m {[float]} -- Maximum absolute distance from median in m (default: {0.4})
z_only {[bool]} -- filter only via z_component (default: {False})
Returns:
[np.ndarray] -- Filtered data without outliers
"""
if z_only:
d = np.abs(data[:,2:3] - np.median(data[:,2:3]))
else:
d = np.abs(data - np.median(data, axis=0, keepdims=True))
return data[np.sum(d, axis=1) < m]
def regularize_pc_point_count(pc, npoints, use_farthest_point=False):
"""
    If point cloud pc has fewer points than npoints, it oversamples.
    Otherwise, it downsamples the input pc to npoints points.
:param pc: Nx3 point cloud
:param npoints: number of points the regularized point cloud should have
:param use_farthest_point: use farthest point sampling to downsample the points, runs slower.
:returns: npointsx3 regularized point cloud
"""
if pc.shape[0] > npoints:
if use_farthest_point:
_, center_indexes = farthest_points(pc, npoints, distance_by_translation_point, return_center_indexes=True)
else:
center_indexes = np.random.choice(range(pc.shape[0]), size=npoints, replace=False)
pc = pc[center_indexes, :]
else:
required = npoints - pc.shape[0]
if required > 0:
index = np.random.choice(range(pc.shape[0]), size=required)
pc = np.concatenate((pc, pc[index, :]), axis=0)
return pc
def depth2pc(depth, K, rgb=None):
"""
Convert depth and intrinsics to point cloud and optionally point cloud color
:param depth: hxw depth map in m
:param K: 3x3 Camera Matrix with intrinsics
:returns: (Nx3 point cloud, point cloud color)
"""
mask = np.where(depth > 0)
x,y = mask[1], mask[0]
normalized_x = (x.astype(np.float32) - K[0,2])
normalized_y = (y.astype(np.float32) - K[1,2])
world_x = normalized_x * depth[y, x] / K[0,0]
world_y = normalized_y * depth[y, x] / K[1,1]
world_z = depth[y, x]
if rgb is not None:
rgb = rgb[y,x,:]
pc = np.vstack((world_x, world_y, world_z)).T
return (pc, rgb)
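# Minimal usage sketch (the intrinsics and depth values below are made up for
# illustration, not taken from any dataset handled by this file):
#
#     K_example = np.array([[600., 0., 320.],
#                           [0., 600., 240.],
#                           [0., 0., 1.]])
#     depth_example = np.full((480, 640), 0.8, dtype=np.float32)  # 0.8 m everywhere
#     pc_example, _ = depth2pc(depth_example, K_example)          # (N, 3) points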
def estimate_normals_cam_from_pc(self, pc_cam, max_radius=0.05, k=12):
"""
Estimates normals in camera coords from given point cloud.
Arguments:
pc_cam {np.ndarray} -- Nx3 point cloud in camera coordinates
Keyword Arguments:
max_radius {float} -- maximum radius for normal computation (default: {0.05})
k {int} -- Number of neighbors for normal computation (default: {12})
Returns:
[np.ndarray] -- Nx3 point cloud normals
"""
tree = cKDTree(pc_cam, leafsize=pc_cam.shape[0]+1)
_, ndx = tree.query(pc_cam, k=k, distance_upper_bound=max_radius, n_jobs=-1) # num_points x k
for c,idcs in enumerate(ndx):
idcs[idcs==pc_cam.shape[0]] = c
ndx[c,:] = idcs
neighbors = np.array([pc_cam[ndx[:,n],:] for n in range(k)]).transpose((1,0,2))
pc_normals = vectorized_normal_computation(pc_cam, neighbors)
return pc_normals
def vectorized_normal_computation(pc, neighbors):
"""
Vectorized normal computation with numpy
Arguments:
pc {np.ndarray} -- Nx3 point cloud
        neighbors {np.ndarray} -- Nxkx3 neighbors
Returns:
[np.ndarray] -- Nx3 normal directions
"""
diffs = neighbors - np.expand_dims(pc, 1) # num_point x k x 3
covs = np.matmul(np.transpose(diffs, (0, 2, 1)), diffs) # num_point x 3 x 3
covs /= diffs.shape[1]**2
# takes most time: 6-7ms
eigen_values, eigen_vectors = np.linalg.eig(covs) # num_point x 3, num_point x 3 x 3
orders = np.argsort(-eigen_values, axis=1) # num_point x 3
orders_third = orders[:,2] # num_point
directions = eigen_vectors[np.arange(pc.shape[0]),:,orders_third] # num_point x 3
dots = np.sum(directions * pc, axis=1) # num_point
directions[dots >= 0] = -directions[dots >= 0]
return directions
def load_available_input_data(p, K=None):
"""
Load available data from input file path.
Numpy files .npz/.npy should have keys
'depth' + 'K' + (optionally) 'segmap' + (optionally) 'rgb'
or for point clouds:
'xyz' + (optionally) 'xyz_color'
png files with only depth data (in mm) can be also loaded.
    If the image path is from the GraspNet dataset, corresponding rgb, segmap and intrinsics are also loaded.
:param p: .png/.npz/.npy file path that contain depth/pointcloud and optionally intrinsics/segmentation/rgb
:param K: 3x3 Camera Matrix with intrinsics
:returns: All available data among segmap, rgb, depth, cam_K, pc_full, pc_colors
"""
segmap, rgb, depth, pc_full, pc_colors = None, None, None, None, None
if K is not None:
if isinstance(K,str):
cam_K = eval(K)
#cam_K = np.array(K).reshape(3,3)
if '.np' in p:
data = np.load(p, allow_pickle=True)
if '.npz' in p:
keys = data.files
else:
keys = []
if len(data.shape) == 0:
data = data.item()
            keys = data.keys()
elif data.shape[-1] == 3:
pc_full = data
else:
depth = data
if 'depth' in keys:
depth = data['depth']
if K is None and 'K' in keys:
cam_K = data['K'].reshape(3,3)
if 'segmap' in keys:
segmap = data['segmap']
if 'seg' in keys:
segmap = data['seg']
if 'rgb' in keys:
rgb = data['rgb']
rgb = np.array(cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
elif 'xyz' in keys:
pc_full = np.array(data['xyz']).reshape(-1,3)
if 'xyz_color' in keys:
pc_colors = data['xyz_color']
elif '.png' in p:
if os.path.exists(p.replace('depth', 'label')):
# graspnet data
depth, rgb, segmap, K = load_graspnet_data(p)
elif os.path.exists(p.replace('depths', 'images').replace('npy', 'png')):
rgb = np.array(Image.open(p.replace('depths', 'images').replace('npy', 'png')))
else:
depth = np.array(Image.open(p))
else:
raise ValueError('{} is neither png nor npz/npy file'.format(p))
return segmap, rgb, depth, cam_K, pc_full, pc_colors
def load_graspnet_data(rgb_image_path):
"""
Loads data from the GraspNet-1Billion dataset
# https://graspnet.net/
:param rgb_image_path: .png file path to depth image in graspnet dataset
:returns: (depth, rgb, segmap, K)
"""
    depth = np.array(Image.open(rgb_image_path))/1000. # mm to m
segmap = np.array(Image.open(rgb_image_path.replace('depth', 'label')))
rgb = np.array(Image.open(rgb_image_path.replace('depth', 'rgb')))
# graspnet images are upside down, rotate for inference
# careful: rotate grasp poses back for evaluation
depth = np.rot90(depth,2)
segmap = np.rot90(segmap,2)
rgb = np.rot90(rgb,2)
if 'kinect' in rgb_image_path:
# Kinect azure:
K=np.array([[631.54864502 , 0. , 638.43517329],
[ 0. , 631.20751953, 366.49904066],
[ 0. , 0. , 1. ]])
else:
# Realsense:
K=np.array([[616.36529541 , 0. , 310.25881958],
[ 0. , 616.20294189, 236.59980774],
[ 0. , 0. , 1. ]])
return depth, rgb, segmap, K
def center_pc_convert_cam(cam_poses, batch_data):
"""
Converts from OpenGL to OpenCV coordinates, computes inverse of camera pose and centers point cloud
:param cam_poses: (bx4x4) Camera poses in OpenGL format
:param batch_data: (bxNx3) point clouds
:returns: (cam_poses, batch_data) converted
"""
# OpenCV OpenGL conversion
for j in range(len(cam_poses)):
cam_poses[j,:3,1] = -cam_poses[j,:3,1]
cam_poses[j,:3,2] = -cam_poses[j,:3,2]
cam_poses[j] = inverse_transform(cam_poses[j])
pc_mean = np.mean(batch_data, axis=1, keepdims=True)
batch_data[:,:,:3] -= pc_mean[:,:,:3]
cam_poses[:,:3,3] -= pc_mean[:,0,:3]
return cam_poses, batch_data
class PointCloudReader:
"""
Class to load scenes, render point clouds and augment them during training
Arguments:
root_folder {str} -- acronym root folder
batch_size {int} -- number of rendered point clouds per-batch
Keyword Arguments:
raw_num_points {int} -- Number of random/farthest point samples per scene (default: {20000})
estimate_normals {bool} -- compute normals from rendered point cloud (default: {False})
caching {bool} -- cache scenes in memory (default: {True})
use_uniform_quaternions {bool} -- use uniform quaternions for camera sampling (default: {False})
scene_obj_scales {list} -- object scales in scene (default: {None})
scene_obj_paths {list} -- object paths in scene (default: {None})
scene_obj_transforms {np.ndarray} -- object transforms in scene (default: {None})
num_train_samples {int} -- training scenes (default: {None})
num_test_samples {int} -- test scenes (default: {None})
use_farthest_point {bool} -- use farthest point sampling to reduce point cloud dimension (default: {False})
        intrinsics {str} -- intrinsics used for rendering depth maps (default: {None})
distance_range {tuple} -- distance range from camera to center of table (default: {(0.9,1.3)})
elevation {tuple} -- elevation range (90 deg is top-down) (default: {(30,150)})
pc_augm_config {dict} -- point cloud augmentation config (default: {None})
depth_augm_config {dict} -- depth map augmentation config (default: {None})
"""
def __init__(
self,
root_folder,
batch_size=1,
raw_num_points = 20000,
estimate_normals = False,
caching=True,
use_uniform_quaternions=False,
scene_obj_scales=None,
scene_obj_paths=None,
scene_obj_transforms=None,
num_train_samples=None,
num_test_samples=None,
use_farthest_point = False,
intrinsics = None,
distance_range = (0.9,1.3),
elevation = (30,150),
pc_augm_config = None,
depth_augm_config = None
):
self._root_folder = root_folder
self._batch_size = batch_size
self._raw_num_points = raw_num_points
self._caching = caching
self._num_train_samples = num_train_samples
self._num_test_samples = num_test_samples
self._estimate_normals = estimate_normals
self._use_farthest_point = use_farthest_point
self._scene_obj_scales = scene_obj_scales
self._scene_obj_paths = scene_obj_paths
self._scene_obj_transforms = scene_obj_transforms
self._distance_range = distance_range
self._pc_augm_config = pc_augm_config
self._depth_augm_config = depth_augm_config
self._current_pc = None
self._cache = {}
self._renderer = SceneRenderer(caching=True, intrinsics=intrinsics)
if use_uniform_quaternions:
quat_path = os.path.join(self._root_folder, 'uniform_quaternions/data2_4608.qua')
quaternions = [l[:-1].split('\t') for l in open(quat_path, 'r').readlines()]
quaternions = [[float(t[0]),
float(t[1]),
float(t[2]),
float(t[3])] for t in quaternions]
quaternions = np.asarray(quaternions)
quaternions = np.roll(quaternions, 1, axis=1)
self._all_poses = [tra.quaternion_matrix(q) for q in quaternions]
else:
self._cam_orientations = []
self._elevation = np.array(elevation)/180.
for az in np.linspace(0, np.pi * 2, 30):
for el in np.linspace(self._elevation[0], self._elevation[1], 30):
self._cam_orientations.append(tra.euler_matrix(0, -el, az))
self._coordinate_transform = tra.euler_matrix(np.pi/2, 0, 0).dot(tra.euler_matrix(0, np.pi/2, 0))
def get_cam_pose(self, cam_orientation):
"""
Samples camera pose on shell around table center
Arguments:
cam_orientation {np.ndarray} -- 3x3 camera orientation matrix
Returns:
[np.ndarray] -- 4x4 homogeneous camera pose
"""
distance = self._distance_range[0] + np.random.rand()*(self._distance_range[1]-self._distance_range[0])
extrinsics = np.eye(4)
extrinsics[0, 3] += distance
extrinsics = cam_orientation.dot(extrinsics)
cam_pose = extrinsics.dot(self._coordinate_transform)
# table height
cam_pose[2,3] += self._renderer._table_dims[2]
cam_pose[:3,:2]= -cam_pose[:3,:2]
return cam_pose
def _augment_pc(self, pc):
"""
Augments point cloud with jitter and dropout according to config
Arguments:
pc {np.ndarray} -- Nx3 point cloud
Returns:
np.ndarray -- augmented point cloud
"""
# not used because no artificial occlusion
if 'occlusion_nclusters' in self._pc_augm_config and self._pc_augm_config['occlusion_nclusters'] > 0:
pc = self.apply_dropout(pc,
self._pc_augm_config['occlusion_nclusters'],
self._pc_augm_config['occlusion_dropout_rate'])
if 'sigma' in self._pc_augm_config and self._pc_augm_config['sigma'] > 0:
pc = provider.jitter_point_cloud(pc[np.newaxis, :, :],
sigma=self._pc_augm_config['sigma'],
clip=self._pc_augm_config['clip'])[0]
return pc[:,:3]
def _augment_depth(self, depth):
"""
Augments depth map with z-noise and smoothing according to config
Arguments:
depth {np.ndarray} -- depth map
Returns:
np.ndarray -- augmented depth map
"""
if 'sigma' in self._depth_augm_config and self._depth_augm_config['sigma'] > 0:
clip = self._depth_augm_config['clip']
sigma = self._depth_augm_config['sigma']
noise = np.clip(sigma*np.random.randn(*depth.shape), -clip, clip)
depth += noise
if 'gaussian_kernel' in self._depth_augm_config and self._depth_augm_config['gaussian_kernel'] > 0:
kernel = self._depth_augm_config['gaussian_kernel']
depth_copy = depth.copy()
depth = cv2.GaussianBlur(depth,(kernel,kernel),0)
depth[depth_copy==0] = depth_copy[depth_copy==0]
return depth
def apply_dropout(self, pc, occlusion_nclusters, occlusion_dropout_rate):
"""
        Split the point cloud into occlusion_nclusters clusters (via farthest point
        sampling) and remove each cluster with probability occlusion_dropout_rate
        Arguments:
            pc {np.ndarray} -- Nx3 point cloud
            occlusion_nclusters {int} -- number of clusters to split the cloud into
            occlusion_dropout_rate {float} -- per-cluster removal probability
        Returns:
            [np.ndarray] -- Mx3 point cloud with M <= N
"""
if occlusion_nclusters == 0 or occlusion_dropout_rate == 0.:
return pc
labels = farthest_points(pc, occlusion_nclusters, distance_by_translation_point)
removed_labels = np.unique(labels)
removed_labels = removed_labels[np.random.rand(removed_labels.shape[0]) < occlusion_dropout_rate]
if removed_labels.shape[0] == 0:
return pc
mask = np.ones(labels.shape, labels.dtype)
for l in removed_labels:
mask = np.logical_and(mask, labels != l)
return pc[mask]
def get_scene_batch(self, scene_idx=None, return_segmap=False, save=False):
"""
Render a batch of scene point clouds
Keyword Arguments:
scene_idx {int} -- index of the scene (default: {None})
return_segmap {bool} -- whether to render a segmap of objects (default: {False})
save {bool} -- Save training/validation data to npz file for later inference (default: {False})
Returns:
[batch_data, cam_poses, scene_idx] -- batch of rendered point clouds, camera poses and the scene_idx
"""
dims = 6 if self._estimate_normals else 3
batch_data = np.empty((self._batch_size, self._raw_num_points, dims), dtype=np.float32)
cam_poses = np.empty((self._batch_size, 4, 4), dtype=np.float32)
if scene_idx is None:
scene_idx = np.random.randint(0,self._num_train_samples)
obj_paths = [os.path.join(self._root_folder, p) for p in self._scene_obj_paths[scene_idx]]
mesh_scales = self._scene_obj_scales[scene_idx]
obj_trafos = self._scene_obj_transforms[scene_idx]
self.change_scene(obj_paths, mesh_scales, obj_trafos, visualize=False)
batch_segmap, batch_obj_pcs = [], []
for i in range(self._batch_size):
# 0.005s
pc_cam, pc_normals, camera_pose, depth = self.render_random_scene(estimate_normals = self._estimate_normals)
if return_segmap:
segmap, _, obj_pcs = self._renderer.render_labels(depth, obj_paths, mesh_scales, render_pc=True)
batch_obj_pcs.append(obj_pcs)
batch_segmap.append(segmap)
batch_data[i,:,0:3] = pc_cam[:,:3]
if self._estimate_normals:
batch_data[i,:,3:6] = pc_normals[:,:3]
cam_poses[i,:,:] = camera_pose
if save:
K = np.array([[616.36529541,0,310.25881958 ],[0,616.20294189,236.59980774],[0,0,1]])
data = {'depth':depth, 'K':K, 'camera_pose':camera_pose, 'scene_idx':scene_idx}
if return_segmap:
data.update(segmap=segmap)
np.savez('results/{}_acronym.npz'.format(scene_idx), data)
if return_segmap:
return batch_data, cam_poses, scene_idx, batch_segmap, batch_obj_pcs
else:
return batch_data, cam_poses, scene_idx
def render_random_scene(self, estimate_normals=False, camera_pose=None):
"""
Renders scene depth map, transforms to regularized pointcloud and applies augmentations
Keyword Arguments:
estimate_normals {bool} -- calculate and return normals (default: {False})
            camera_pose {np.ndarray} -- 4x4 camera pose to render the scene from (default: {None})
Returns:
[pc, pc_normals, camera_pose, depth] -- [point cloud, point cloud normals, camera pose, depth]
"""
if camera_pose is None:
viewing_index = np.random.randint(0, high=len(self._cam_orientations))
camera_orientation = self._cam_orientations[viewing_index]
camera_pose = self.get_cam_pose(camera_orientation)
in_camera_pose = copy.deepcopy(camera_pose)
# 0.005 s
_, depth, _, camera_pose = self._renderer.render(in_camera_pose, render_pc=False)
depth = self._augment_depth(depth)
pc = self._renderer._to_pointcloud(depth)
pc = regularize_pc_point_count(pc, self._raw_num_points, use_farthest_point=self._use_farthest_point)
pc = self._augment_pc(pc)
pc_normals = estimate_normals_cam_from_pc(pc[:,:3], raw_num_points=self._raw_num_points) if estimate_normals else []
return pc, pc_normals, camera_pose, depth
def change_object(self, cad_path, cad_scale):
"""
Change object in pyrender scene
Arguments:
cad_path {str} -- path to CAD model
cad_scale {float} -- scale of CAD model
"""
self._renderer.change_object(cad_path, cad_scale)
def change_scene(self, obj_paths, obj_scales, obj_transforms, visualize=False):
"""
Change pyrender scene
Arguments:
obj_paths {list[str]} -- path to CAD models in scene
obj_scales {list[float]} -- scales of CAD models
obj_transforms {list[np.ndarray]} -- poses of CAD models
Keyword Arguments:
visualize {bool} -- whether to update the visualizer as well (default: {False})
"""
self._renderer.change_scene(obj_paths, obj_scales, obj_transforms)
if visualize:
self._visualizer.change_scene(obj_paths, obj_scales, obj_transforms)
def __del__(self):
print('********** terminating renderer **************')
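if __name__ == '__main__':
    # Hedged sanity check (assumes the module-level helpers referenced above,
    # e.g. inverse_transform and numpy, are importable): center_pc_convert_cam
    # should leave each point cloud zero-centered and preserve the array shapes.
    dummy_poses = np.tile(np.eye(4, dtype=np.float32), (2, 1, 1))
    dummy_pc = np.random.RandomState(0).rand(2, 100, 3).astype(np.float32)
    dummy_poses, dummy_pc = center_pc_convert_cam(dummy_poses, dummy_pc)
    print(dummy_poses.shape, dummy_pc.shape, np.abs(dummy_pc.mean(axis=1)).max())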
|
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the scalar type info objects"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironment import Interface
from Plugins.SharedLibraryTestsPluginImpl.TypeInfoFactory import TypeInfoFactory
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class _ScalarTypeInfoFactory(TypeInfoFactory):
"""Functionality common to all scalars"""
# ----------------------------------------------------------------------
# |
# | Public Properties
# |
# ----------------------------------------------------------------------
@Interface.abstractproperty
def CType(self):
"""C type"""
raise Exception("Abstract property")
# ----------------------------------------------------------------------
# |
# | Public Methods
# |
# ----------------------------------------------------------------------
@Interface.override
def GetTransformInputArgs(
self,
is_input_optional,
input_name="input",
):
if is_input_optional:
return "Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::IsNull({input_name}) ? nullptr : &Microsoft::Featurizer::Traits<typename Microsoft::Featurizer::Traits<{cpp_type}>::nullable_type>::GetNullableValue({input_name})".format(
cpp_type=self.CppType,
input_name=input_name,
)
return input_name
# ----------------------------------------------------------------------
@Interface.override
def GetOutputInfo(
self,
result_name="result",
):
return self.Result(
self.CppType,
"{} {};".format(self.CppType, result_name),
"&{}".format(result_name),
textwrap.dedent(
"""\
#if (defined __apple_build_version__ || defined __GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 8)))
results.push_back(std::move({result}));
#else
results.emplace_back(std::move({result}));
#endif
""",
).format(
result=result_name,
),
)
# ----------------------------------------------------------------------
@Interface.staticderived
class Int8TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("int8")
CppType = Interface.DerivedProperty("std::int8_t")
CType = Interface.DerivedProperty("int8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int16TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("int16")
CppType = Interface.DerivedProperty("std::int16_t")
CType = Interface.DerivedProperty("int16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int32TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("int32")
CppType = Interface.DerivedProperty("std::int32_t")
CType = Interface.DerivedProperty("int32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class Int64TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("int64")
CppType = Interface.DerivedProperty("std::int64_t")
CType = Interface.DerivedProperty("int64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt8TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("uint8")
CppType = Interface.DerivedProperty("std::uint8_t")
CType = Interface.DerivedProperty("uint8_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt16TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("uint16")
CppType = Interface.DerivedProperty("std::uint16_t")
CType = Interface.DerivedProperty("uint16_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt32TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("uint32")
CppType = Interface.DerivedProperty("std::uint32_t")
CType = Interface.DerivedProperty("uint32_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class UInt64TypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("uint64")
CppType = Interface.DerivedProperty("std::uint64_t")
CType = Interface.DerivedProperty("uint64_t")
# ----------------------------------------------------------------------
@Interface.staticderived
class FloatTypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("float")
CppType = Interface.DerivedProperty("std::float_t")
CType = Interface.DerivedProperty("float")
# ----------------------------------------------------------------------
@Interface.staticderived
class DoubleTypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("double")
CppType = Interface.DerivedProperty("std::double_t")
CType = Interface.DerivedProperty("double")
# ----------------------------------------------------------------------
@Interface.staticderived
class BoolTypeInfoFactory(_ScalarTypeInfoFactory):
TypeName = Interface.DerivedProperty("bool")
CppType = Interface.DerivedProperty("bool")
CType = Interface.DerivedProperty("bool")
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static text messages relating to student groups."""
__author__ = 'Mike Gainer (mgainer@google.com)'
STUDENT_GROUPS_DESCRIPTION = """
Manage groups of students. Group level settings can override course level
settings, and apply only to students in that group. This page allows you
to manage group membership. Configuration of per-group settings overrides
is done on pages for those settings.
Additionally, events recorded for students are marked with their current
group. Certain analytics can be filtered to show only results relating
to individual groups.
"""
GROUP_MEMBERS_DESCRIPTION = """
A list of email addresses of students. Student emails may be assigned to
groups before they are registered for the course. Separate email addresses
with any combination of commas, spaces, tabs or newlines."""
EDIT_STUDENT_GROUPS_PERMISSION_DESCRIPTION = """
Allows creation, deletion, and modification of membership in groups of students.
Other permissions may be required to configure group-level settings to
override course-level settings -- e.g., to modify course/unit/lesson
availability.
"""
STUDENT_GROUP_ID_DESCRIPTION = """
Numeric ID of the group to which the student belongs, or null if the student
has not been assigned to any group. This can be used directly for
grouping/aggregating data.
"""
STUDENT_GROUP_NAME_DESCRIPTION = """
Name of the group to which the student has been assigned, or null. Note that
since student groups can be given the same name, you should not rely on this
field for aggregation, unless you are sure that no groups share a name.
"""
AVAILABILITY_DEFAULT_AVAILABILITY_DESCRIPTION = """
This is the current availability setting for this item at the course level.
"""
AVAILABILITY_OVERRIDDEN_AVAILABILITY_DESCRIPTION = """
Availability of course items can be overridden for students in this group,
or can default to using whatever the course-level setting is.
"""
GROUP_COURSE_AVAILABILITY = """
This sets the availability of the course for registered and unregistered
students in this group.
"""
ENABLE_GROUP_CACHING = """
If 'True', student groups are cached, with a one-hour refresh rate.
If you plan to change multiple student groups, or you otherwise need
your student group changes to take effect rapidly, this can be set to
'False'. Otherwise, keep this setting at 'True' to maximize performance.
"""
|
from discord.ext import commands
class MyCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
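# Hedged sketch: cogs are normally registered through an extension-level setup
# hook so the file can be loaded with bot.load_extension(); the async form
# below assumes discord.py 2.x, where Bot.add_cog is a coroutine.
async def setup(bot):
    await bot.add_cog(MyCog(bot))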
|
# -*- coding: utf-8 -*-
import asyncio
from paco import curry
from .helpers import run_in_loop
def task(x, y, baz=None, *args, **kw):
return x + y, baz, kw
@asyncio.coroutine
def coro(x, y, baz=None, *args, **kw):
return task(x, y, baz=baz, *args, **kw)
def test_curry_function_arity():
num, val, kw = run_in_loop(curry(task)(2)(4)(baz='foo'))
assert num == 6
assert val == 'foo'
assert kw == {}
num, val, kw = run_in_loop(curry(task)(2, 4)(baz='foo'))
assert num == 6
assert val == 'foo'
assert kw == {}
num, val, kw = run_in_loop(curry(task)(2, 4, baz='foo'))
assert num == 6
assert val == 'foo'
assert kw == {}
num, val, kw = run_in_loop(curry(task)(2, 4, baz='foo', fee=True))
assert num == 6
assert val == 'foo'
assert kw == {'fee': True}
def test_curry_single_arity():
assert run_in_loop(curry(lambda x: x)(True))
def test_curry_zero_arity():
assert run_in_loop(curry(lambda: True))
def test_curry_custom_arity():
currier = curry(4)
num, val, kw = run_in_loop(currier(task)(2)(4)(baz='foo')(tee=True))
assert num == 6
assert val == 'foo'
assert kw == {'tee': True}
def test_curry_ignore_kwargs():
currier = curry(ignore_kwargs=True)
num, val, kw = run_in_loop(currier(task)(2)(4))
assert num == 6
assert val is None
assert kw == {}
currier = curry(ignore_kwargs=True)
num, val, kw = run_in_loop(currier(task)(2)(4, baz='foo', tee=True))
assert num == 6
    assert val == 'foo'
assert kw == {'tee': True}
def test_curry_extra_arguments():
currier = curry(4)
num, val, kw = run_in_loop(currier(task)(2)(4)(baz='foo')(tee=True))
assert num == 6
assert val == 'foo'
assert kw == {'tee': True}
currier = curry(4)
num, val, kw = run_in_loop(currier(task)(2)(4)(baz='foo')(tee=True))
assert num == 6
assert val == 'foo'
assert kw == {'tee': True}
def test_curry_evaluator_function():
def evaluator(acc, fn):
return len(acc[0]) < 3
def task(x, y):
return x * y
currier = curry(evaluator=evaluator)
assert run_in_loop(currier(task)(4, 4)) == 16
def test_curry_decorator():
@curry
def task(x, y, z):
return x + y + z
assert run_in_loop(task(2)(4)(8)) == 14
@curry(4)
def task(x, y, *args):
return x + y + args[0] + args[1]
assert run_in_loop(task(2)(4)(8)(10)) == 24
@curry(4)
@asyncio.coroutine
def task(x, y, *args):
return x + y + args[0] + args[1]
assert run_in_loop(task(2)(4)(8)(10)) == 24
def test_curry_coroutine():
num, val, kw = run_in_loop(curry(coro)(2)(4)(baz='foo', tee=True))
assert num == 6
assert val == 'foo'
assert kw == {'tee': True}
|
from pathlib import Path
import warnings
from pynwb import NWBHDF5IO, validate
from pynwb.testing import TestCase
class TestReadOldVersions(TestCase):
def test_read(self):
"""
Attempt to read and validate all NWB files in the same folder as this file. The folder should contain NWB files
from previous versions of NWB. See src/pynwb/testing/make_test_files.py for code to generate the NWB files.
"""
dir_path = Path(__file__).parent
nwb_files = dir_path.glob('*.nwb')
for f in nwb_files:
with self.subTest(file=f.name):
with NWBHDF5IO(str(f), 'r') as io:
errors = validate(io)
io.read()
if errors:
for e in errors:
warnings.warn('%s: %s' % (f.name, e))
# TODO uncomment below when validation errors have been fixed
# raise Exception('%d validation error(s). See warnings.' % len(errors))
|
import numpy as np
from advent.utils.serialization import json_load
from davsn.dataset.base_dataset import BaseDataset
class SynthiaSeqDataSet(BaseDataset):
def __init__(self, root, list_path, set='all',
max_iters=None, crop_size=(321, 321), mean=(128, 128, 128)):
super().__init__(root, list_path, set, max_iters, crop_size, None, mean)
# map to cityscape's ids
self.id_to_trainid = {3: 0, 4: 1, 2: 2, 5: 3, 7: 4, 15: 5, 9: 6, 6: 7, 1: 8, 10: 9, 11: 10, 8: 11,}
def get_metadata(self, name):
img_file = self.root / 'rgb' / name
label_file = self.root / 'label' / name
return img_file, label_file
def __getitem__(self, index):
img_file, label_file, name = self.files[index]
image = self.get_image(img_file)
label = self.get_labels_synthia_seq(label_file)
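        # the bottom 120 rows are cropped from both the image and the label
        # (assumption: this removes the ego-vehicle/dashboard area of the frame)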
image = image[:-120, :, :]
label = label[:-120, :]
label_copy = 255 * np.ones(label.shape, dtype=np.float32)
for k, v in self.id_to_trainid.items():
label_copy[label == k] = v
image = self.preprocess(image)
frame = int(name.split('/')[-1].replace('.png',''))
name_kf = name.replace(str(frame).zfill(6) + '.png', str(frame-1).zfill(6) + '.png')
file_kf = self.root / 'rgb' / name_kf
image_kf = self.get_image(file_kf)
image_kf = image_kf[:-120, :, :]
image_kf = self.preprocess(image_kf.copy())
return image.copy(), label_copy.copy(), image_kf.copy(), np.array(image.shape), name
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" `pipeline` module provides tools to create a pipeline of tasks
a pipeline can be composed of one or several branches, but
everything runs in a single thread. The initial goal of this
framework is to provide a simple and direct way of defining task
    types and reusing them in different pipeline configurations.
    *The motivation is not to parallelize tasks, yet tasks could be
parallelized in some configurations, depending on the exact use
case and the context...*
    tasks are implemented as *coroutine* functions, as described by
    David Beazley (see http://www.dabeaz.com/coroutines/ for details).
This module is used by `clichain.cli` module.
"""
from functools import wraps
import logging
import sys
import collections
import collections.abc
logger = logging.getLogger(__name__)
def coroutine(func):
""" coroutine decorator, 'prime' the coroutine function
this function is intended to be used as a decorator to create
a basic *coroutine* function, for instance: ::
@coroutine
def cr(*args, **kw):
print('starting...')
try:
while True:
item = yield
print(f'processing: {item}')
except GeneratorExit:
print('ending...')
calling the decorated function will automatically get it to the
first *yield* statement. ::
>>> cr()
starting...
.. note:: the decorated function is wrapped using
`functools.wraps`
.. seealso:: http://www.dabeaz.com/coroutines/
"""
@wraps(func)
def start(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start
class Control:
""" Internal, 'control' obj received by `task` decorated functions
`Control` is a context manager
.. seealso:: `Control.__init__`
"""
def __init__(self, context, name, targets):
""" initialize new `Control` object
+ `context` is a `Context` object, `Control` object
will use it to log exception if an exception occurs
in the coroutine while used as a context manager. The
`Context` object is also accessible through the `context`
attribute.
+ `name` will be accessible as `name` attribute (rw)
ex: ::
logger = logging.getLogger(f'{__name__}.{ctrl.name}')
+ `targets` will be accessible through `targets` property
(ro)
.. seealso:: `Control.targets`
`targets` is read only to ensure consistency with `push`
function returned by `Control.__enter__`: `Control` object
is expected to be used as a context manager: ::
with ctrl as push:
while True:
data = yield
push(data) # send data to next coroutines
a `push` function is defined and returned when `Control` is
used as a context manager, but can actually be created using
`Control.push` property.
the purpose is to force using an efficient solution avoiding
attributes lookup (using *self*) for every call, which has
an impact given this function is likely to be called a lot
(usually for each processed item). This way we define the
function and reference it once in the user function (as
'push' in the example).
.. seealso:: `task` decorator
"""
self.context = context
self.name = name
self._targets = targets
@property
def push(self):
""" return a 'push' function sending data to next coroutines
.. seealso:: `Control.__init__`
"""
def push(value, targets=self.targets):
""" send `value` to the following coroutines in the pipeline
given 'targets' is an iterable containing the next
coroutines, this is equivalent to:
for t in targets:
t.send(value)
"""
for t in targets:
t.send(value)
return push
def __enter__(self):
""" return push function
.. seealso:: `Control.push`, `Control.__exit__`
"""
return self.push
def __exit__(self, tpe, value, tb):
""" handle GeneratorExit exception and log unhandled exceptions
`Control` object is created by `task` decorator, the
decorated function gets the `Control` object as first arg,
and is expected to use it as a context manager, which will
handle GeneratorExit and log any unhandled exception.
`context` attribute (`Context` object) will be used if the
exception is not `None` or `GeneratorExit`, in order to:
+ determine if the exception traceback should be logged,
if the exception has already been logged by another
`Control` object (i.e in another coroutine), then only
an error message will be logged, otherwise the full
exception will be recorded.
+ get the base logger to use
.. seealso:: `task`, `Control.__init__`, `Context`
"""
if isinstance(value, GeneratorExit):
return True
ctx = self.context
lgr = ctx.logger.getChild(self.name)
if value in ctx.exceptions:
lgr.error(f'failing ({tpe.__name__}: {value})')
else:
            lgr.error('an exception occurred:', exc_info=(tpe, value, tb))
ctx.exceptions.append(value)
@property
def targets(self):
""" next coroutines in the pipeline """
return self._targets
def task(func):
""" make "partial" `coroutines` expected to be used with `create`.
`task` will create a "partial" function, which when
called with args and keyword args will actually return a
    `coroutine` function designed to be used with the `create` function.
example:
a basic coroutine adding offset to input data could be defined
as follows using `task`: ::
@task
def offset(ctrl, offset):
print('pre-processing')
with ctrl as push:
while True:
value = yield
push(value + offset)
# will be executed unless an exception occurs in
# the 'while' loop
print('post_processing')
+ `ctrl` will handle the `GeneratorExit` exception and log any
unhandled exception.
+ the `push` method send data to the next coroutines in the
pipeline.
the resulting function is called with the original function's
args and keyword args: ::
off = offset(offset=1)
*off* is a partial `coroutine` function expected to be used in a
pipeline defintion with `create`.
the coroutine will eventually be created calling this new
function with specific arguments depending on the pipeline
specification (see `create` for details), ex: ::
# create the coroutine
off = off(targets=[t1, t2...])
.. note:: as for `coroutine`, all the functions (partial or
final functions) are wrapped using `functools.wraps`
**example:**
::
@task
def output(ctrl):
with ctrl:
while True:
print((yield))
@task
def parse(ctrl):
with ctrl as push:
while True:
try:
value = ast.literal_eval((yield))
except (SyntaxError, ValueError):
continue
push(value)
@task
def offset(ctrl, offset):
offset = int(offset)
logger = logging.getLogger(f'{__name__}.{ctrl.name}')
logger.info(f'offset: {offset}')
with ctrl as push:
while True:
value = yield
push(value + offset)
logger.info('offset task finished, no more value')
if __name__ == '__main__':
out = output()
off1 = offset(10)
off2 = offset(offset=100)
parse = parse()
            # the previous results (out, off1, off2, parse) should
# be used in the pipeline definition and the followings
# should be performed by "create"
out = out()
off1 = off1((out,))
off2 = off2((out,))
parse = parse([off1, off2])
with open('foo.txt') as inputs:
for data in inputs:
parse.send(data)
out.close()
off1.close()
off2.close()
parse.close()
.. seealso:: `coroutine`, `create`
"""
@coroutine
@wraps(func)
def _task(context, targets=None, debug='<noname>', args=None, kwargs=None):
ctrl = Control(
context=context,
name=debug,
targets=[] if targets is None else targets,
)
return func(ctrl,
*(() if args is None else args),
**({} if kwargs is None else kwargs))
@wraps(func)
def task(*args, **kwargs):
@wraps(func)
def _partial(*_args, **_kwargs):
return _task(*_args, **_kwargs, args=args, kwargs=kwargs)
return _partial
return task
class Context:
""" will be passed to all `coroutine`s in the pipeline
`Context` object is a common object shared by all coroutines in
the pipeline.
attributes:
+ `exceptions` is a list which remains empty until an exception
occurs within a `task` and is handled by the module. Then
      `exceptions` contains each exception caught by the module.
Each exception is logged only one time with its traceback when
it's caught by `Control` context manager.
.. note:: if an exception is caught by the module it will be
"re-raised" thus terminate the process but user code could
still raise another exception(s) for example if a coroutine
is not implemented using `task` or GeneratorExit is handled
within the user loop...
+ `logger` will be used for every message logged by the module,
and can be used by the user. The default is to use the
module's `logger`.
+ `obj` attribute is an arbitrary object provided by the user
when creating the pipeline. The default is `None`.
.. seealso:: `create`
"""
def __init__(self, logger=logger, obj=None):
""" init the `Context` which will be shared by coroutines """
self.logger = logger
self.exceptions = []
self.obj = obj
def create(tasks, output=print, **kw):
""" create a pipeline of coroutines from a specification
a pipeline is a succession of coroutines organized into one
or several branches.
    `output` is a strategy to use for the pipeline output; the
    default strategy is `print`. `output` will be called for each
    data item reaching the pipeline's output and takes a single argument.
**extra keyword args** will be used to initialize the `Context`
object that will be send to all the coroutines of the pipeline.
`tasks` argument describes the pipeline and consists of a
mapping of coroutines as key: value pairs, where each single key
identifies a single coroutine.
each coroutine is defined either by a single `coroutine`
    function (see **task** field below) or a dictionary, which
contains the following fields:
+ **task**: the coroutine function to use
.. seealso:: `task` decorator
the coroutine function will be called with the following
keyword arguments:
+ **context**: the `Context` object shared by all the
coroutines of the pipeline.
+ **targets**: an iterable containing the following coroutines
in the pipeline (default is no targets).
+ **debug**: an optional name, used by `task` to get a child
logger from the `Context` logger, which will be used to log
error if an exception occurs. The exception will be logged
at error level and the exc_info will be passed to the log
record. The value will be accessible (and writeable) through
      `Control.name` attribute, which can be useful for logging:
ex: ::
logger = logging.getLogger(f'{__name__}.{ctrl.name}')
.. note:: Default value is the coroutine's key in the
pipeline definition (default will be used if value is
`None` or an empty string).
+ **input**: (optional) set this coroutine as a 'target' of the
coroutine(s) defined by **input**. **input** can be a single
key or an iterable containing keys of other coroutines defined
      in the pipeline dictionary.
.. note:: `None` value will be interpreted as the pipeline's
        main input. No value or an empty list is equivalent to None
        if this coroutine is not specified as **output** of another
coroutine in the pipeline.
+ **output**: (optional) set the coroutine(s) whose keys are
defined in **output** as a 'target' of this coroutine.
**output** can be a single key or an iterable containing keys
      of other coroutines defined in the pipeline dictionary.
.. note:: `None` value will be interpreted as the pipeline's
        main output. No value or an empty list is equivalent to None
        if this coroutine is not specified as **input** of another
coroutine in the pipeline.
+ **debug**: (optional) a debug name to use in logging if an
unhandled exception occurs. see above description.
.. note:: specifying a coroutine by a `coroutine` function is
        equivalent to providing a dictionary containing only the
**task** field.
**examples:**
.. seealso:: `task` and `coroutine` decorators
given we have the following declarations: ::
@coroutine
def output(targets, **kw):
try:
while True:
for t in targets:
t.send('RESULT: {}'.format((yield)))
except GeneratorExit:
return
@task
def parse(ctrl):
with ctrl as push:
while True:
try:
value = ast.literal_eval((yield))
except (SyntaxError, ValueError):
continue
push(value)
@task
def mytask(ctrl, param):
logger = logging.getLogger(f'{__name__}.{ctrl.name}')
logger.info('starting task')
with ctrl as push:
while True:
[...]
logger.info('finishing task')
+ defining a pipeline composed of a single sequence:
example: ::
inp >>> a --> b --> c --> d >>> out
here's how we could define it: ::
pipeline = pipeline.create({
'a': parse(),
'b': {'task': mytask(1), 'input': 'a'},
'c': {'task': mytask(2), 'input': 'b'},
'd': {'task': output, 'input': 'c'},
})
the created pipeline is a `Pipeline` object, it can be run
over any input generator using its 'Pipeline.run' method,
sending data to stdout by default.
.. seealso:: `Pipeline.run`
+ define a pipeline with branches:
example: ::
+--> B --> C >>> out
inp >>> A--|
+--> D --> E >>> out
here's how we could define it: ::
pipeline = pipeline.create({
'a': {'task': A, 'output': ('b', 'd')},
'b': B,
'd': D,
'c': {'task': C, 'input': 'b'},
'e': {'task': E, 'input': 'd'},
})
      redundant specification is not an issue, and the following
example is equivalent to the previous one: ::
pipeline = pipeline.create({
'a': {'task': A, 'output': ('b', 'd')},
'b': {'task': B, 'input': 'a', 'output': 'c'},
'd': {'task': D, 'input': 'a', 'output': 'e'},
'c': {'task': C, 'input': 'b', 'output': None},
'e': {'task': E, 'input': 'd', 'output': ()},
})
+ join branches
example: given we want to implement this: ::
+--> B --> C --+
inp >>> A--| +--> N >>> out
+--> D --> E --+
here's how we could define it: ::
pipeline = pipeline.create({
'a': {'task': A, 'output': ('b', 'd')},
'b': B,
'c': {'task': C, 'input': 'b', 'output': 'f'},
'd': D,
'e': {'task': E, 'input': 'd', 'output': 'f'},
'f': F,
})
+ control branches order
the order in which coroutines are initialized, called and
closed is reproducible.
to control the data flow order between several branches just
use the keys in the pipeline definition, as they will be
sorted, like in the following example: ::
+--> (1) X --+
+--> (2) X --+
inp >>> A--+--> (3) X --+--> B >>> out
+--> (4) X --+
+--> (5) X --+
here's how we could define it: ::
pipeline = pipeline.create({
'a': A,
1: {'task': X, 'input': 'a', 'output': 'b'},
2: {'task': X, 'input': 'a', 'output': 'b'},
3: {'task': X, 'input': 'a', 'output': 'b'},
4: {'task': X, 'input': 'a', 'output': 'b'},
5: {'task': X, 'input': 'a', 'output': 'b'},
'b': B,
})
the 'X' coroutines will be initialized and processed in the
expected order: 1, 2, 3, 4, 5 (they will be closed, if no
exception occurs, in the opposite order).
+ loop back
.. warning:: looping is currently not implemented and will
raise a `NotImplementedError` when creating the pipeline.
example: given we want to implement this: ::
+--> B --> C --+ + >>> out
inp >>> A--| +--> N -- +
+--> D --> E --+ |
| |
+--> F --+ |
| |
+------------------+
here's how we could define it: ::
pipeline = pipeline.create({
'a': {'task': A, 'output': ('b', 'd')},
'b': {'task': B, 'output': 'c'},
'c': {'task': C},
'd': {'task': D, 'output': 'e'},
'e': {'task': E},
'n': {'task': N, 'input': ('c', 'e'), 'output': None},
'f': {'task': F, 'input': 'n', 'output': 'n'},
})
    .. warning:: defining a loop can end up in infinite recursion;
        no control is done on this, so it's up to the tasks
implementation to handle this...
+ specify coroutines name
in some contexts we may want to define a name for a coroutine
which is different from its key.
example: the previous example with ordered branches was: ::
+--> (1) X --+
+--> (2) X --+
inp >>> A--+--> (3) X --+--> B >>> out
+--> (4) X --+
+--> (5) X --+
here's how we could define it: ::
pl = {
'a': A,
'b': B,
}
pl.update({
i: {'task': X, 'input': 'a', 'output': 'b',
'debug': f"the X task number {i}"}
for i in range(1, 6)
})
pl = pipeline.create(pl)
"""
# ---------------------------------------------------------------- #
# prepare coroutines
# ---------------------------------------------------------------- #
# NOTE: use OrderedDict to get reproducible outputs
steps = collections.OrderedDict() # <key>: <default>
def default():
return {
'task': None,
'input': set(),
'output': set(),
}
for key, tsk in tasks.items():
if key is None:
raise ValueError('coroutine key cannot be None (reserved)')
step = steps.setdefault(key, default())
_inputs = _outputs = ()
debug = str(key)
        if isinstance(tsk, collections.abc.Mapping):
_inputs = tsk.get('input', ())
_outputs = tsk.get('output', ())
debug = tsk.get('debug', debug)
tsk = tsk['task']
_inputs = _listify(_inputs)
_outputs = _listify(_outputs)
step['task'] = tsk
step['debug'] = debug
# by default if a coroutine has no input or no output the
# pipeline input / output will be used
# we also need to make sure coroutines are not added twice as
# input / output of a given coroutine
# in order to do that we link everything (except None)
# using 'output':
# iterate 'inputs' and add to 'input' only if None (i.e pipeline
# input), else add to 'output' of the coroutine specified as
# 'input'
for inp in _inputs:
if inp is None:
step['input'].add(None)
else:
_in = steps.setdefault(inp, default())
_in['output'].add(key)
step['output'].update(_outputs)
# debug checks
assert key not in step['output'], 'coroutine output is itself'
assert key not in step['input'], 'coroutine input is itself'
# ---------------------------------------------------------------- #
# create context
# ---------------------------------------------------------------- #
context = Context(**kw)
# ---------------------------------------------------------------- #
# prepare pipeline input / output
# ---------------------------------------------------------------- #
@coroutine
def main_input(targets):
try:
while True:
data = yield
for target in targets:
target.send(data)
except GeneratorExit:
return
@coroutine
def main_output(strategy):
try:
while True:
strategy((yield))
except GeneratorExit:
return
main_output = main_output(output)
pipeline = [main_output]
# ---------------------------------------------------------------- #
# link coroutines
# ---------------------------------------------------------------- #
lst = list(steps)
loop = {} # to detect loopback in the pipeline (not implemented)
while lst:
key = lst.pop(0)
data = steps[key]
output = data['output']
# set default output if branch has no output
targets = [main_output] if not output or None in output else []
# NOTE: use sorted to get reproducible outputs
for _out in sorted(output.difference({None}), key=str):
# add the current coroutine as input of the target coroutine
# so we know it has at least one input (otherwise the
# pipeline input will be used, see below...)
target = steps[_out]
target['input'].add(key)
if _out in lst:
# the target coroutine has not been initialized yet
# it will be initialized first
break
targets.append(target['task'])
# skip for now if at least one target coroutine has not been
# initialized yet
if len(targets) < len(output):
# detect loopback in the pipeline:
# this is currently not implemented and will fail
try:
if loop[key] == len(lst):
raise NotImplementedError('recursion detected: looping is '
'currently not implemented')
except KeyError:
pass
loop[key] = len(lst)
# skip and go back later to this one
lst.append(key)
continue
# the coroutine can be initialized
# ( NOTE: this will start the user generator function )
tsk = data['task']
cr = tsk(context=context, targets=targets, debug=data['debug'])
data['task'] = cr
pipeline.append(cr)
# link pipeline input
input_targets = []
for key, data in steps.items():
# link coroutine to pipeline input:
# + if coroutine has no input, or
# + if pipeline input (None) is specified in coroutine's inputs
if not data['input'] or None in data['input']:
input_targets.append(data['task'])
pipeline.append(main_input(input_targets))
pipeline.reverse()
return Pipeline(pipeline)
def _listify(obj):
""" makes sure `obj` is a `list` """
if isinstance(obj, str):
return [obj]
try:
# will fail if obj is not iterable
return list(obj)
except TypeError:
return [obj]
class Pipeline:
""" User interface returned by `create` function
`Pipeline` object contains the `coroutine`s of a pipeline.
When used as a context manager, it ensures that coroutines
will be closed immediately at the end of the process.
.. seealso:: `Pipeline.__enter__`, `Pipeline.__exit__`
`Pipeline` also has an additional `Pipeline.run` method
which can be called to run the pipeline over an input
stream and wait until the process complete.
"""
def __init__(self, targets):
""" initialize a new pipeline with `coroutine`s
`targets` is an iterable containing the coroutines of the
pipeline, the first item must be the input coroutine.
.. seealso:: `Pipeline.run`
"""
self.targets = targets
def run(self, inputs):
""" run the pipeline over `inputs` iterator
        send data from `inputs` to the pipeline until there is no
more data in inputs or an exception occurs.
"""
with self as process:
for data in inputs:
process(data)
    def __enter__(self):
        """ return a function sending data through the pipeline
ex: ::
with pipeline as process:
for data in stream:
process(data)
.. note:: this is equivalent to:
::
with pipeline:
target = pipeline.target
for data in stream:
target.send(data)
.. seealso:: `Pipeline.__exit__`
"""
def send(data, target=self.targets[0]):
target.send(data)
return send
def __exit__(self, tpe, value, tb):
""" close all the coroutines of the pipeline, raise exc if any
The purpose of using the `Pipeline` object as a context
manager is essentially to make sure all the coroutines will
be terminated (closed) at the end of the process.
This can be critical if user functions are expected to do
some teardown jobs after processing data, for instance: ::
# file won't be closed until the coroutine is closed
# (see while loop...)
@coroutine
def cr(targets, *args, file, **kw):
with open(file) as f:
while True:
data = yield
[...]
.. seealso:: `Pipeline.__enter__`
"""
for coroutine in self.targets:
coroutine.close()
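if __name__ == '__main__':
    # Minimal, hedged usage sketch of the API defined above: a single-branch
    # pipeline that adds a constant offset to every input value and collects
    # the output in a list instead of printing it.
    @task
    def add_offset(ctrl, delta):
        with ctrl as push:
            while True:
                value = yield
                push(value + delta)

    results = []
    pl = create({'off': add_offset(10)}, output=results.append)
    pl.run(range(5))
    print(results)  # expected: [10, 11, 12, 13, 14]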
|
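# Note: this script appears to sum the numbers below n that are palindromic in
# both base 10 and base 2 (cf. Project Euler problem 36); converting
# bin(i)[2:] back to an int is safe because binary representations never have
# leading zeros.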
def is_polyndromic(number: int) -> bool:
new_number = str(number)
for i in range(len(new_number) // 2):
if new_number[i] != new_number[len(new_number) - i - 1]:
return False
return True
def calculate_polyndromic(n: int) -> int:
return sum(i for i in range(1, n)
if is_polyndromic(i) and is_polyndromic(int(bin(i)[2:])))
if __name__ == '__main__':
n = 10 ** 6
p = calculate_polyndromic(n)
print(p)
|
''' *********************** USER-PARAMETERS FOR FT CALIBRATION *********************** '''
rfw = 10. # Vertical force (N) applied by the right foot when robot in the air.
lfw = 10. # Vertical force (N) applied by the left foot when robot in the air.
|
"""A macro to configure Vulkan SDK deps"""
load("@rules_cc//cc:defs.bzl", "cc_library")
VULKAN_LINKOPTS = select({
"@bazel_tools//src/conditions:linux_x86_64": ["-lvulkan"],
"//conditions:default": [],
})
VULKAN_LIBRARIES = select({
"@bazel_tools//src/conditions:windows": ["@vulkan_windows//:vulkan_cc_library"],
"@bazel_tools//src/conditions:darwin": ["@vulkan_macos//:vulkan_cc_library"],
"//conditions:default": [],
})
def vulkan_sdk():
"""Defines the Vulkan SDK targets with proper exported headers and linkopts.
The targets will be of the form ":<artifact-id>".
"""
cc_library(
name = "vulkan",
deps = VULKAN_LIBRARIES,
linkopts = VULKAN_LINKOPTS,
visibility = ["//visibility:public"],
)
|
import pretty_midi
import numpy as np
import librosa
import pyworld
from scipy.io.wavfile import read as wav_read
from scipy import signal
from pysndfx import AudioEffectsChain
def parse_midi(path: str) -> np.ndarray:
"""
    simple parsing function that loads a MIDI file and removes invalid notes
    :param path: the MIDI file path
    :return: a pretty_midi.PrettyMIDI object (no piano-roll array is built here)
"""
midi = None
try:
midi = pretty_midi.PrettyMIDI(path)
midi.remove_invalid_notes()
except Exception as e:
        raise Exception(("%s\nerror reading midi file %s" % (e, path)))
return midi
def lowpass(wav: np.ndarray, frequency: int) -> np.ndarray:
"""
    apply a lowpass filter using the pysndfx package
:param wav: wav-form numpy array
:param frequency: target frequency
:return: filtered wav
"""
fx = (
AudioEffectsChain().lowpass(frequency=frequency)
)
return fx(wav)
def get_f0(wav: np.array, hop_length: int, sr: int = 22050):
"""
    Parse f0 feature from the given wave using the WORLD vocoder
:param wav: an array of wave
:param hop_length: hop(stride) length
:param sr: sample rate of wave
:return: f0 feature
"""
x = librosa.util.pad_center(wav, len(wav), mode='reflect').astype('double')
_f0, t = pyworld.dio(x, sr, frame_period=hop_length / sr * 1e+3) # raw pitch extractor
f0 = pyworld.stonemask(x, _f0, t, sr) # pitch refinement
return f0.astype(np.float32)
def get_wav_duration(file: str) -> float:
"""
Calc duration of wave file
:param file: file path
:return: wave duration in seconds
"""
try:
sr, wav = wav_read(file)
dur = len(wav) / sr
    except Exception:
dur = -1
return dur
def preemphasis(x, coeff=0.97):
return signal.lfilter([1, -coeff], [1], x).astype(np.float32)
def inv_preemphasis(x, coeff=0.97):
return signal.lfilter([1], [1, -coeff], x).astype(np.float32)
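if __name__ == '__main__':
    # Hedged sanity check using only numpy/scipy (already imported above):
    # pre-emphasis followed by its inverse should reconstruct the signal up to
    # float32 rounding error.
    rng = np.random.RandomState(0)
    x = rng.randn(22050).astype(np.float32)
    y = inv_preemphasis(preemphasis(x))
    print('max reconstruction error:', np.max(np.abs(x - y)))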
|
#!/usr/bin/env python
import rospy
import random
import math
import rospkg
from tf.transformations import quaternion_from_euler
from gazebo_msgs.srv import GetPhysicsProperties
from gazebo_msgs.srv import SetPhysicsProperties
from gazebo_msgs.srv import GetModelState
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.srv import SetModelStateRequest
from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse
from gazebo_msgs.srv import DeleteModel
from geometry_msgs.msg import Pose
from sensor_msgs.msg import PointCloud2
def delete_model(model):
delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
delete_model_prox(model)
def create_pose(x, y, z, qx, qy, qz, qw):
'''
    returns a Pose() object from the given x, y, z, qx, qy, qz, qw values
'''
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
pose.orientation.x = qx
pose.orientation.y = qy
pose.orientation.z = qz
pose.orientation.w = qw
return pose
def create_pose_random_orient(x, y, z):
'''
    returns a Pose() object at the given x, y, z position with a random yaw orientation
'''
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
q = quaternion_from_euler(0 , 0, random.uniform(0, 2*math.pi))
pose.orientation.x = q[0]
pose.orientation.y = q[1]
pose.orientation.z = q[2]
pose.orientation.w = q[3]
return pose
def spawn_model(model_name, pose):
spawn_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
rospy.loginfo("Spawning " + model_name)
model_path = rospkg.RosPack().get_path('ebot_gazebo') \
+ '/models/' + model_name + '/model.sdf'
with open(model_path , 'r') as xml_file:
model = xml_file.read()
req = SpawnModelRequest()
req.model_name = model_name
req.model_xml = model
req.initial_pose = pose
spawn_srv.call(req)
def printModelState(model_name):
get_model_state_prox = rospy.ServiceProxy('gazebo/get_model_state',GetModelState)
model_state = get_model_state_prox(model_name,'world')
p = model_state.pose.position
q = model_state.pose.orientation
print("(%0.8f, %0.8f, %0.8f, %0.8f, %0.8f, %0.8f, %0.8f)" % \
(p.x, p.y, p.z, q.x, q.y, q.z, q.w))
def delete_all_model(model_list):
delete_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
for model in model_list:
delete_model_prox(model)
rospy.sleep(1)
def wait_for_all_services():
rospy.wait_for_service('gazebo/get_model_state')
rospy.wait_for_service('gazebo/set_model_state')
rospy.wait_for_service('gazebo/spawn_sdf_model')
rospy.wait_for_service('gazebo/delete_model')
rospy.loginfo("Connected to all services!")
if __name__ == '__main__':
rospy.init_node('capture_node')
    wait_for_all_services()
    # delete_all_model() expects a list of model names; the names below are
    # assumed to match the models spawned further down in this block.
    delete_all_model(['coke_can', 'glue', 'battery'])
cokePose = create_pose(7.9712791, 3.3939284, 0.8676281, -0.0126091, 0.0003598, 0.0000164, 0.9999204)
gluePose = create_pose(7.84000000, 3.23928000, 0.86998147, 0.00000075, -0.00000197, 0.50251043, 0.86457115)
batteryPose = create_pose(8.10856002, 3.23999991, 0.87299210, 0.00001689, 0.00000146, 0.00000001, 1.00000000)
spawn_model('coke_can', cokePose)
spawn_model('glue', gluePose)
spawn_model('battery', batteryPose)
# printModelState('coke_can')
# printModelState('glue')
# printModelState('battery')
|
import logging
import os
import time
import timeit
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
import paddle
import paddle.nn as nn
# user
from builders.model_builder import build_model
from builders.dataset_builder import build_dataset_train
from utils.utils import setup_seed, init_weight, netParams, init_logger
from utils.metric import get_iou
from utils.loss import CrossEntropyLoss2d, ProbOhemCrossEntropy2d
from utils.lr_scheduler import WarmupPolyLR
GLOBAL_SEED = 1234
def val(args, val_loader, model, logger):
"""
args:
      val_loader: data loader for the validation dataset
      model: model to evaluate
    return: mean IoU and per-class IoU
"""
# evaluation mode
model.eval()
total_batches = len(val_loader)
data_list = []
for i, (input, label, size, name) in enumerate(val_loader):
start_time = time.time()
with paddle.no_grad():
output = model(input)
time_taken = time.time() - start_time
if (i + 1) % 100 == 0:
logger.info("[{}/{}] time: {:.4f}".format(i + 1, total_batches, time_taken))
output = output[0].numpy()
gt = label.numpy()[0].astype(np.uint8)
output = output.transpose(1, 2, 0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
data_list.append([gt.flatten(), output.flatten()])
meanIoU, per_class_iu = get_iou(data_list, args.classes)
model.train()
return meanIoU, per_class_iu
def train(args, train_loader, model, criterion, optimizer, scheduler, epoch, logger):
"""
args:
      train_loader: data loader for the training dataset
model: model
criterion: loss function
optimizer: optimization algorithm, such as ADAM or SGD
epoch: epoch number
    return: average epoch loss and the current learning rate
"""
epoch_loss = []
total_batches = len(train_loader)
logger.info("=====> the number of iterations per epoch: {}".format(total_batches))
st = time.time()
for iteration, batch in enumerate(train_loader, 0):
lr = optimizer.get_lr()
start_time = time.time()
images, labels, _, _ = batch
output = model(images)
loss = criterion(output, labels)
optimizer.clear_grad() # set the grad to zero
loss.backward()
optimizer.step()
scheduler.step()
epoch_loss.append(loss.item())
time_taken = time.time() - start_time
if (iteration + 1) % args.print_batch_step == 0:
logger.info('=====> epoch[{}/{}] iter: [{}/{}] cur_lr: {:.6f} loss: {:.6f} time:{:.4f}'.format(epoch + 1,
args.max_epochs,
iteration + 1,
total_batches,
lr,
loss.item(),
time_taken))
time_taken_epoch = time.time() - st
remain_time = time_taken_epoch * (args.max_epochs - 1 - epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
logger.info("Remaining training time = {} hour {} minutes {} seconds".format(h, m, s))
average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
return average_epoch_loss_train, lr
def train_model(args, logger):
"""
args:
args: global arguments
"""
h, w = map(int, args.input_size.split(','))
args.input_size = (h, w)
logger.info("=====> input size:{}".format(args.input_size))
logger.info(args)
if args.cuda:
logger.info("=====> use gpu id: '{}'".format(args.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if not paddle.is_compiled_with_cuda():
raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
# set the seed
setup_seed(GLOBAL_SEED)
logger.info("=====> set Global Seed: {}".format(GLOBAL_SEED))
# build the model and initialization
model = build_model(args.model, num_classes=args.classes)
init_weight(model, nn.initializer.KaimingNormal(), nn.BatchNorm2D, 1e-3, 0.1)
logger.info("=====> computing network parameters and FLOPs")
total_paramters = netParams(model)
logger.info("the number of parameters: {} ==> {} M".format(total_paramters, (total_paramters / 1e6)))
# load data and data augmentation
datas, trainLoader, valLoader = build_dataset_train(args)
logger.info('=====> Dataset statistics')
logger.info("data['classWeights']: {}".format(datas['classWeights']))
logger.info('mean and std: {}, {}'.format(datas['mean'], datas['std']))
# define loss function, respectively
weight = paddle.to_tensor(datas['classWeights'])
if args.dataset == 'camvid':
criteria = CrossEntropyLoss2d(weight=weight, ignore_label=ignore_label)
elif args.dataset == 'cityscapes':
min_kept = int(args.batch_size // len(args.gpus) * h * w // 16)
criteria = ProbOhemCrossEntropy2d(use_weight=True, ignore_label=ignore_label,
thresh=0.7, min_kept=min_kept)
else:
raise NotImplementedError(
"This repository now supports two datasets: cityscapes and camvid, {} is not included".format(args.dataset))
start_epoch = 1
# continue training
if args.checkpoint:
if os.path.isfile(args.checkpoint):
checkpoint = paddle.load(args.checkpoint)
start_epoch = checkpoint['epoch']
model.set_state_dict(checkpoint['model'])
logger.info("=====> loaded checkpoint '{}' (epoch {})".format(args.checkpoint, checkpoint['epoch']))
else:
logger.info("=====> no checkpoint found at '{}'".format(args.checkpoint))
model.train()
logger.info("Parameters: {} Seed: {}".format(str(total_paramters), GLOBAL_SEED))
# define optimization criteria
args.per_iter = len(trainLoader)
scheduler = WarmupPolyLR(learning_rate=args.lr, step_each_epoch=len(trainLoader),
epochs=args.max_epochs, warmup_epoch=500 / len(trainLoader), power=0.9)()
if args.dataset == 'camvid':
optimizer = paddle.optimizer.Adam(learning_rate=scheduler, parameters=model.parameters(),
weight_decay=2e-4)
elif args.dataset == 'cityscapes':
optimizer = paddle.optimizer.Momentum(learning_rate=scheduler, parameters=model.parameters(), momentum=0.9,
weight_decay=1e-4)
else:
raise NotImplementedError
lossTr_list = []
epoches = []
mIOU_val_list = []
best_metric = {'mIOU': 0, 'epoch': 0}
logger.info('=====> beginning training')
for epoch in range(start_epoch, args.max_epochs):
# training
lossTr, lr = train(args, trainLoader, model, criteria, optimizer, scheduler, epoch, logger)
lossTr_list.append(lossTr)
model_file_name = os.path.join(args.savedir, 'latest.params')
state = {"epoch": epoch + 1, "model": model.state_dict()}
paddle.save(state, model_file_name)
# validation
if epoch % args.eval_epoch == 0 or epoch == (args.max_epochs - 1):
epoches.append(epoch)
mIOU_val, per_class_iu = val(args, valLoader, model, logger)
mIOU_val_list.append(mIOU_val)
# record train information
logger.info("Epoch : {} Details".format(str(epoch)))
logger.info("Epoch No.: {}\tTrain Loss = {:.6f}\t mIOU(val) = {:.6f}\t lr= {:.6f}".format(epoch,
lossTr,
mIOU_val, lr))
if best_metric['mIOU'] < mIOU_val:
best_metric = {'mIOU': mIOU_val, 'epoch': epoch + 1}
model_file_name = os.path.join(args.savedir, 'best.params')
paddle.save(state, model_file_name)
logger.info('cur mIOU: {:.6f}, best mIOU: {:.6f}'.format(mIOU_val, best_metric['mIOU']))
else:
# record train information
logger.info("Epoch : " + str(epoch) + ' Details')
logger.info("Epoch No.: {}\tTrain Loss = {:.6f}\t lr= {:.6f}".format(epoch, lossTr, lr))
# draw plots for visualization
if epoch % 50 == 0 or epoch == (args.max_epochs - 1):
# Plot the figures per 50 epochs
fig1, ax1 = plt.subplots(figsize=(11, 8))
ax1.plot(range(start_epoch, epoch + 1), lossTr_list)
ax1.set_title("Average training loss vs epochs")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Current loss")
plt.savefig(args.savedir + "loss_vs_epochs.png")
plt.clf()
fig2, ax2 = plt.subplots(figsize=(11, 8))
ax2.plot(epoches, mIOU_val_list, label="Val IoU")
ax2.set_title("Average IoU vs epochs")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Current IoU")
plt.legend(loc='lower right')
plt.savefig(args.savedir + "iou_vs_epochs.png")
plt.close('all')
if __name__ == '__main__':
start = timeit.default_timer()
parser = ArgumentParser()
parser.add_argument('--model', default="DABNet", help="model name: Context Guided Network (CGNet)")
parser.add_argument('--dataset', default="cityscapes", help="dataset: cityscapes or camvid")
parser.add_argument('--data_root', default="", help="dataset folder")
    parser.add_argument('--train_file', default="dataset/cityscapes/cityscapes_train_list.txt", help="text file listing the training images")
    parser.add_argument('--val_file', default="dataset/cityscapes/cityscapes_val_list.txt", help="text file listing the validation images")
    parser.add_argument('--inform_data_file', default="dataset/inform/cityscapes_inform.pkl", help="pickle file with dataset statistics (class weights, mean, std)")
parser.add_argument('--max_epochs', type=int, default=1000,
help="the number of epochs: 300 for train set, 350 for train+val set")
parser.add_argument('--input_size', type=str, default="512,1024", help="input size of model")
parser.add_argument('--random_mirror', type=bool, default=True, help="input image random mirror")
parser.add_argument('--random_scale', type=bool, default=True, help="input image resize 0.5 to 2")
parser.add_argument('--num_workers', type=int, default=4, help=" the number of parallel threads")
parser.add_argument('--lr', type=float, default=4.5e-2, help="initial learning rate")
parser.add_argument('--batch_size', type=int, default=8, help="the batch size is set to 16 for 2 GPUs")
parser.add_argument('--savedir', default="./checkpoint/", help="directory to save the model snapshot")
parser.add_argument('--checkpoint', type=str, default="",
help="use this file to load last checkpoint for continuing training")
parser.add_argument('--classes', type=int, default=19,
help="the number of classes in the dataset. 19 and 11 for cityscapes and camvid, respectively")
parser.add_argument('--cuda', type=bool, default=True, help="running on CPU or GPU")
parser.add_argument('--gpus', type=str, default="0", help="default GPU devices (0,1)")
parser.add_argument('--print_batch_step', type=int, default=10, help="interval (in batches) between training log prints")
parser.add_argument('--eval_epoch', type=int, default=50, help="interval (in epochs) between validation runs")
args = parser.parse_args()
if args.dataset == 'cityscapes':
args.classes = 19
args.input_size = '512,1024'
ignore_label = 255
elif args.dataset == 'camvid':
args.classes = 11
args.input_size = '360,480'
ignore_label = 11
else:
raise NotImplementedError(
"This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset)
if not os.path.exists(args.savedir):
os.makedirs(args.savedir)
logFileLoc = os.path.join(args.savedir, 'train.log')
logger = init_logger(logFileLoc)
train_model(args, logger)
end = timeit.default_timer()
hour = 1.0 * (end - start) / 3600
minute = (hour - int(hour)) * 60
logger.info("training time: %d hour %d minutes" % (int(hour), int(minute)))
|
from itertools import combinations
class Solution:
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
rls = []
for c in combinations(range(10), num):
p = ['0'] * 10
for t in c:
p[t] = '1'
hour = int(''.join(p[:4]), 2)
minute = int(''.join(p[4:]), 2)
if hour < 12 and minute < 60:
rls.append('%d:%02d' % (hour, minute))
return rls
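# Worked example (derived from the code above): for num == 1 the loop places a
# single lit LED in each of the 10 positions, giving
#   Solution().readBinaryWatch(1)
#   -> ['8:00', '4:00', '2:00', '1:00', '0:32', '0:16', '0:08', '0:04', '0:02', '0:01']
# Combinations that would produce hour >= 12 or minute >= 60 are filtered out.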
|
#! /usr/bin/env python
# -*- python -*-
"""Support for retrieving useful information about XML data, including the
public and system IDs and the document type name.
There are parts of this module which assume the native character encoding is
ASCII or a superset; this should be fixed.
"""
__version__ = "$Revision: 1.5 $"
import copy
import os
import re
import string
import struct
import sys
BOM_BE = "\xfe\xff"
BOM_LE = "\xff\xfe"
BIG_ENDIAN = "big-endian"
LITTLE_ENDIAN = "little-endian"
if struct.pack('h', 1) == struct.pack('>h', 1):
NATIVE_ENDIANNESS = BIG_ENDIAN
else:
NATIVE_ENDIANNESS = LITTLE_ENDIAN
class Error(Exception):
"""Base class for xmlinfo exceptions."""
def __init__(self, *args, **kw):
self.message = args[0]
apply(Exception.__init__, (self,) + args, kw)
class ConversionError(Error):
"""Raised when an encoding conversion fails."""
pass
class ParseError(Error):
pass
class EncodingMismatchError(ParseError):
"""Raised when an extractor thinks it's reading from a stream of the
wrong encoding. The exception parameter is the name of a suggested
encoding to try, or None.
"""
def __init__(self, encoding=None):
self.encoding = encoding
ParseError.__init__(self, encoding)
class Record:
public_id = None
system_id = None
doc_elem = None
standalone = None
xml_version = None
encoding = None
byte_order = None
def __init__(self, **kw):
self.__dict__.update(kw)
FieldLabels = Record(
system_id="System ID",
public_id="Public ID",
doc_elem="Document Element",
standalone="Standalone",
xml_version="XML Version",
encoding="Encoding",
byte_order="Byte Order",
)
FieldNames = dir(Record)
for _name in FieldNames[:]:
if _name[:2] == "__":
FieldNames.remove(_name)
def get_xml_info(buffer):
values = Record()
byte_order, encoding, bom = guess_byte_order_and_encoding(buffer)
values.byte_order = byte_order
values.encoding = encoding
return extract(values.encoding, buffer[len(bom):], values)
def get_byte_order_mark(buffer):
bom = buffer[:2]
if bom in (BOM_BE, BOM_LE):
return bom
else:
return ''
def guess_byte_order_and_encoding(buffer):
"""Guess the byte-order and encoding."""
byte_order = None
encoding = "utf-8"
#
bom = get_byte_order_mark(buffer)
buffer = buffer[len(bom):]
if bom == BOM_BE:
return BIG_ENDIAN, "utf-16", BOM_BE
elif bom == BOM_LE:
return LITTLE_ENDIAN, "utf-16", BOM_LE
elif bom == '':
pass
else:
raise RuntimeError, \
"unexpected internal condition: bad byte-order mark"
#
# no byte-order mark
#
prefix = buffer[:4]
if prefix == "\0\0\0\x3c":
byte_order = BIG_ENDIAN
encoding = "ucs-4"
elif prefix == "\x3c\0\0\0":
byte_order = LITTLE_ENDIAN
encoding = "ucs-4"
elif prefix == "\0\x3c\0\x3f":
byte_order = BIG_ENDIAN
encoding = "utf-16"
elif prefix == "\x3c\0\x3f\0":
byte_order = LITTLE_ENDIAN
encoding = "utf-16"
elif prefix == "\x3c\x3f\x78\x6d":
# good enough to parse the encoding declaration
encoding = "utf-8"
elif prefix == "\x4c\x6f\xa7\x94":
encoding = "ebcdic"
#
return byte_order, encoding, ""
def extract(encoding, buffer, values, best_effort=0):
tried = {}
while not tried.has_key(encoding):
tried[encoding] = 1
v2 = copy.copy(values)
extractor = new_extractor(encoding, buffer, v2)
try:
v2 = extractor.extract()
except EncodingMismatchError, e:
encoding = e.encoding
except:
if best_effort:
# in case there's anything there
return v2
raise
else:
return v2
raise ParseError("could not determine encoding")
_extractor_map = {}
def new_extractor(encoding, buffer, values):
encoding = string.lower(encoding)
klass = _extractor_map.get(encoding, Extractor)
return klass(buffer, values)
def add_extractor_class(klass):
for enc in klass.Encodings:
_extractor_map[enc] = klass
class Extractor:
VERSION_CHARS = string.letters + string.digits + "_.:-"
Encodings = ()
def __init__(self, buffer, values):
self.buffer = buffer
self.values = values
def extract(self):
self.parse_declaration()
if self.values.encoding not in self.Encodings:
raise EncodingMismatchError(self.values.encoding)
self.skip_to_doctype()
self.parse_doctype()
return self.values
def parse_declaration(self):
try:
self.require_ascii("<?xml", "XML declation")
except ParseError:
# OK to drop this for UTF-8
return
self.parse_VersionInfo()
attrname, encoding = self.get_opt_pseudo_attr()
if attrname == "encoding":
self.values.encoding = string.lower(encoding)
attrname, standalone = self.get_opt_pseudo_attr()
if attrname == "standalone":
if standalone not in ("yes", "no"):
raise ParseError(
"illegal standalone value in XML declaration: "
+ standalone)
self.values.standalone = standalone
attrname = None
if attrname is not None:
raise ParseError(
"unknown or out-of-order XML declaration attribute: "
+ attrname)
self.skip_whitespace()
self.require_ascii("?>", "XML declaration")
def parse_VersionInfo(self):
attr, verno = self.get_pseudo_attr()
if attr != 'version':
raise ParseError(
"first pseudo-attribute in XML declaration must be version")
if not verno:
raise ParseError("version number cannot be empty")
version_chars = self.VERSION_CHARS
for c in verno:
if not c in version_chars:
raise ParseError(
"illegal character in XML version declaration: " + `c`)
self.values.xml_version = verno
def get_pseudo_attr(self):
"""Return attr/value pair using the XML declaration's idea of a
pseudo-attribute."""
attrname = ''
value = ''
self.require_whitespace("pseudo-attribute")
while 1:
c = self.get_ascii(1)
if c in string.letters:
attrname = attrname + c
self.discard_chars(1)
else:
break
if not attrname:
raise ParseError("could not extract pseudo-attribute name")
self.skip_whitespace()
self.require_ascii("=", "pseudo-attribute")
self.skip_whitespace()
open_quote = self.get_ascii(1)
if open_quote not in ('"', "'"):
raise ParseError("pseudo-attribute values must be quoted")
self.discard_chars(1)
while 1:
c = self.get_ascii(1)
if not c:
raise ParseError("could not complete pseudo-attribute value")
self.discard_chars(1)
if c == open_quote:
break
value = value + c
return attrname, value
def get_opt_pseudo_attr(self):
buffer = self.buffer
try:
return self.get_pseudo_attr()
except ParseError:
self.buffer = buffer
return None, None
def parse_doctype(self):
self.require_ascii("<!DOCTYPE", "doctype declaration")
self.require_whitespace("doctype declaration")
self.values.doc_elem = self.parse_Name("doctype declaration")
wscount = self.skip_whitespace()
c = self.get_ascii(1)
if c in "]>":
return
self.parse_ExternalID()
def _make_set_predicate(L):
d = {}
for o in L:
d[o] = o
return d.has_key
BASE_CHARS = tuple(
range(0x41, 0x5A+1) + range(0x61, 0x7A+1)
+ range(0xC0, 0xD6+1) + range(0xD8, 0xF6+1)
+ range(0xF8, 0xFF+1) + range(0x100, 0x131+1)
+ range(0x134, 0x13E+1) + range(0x141, 0x148+1)
+ range(0x14A, 0x17E+1) + range(0x180, 0x1C3+1)
+ range(0x1CD, 0x1F0+1) + range(0x1F4, 0x1F5+1)
+ range(0x1FA, 0x217+1) + range(0x250, 0x2A8+1)
+ range(0x2BB, 0x2C1+1) + [0x386]
+ range(0x388, 0x38A+1) + [0x38C]
+ range(0x38E, 0x3A1+1) + range(0x3A3, 0x3CE+1)
+ range(0x3D0, 0x3D6+1) + [0x3DA, 0x3DC, 0x3DE, 0x3E0]
+ range(0x3E2, 0x3F3+1) + range(0x401, 0x40C+1)
+ range(0x40E, 0x44F+1) + range(0x451, 0x45C+1)
+ range(0x45E, 0x481+1) + range(0x490, 0x4C4+1)
+ range(0x4C7, 0x4C8+1) + range(0x4CB, 0x4CC+1)
+ range(0x4D0, 0x4EB+1) + range(0x4EE, 0x4F5+1)
+ range(0x4F8, 0x4F9+1) + range(0x531, 0x556+1) + [0x559]
+ range(0x561, 0x586+1) + range(0x5D0, 0x5EA+1)
+ range(0x5F0, 0x5F2+1) + range(0x621, 0x63A+1)
+ range(0x641, 0x64A+1) + range(0x905, 0x939+1) + [0x93D]
+ range(0x9AA, 0x9B0+1) + [0x9B2]
+ range(0xA05, 0xA0A+1) + range(0xA35, 0xA36+1)
+ range(0xA8F, 0xA91+1) + [0xAE0]
+ range(0xB05, 0xB0C+1) + range(0xB36, 0xB39+1) + [0xB3D]
+ range(0xB5F, 0xB61+1) + range(0xB85, 0xB8A+1)
+ range(0xB8E, 0xB90+1) + range(0xB92, 0xB95+1)
+ range(0xB99, 0xB9A+1) + [0xB9C]
+ range(0xB9E, 0xB9F+1) + range(0xBA3, 0xBA4+1)
+ range(0xBA8, 0xBAA+1) + range(0xBAE, 0xBB5+1)
+ range(0xBB7, 0xBB9+1) + range(0xC05, 0xC0C+1)
+ range(0xC0E, 0xC10+1) + range(0x10A0, 0x10C5+1)
+ range(0x10D0, 0x10F6+1) + [0x1100]
+ range(0x1102, 0x1103+1) + range(0x1105, 0x1107+1) + [0x1109]
+ range(0x110B, 0x110C+1) + range(0x110E, 0x1112+1)
+ [0x113C, 0x113E, 0x1140, 0x114C, 0x114E, 0x1150]
+ range(0x1154, 0x1155+1) + [0x1159]
+ range(0x115F, 0x1161+1) + [0x1163, 0x1165, 0x1167, 0x1169]
+ range(0x116D, 0x116E+1) + range(0x1172, 0x1173+1)
+ [0x1175, 0x119E, 0x11A8, 0x11AB]
+ range(0x1F5F, 0x1F7D+1) + range(0x1F80, 0x1FB4+1)
+ range(0x1FB6, 0x1FBC+1) + [0x1FBE]
+ range(0x1FC2, 0x1FC4+1) + range(0x1FC6, 0x1FCC+1)
+ range(0x1FD0, 0x1FD3+1))
COMBINING_CHARS = tuple(
range(0x300, 0x345+1) + range(0x360, 0x361+1)
+ range(0x483, 0x486+1) + range(0x591, 0x5A1+1)
+ range(0x5A3, 0x5B9+1) + range(0x5BB, 0x5BD+1) + [0x5BF]
+ range(0x5C1, 0x5C2+1) + [0x5C4]
+ range(0x64B, 0x652+1) + [0x670]
+ range(0x6D6, 0x6DC+1) + range(0x6DD, 0x6DF+1)
+ range(0x6E0, 0x6E4+1) + range(0x6E7, 0x6E8+1)
+ range(0x6EA, 0x6ED+1) + range(0x901, 0x903+1) + [0x93C]
+ range(0x93E, 0x94C+1) + [0x94D]
+ range(0x951, 0x954+1) + range(0x962, 0x963+1)
+ range(0x981, 0x983+1) + [0x9BC, 0x9BE, 0x9BF]
+ range(0x9C0, 0x9C4+1) + range(0x9C7, 0x9C8+1)
+ range(0x9CB, 0x9CD+1) + [0x9D7]
+ range(0x9E2, 0x9E3+1) + [0xA02, 0xA3C, 0xA3E, 0xA3F]
+ range(0xA40, 0xA42+1) + range(0xA47, 0xA48+1)
+ range(0xA4B, 0xA4D+1) + range(0xA70, 0xA71+1)
+ range(0xA81, 0xA83+1) + [0xABC]
+ range(0xABE, 0xAC5+1) + range(0xAC7, 0xAC9+1)
+ range(0xACB, 0xACD+1) + range(0xB01, 0xB03+1) + [0xB3C]
+ range(0xB3E, 0xB43+1) + range(0xB47, 0xB48+1)
+ range(0xB4B, 0xB4D+1) + range(0xB56, 0xB57+1)
+ range(0xB82, 0xB83+1) + range(0xBBE, 0xBC2+1)
+ range(0xBC6, 0xBC8+1) + range(0xBCA, 0xBCD+1) + [0xBD7]
+ range(0xC01, 0xC03+1) + range(0xC3E, 0xC44+1)
+ range(0xC46, 0xC48+1) + range(0xC4A, 0xC4D+1)
+ range(0xC55, 0xC56+1) + range(0xC82, 0xC83+1)
+ range(0xCBE, 0xCC4+1) + range(0xCC6, 0xCC8+1)
+ range(0xCCA, 0xCCD+1) + range(0xCD5, 0xCD6+1)
+ range(0xD02, 0xD03+1) + range(0xD3E, 0xD43+1)
+ range(0xD46, 0xD48+1) + range(0xD4A, 0xD4D+1) + [0xD57, 0xE31]
+ range(0xE34, 0xE3A+1) + range(0xE47, 0xE4E+1) + [0xEB1]
+ range(0xEB4, 0xEB9+1) + range(0xEBB, 0xEBC+1)
+ range(0xEC8, 0xECD+1) + range(0xF18, 0xF19+1)
+ [0xF35, 0xF37, 0xF39, 0xF3E, 0xF3F]
+ range(0xF71, 0xF84+1) + range(0xF86, 0xF8B+1)
+ range(0xF90, 0xF95+1) + [0xF97]
+ range(0xF99, 0xFAD+1) + range(0xFB1, 0xFB7+1) + [0xFB9]
+ range(0x20D0, 0x20DC+1) + [0x20E1]
+ range(0x302A, 0x302F+1) + [0x3099, 0x309A])
DIGIT_CHARS = tuple(
range(0x30, 0x3A+1) + range(0x660, 0x669+1)
+ range(0x6F0, 0x6F9+1) + range(0x966, 0x96F+1)
+ range(0x9E6, 0x9EF+1) + range(0xA66, 0xA6F+1)
+ range(0xAE6, 0xAEF+1) + range(0xB66, 0xB6F+1)
+ range(0xBE7, 0xBEF+1) + range(0xC66, 0xC6F+1)
+ range(0xCE6, 0xCEF+1) + range(0xD66, 0xD6F+1)
+ range(0xE50, 0xE59+1) + range(0xED0, 0xED9+1)
+ range(0xF20, 0xF29+1))
is_digit_char = _make_set_predicate(DIGIT_CHARS)
EXTENDING_CHARS = tuple(
[0xB7, 0x2D0, 0x2D1, 0x387, 0x640, 0xE46, 0xEC6, 0x3005]
+ range(0x3031, 0x3035+1) + range(0x309D, 0x309E+1)
+ range(0x30FC, 0x30FE+1))
is_extending_char = _make_set_predicate(EXTENDING_CHARS)
IDEOGRAPHIC_CHARS = tuple(
range(0x4E00, 0x9FA5+1) + range(0x3021, 0x3029+1))
is_ideographic_char = _make_set_predicate(IDEOGRAPHIC_CHARS)
LETTER_CHARS = BASE_CHARS + IDEOGRAPHIC_CHARS
is_letter_char = _make_set_predicate(LETTER_CHARS)
NAME_CHARS = LETTER_CHARS + DIGIT_CHARS + (46, 45, 95, 58) \
+ COMBINING_CHARS + EXTENDING_CHARS
is_name_char = _make_set_predicate(NAME_CHARS)
del _make_set_predicate
def parse_Name(self, where):
s, u = self.get_char_and_unicode()
if not self.is_name_char(u):
raise ParseError("illegal character in name: %s (%d)" % (`s`, u))
i = 1
while 1:
c, u = self.get_char_and_unicode(i)
if u not in self.NAME_CHARS:
break
i = i + 1
s = s + c
self.discard_chars(i)
return s
def parse_ExternalID(self):
str = self.get_ascii(6)
if str == "PUBLIC":
# public system id w/ optional system id
self.discard_chars(len(str))
self.require_whitespace("ExternalID")
id = self.get_quoted_string()
if not id:
raise ParseError("could not parse doctype declaration:"
" bad public id")
self.values.public_id = id
self.require_whitespace("ExternalID")
self.values.system_id = self.get_quoted_string()
elif str == "SYSTEM":
# system id
self.discard_chars(len(str))
self.require_whitespace("ExternalID")
id = self.get_quoted_string()
if not id:
raise ParseError("could not parse doctype declaration:"
" bad system id")
self.values.system_id = id
else:
raise ParseError("illegal external ID")
def get_quoted_string(self):
c, u = self.get_char_and_unicode()
if u not in (34, 39):
raise ParseError("illegal quoted string")
self.discard_chars(1)
quote_mark = u
s = ''
while 1:
c, u = self.get_char_and_unicode()
if not c:
raise ParseError("could not find end of quoted string")
self.discard_chars(1)
if u == quote_mark:
break
s = s + c
return s
def skip_comment(self):
self.require_ascii("<!--", "comment")
self.skip_past_ascii("-->", "comment")
def skip_pi(self):
self.require_ascii("<?", "processing instruction")
self.skip_past_ascii("?>", "processing instruction")
def skip_to_doctype(self):
# This should probably be implemented by any extractor for which we
# care about performance.
while 1:
self.skip_whitespace()
try:
c = self.get_ascii(1)
except ConversionError:
self.discard_chars(1)
else:
if not c:
break
if c == "<":
# might be something interesting
try:
prefix = self.get_ascii(4)
except ConversionError:
# If this fails, assume there's something non-white in
# there; allow the exception to be raised since there's
# probably illegal data before the document element.
prefix = self.get_ascii(2)
if prefix == "<!--":
self.skip_comment()
elif prefix[:2] == "<?":
self.skip_pi()
else:
break
else:
# way bad!
raise ParseError("could not locate doctype declaration"
" or start of document element")
def skip_whitespace(self):
"""Trim leading whitespace, returning the number of characters
stripped.
The default implementation is slow; subclasses should override it.
"""
count = 0
try:
while 1:
c, u = self.get_char_and_unicode(count)
if not c:
break
if u not in (0x9, 0xA, 0xD, 0x20):
break
count = count + 1
except ConversionError:
pass
if count:
self.discard_chars(count)
return count
def require_whitespace(self, where):
"""Trim leading whitespace, returning the number of characters
stripped, or raising ParseError if no whitespace was present."""
numchars = self.skip_whitespace()
if not numchars:
raise ParseError("required whitespace in " + where)
def get_ascii(self, count):
raise NotImplementedError
def get_char_and_unicode(self, index=0):
raise NotImplementedError
def require_ascii(self, str, where):
width = len(str)
data = self.get_ascii(width)
if data != str:
raise ParseError("required text '%s' missing in %s" % (str, where))
self.discard_chars(width)
def skip_past_ascii(self, str, what):
width = len(str)
initchar = str[0]
subs = range(1, width)
while 1:
try:
data = self.get_ascii(width)
except ConversionError:
self.discard_chars(1)
else:
if len(data) < width:
raise ParseError("could not locate end of " + what)
if data == str:
self.discard_chars(width)
return
for i in subs:
if data[i] == initchar:
self.discard_chars(i)
else:
self.discard_chars(width)
def discard_chars(self, count):
raise NotImplementedError
class ISO8859Extractor(Extractor):
__declattr_rx = re.compile(
"([a-z]*)=\"((?:[^?\"]|\?[^?>\"]|\?(?=\?))*)\"", re.MULTILINE)
__gi_rx = re.compile("[a-zA-Z_:][-a-zA-Z_:0-9.]*")
__id_rx = re.compile(r"""(?:'[^']*'|\"[^\"]*\")""",
re.MULTILINE | re.VERBOSE)
def yank_id(self):
self.require_whitespace("doctype declaration: ExternalID")
m = self.__id_rx.match(self.buffer)
if not m:
return None
self.buffer = self.buffer[m.end():]
return string.lstrip(m.group())[1:-1]
def parse_doctype(self):
self.require_ascii("<!DOCTYPE", "doctype declaration")
self.require_whitespace("doctype declaration")
m = self.__gi_rx.match(self.buffer)
if not m:
raise ParseError("could not parse doctype declaration: no name")
self.values.doc_elem = m.group()
self.discard_chars(len(self.values.doc_elem))
whitechars = self.skip_whitespace()
if not self.buffer:
raise ParseError("could not parse doctype declaration:"
" insufficient data")
if self.get_ascii(1) in ">[":
# reached internal subset or end of declaration; we're done
return
if not whitechars:
raise ParseError("whitespace required between document type and"
" document type declaration")
self.parse_ExternalID()
def skip_to_doctype(self):
while self.buffer:
self.buffer = string.lstrip(self.buffer)
if self.buffer[:4] == "<!--":
self.skip_comment()
elif self.buffer[:2] == "<?":
self.skip_pi()
else:
break
def skip_pi(self):
pos = string.find(self.buffer, "?>", 2)
if pos < 0:
raise ParseError("could not scan over processing instruction")
self.buffer = self.buffer[pos + 2:]
def skip_comment(self):
pos = string.find(self.buffer, "-->", 4)
if pos < 0:
raise ParseError("could not scan over comment")
self.buffer = self.buffer[pos + 3:]  # "-->" is three characters long
def skip_whitespace(self):
old_buffer = self.buffer
self.buffer = string.lstrip(old_buffer)
return len(old_buffer) - len(self.buffer)
def get_ascii(self, count):
# not quite right, but good enough for now
return self.buffer[:count]
def get_char_and_unicode(self, index=0):
# really only good for iso-8859-1
c = self.buffer[index:index + 1]
if c:
return c, ord(c)
else:
return c, None
def discard_chars(self, count):
self.buffer = self.buffer[count:]
def lower(self, str):
return string.lower(str)
class ISO8859_1_Extractor(ISO8859Extractor):
Encodings = ("iso-8859-1", "iso-latin-1", "latin-1")
def get_ascii(self, count):
return self.buffer[:count]
def get_char_and_unicode(self, index=0):
c = self.buffer[index:index + 1]
if c:
return c, ord(c)
else:
return c, None
add_extractor_class(ISO8859_1_Extractor)
for c in "23456789":
class _Extractor(ISO8859Extractor):
Encodings = ("iso-8859-" + c,)
try:
_Extractor.__name__ = "ISO8859_%s_Extractor" % c
except TypeError:
# older Python versions wouldn't allow __name__ to be set on a class
pass
exec "ISO8859_%s_Extractor = _Extractor" % c
add_extractor_class(_Extractor)
del _Extractor
class UTF8Extractor(ISO8859Extractor):
Encodings = ("utf-8",)
def get_char_and_unicode(self, index=0):
raise NotImplementedError
add_extractor_class(UTF8Extractor)
class EBCDICExtractor(Extractor):
Encodings = ("ebcdic",)
# This table was taken from the source code of GNU recode 3.4.
__ASCII_TO_EBCDIC = [
0, 1, 2, 3, 55, 45, 46, 47, # 0 - 7
22, 5, 37, 11, 12, 13, 14, 15, # 8 - 15
16, 17, 18, 19, 60, 61, 50, 38, # 16 - 23
24, 25, 63, 39, 28, 29, 30, 31, # 24 - 31
64, 79, 127, 123, 91, 108, 80, 125, # 32 - 39
77, 93, 92, 78, 107, 96, 75, 97, # 40 - 47
240, 241, 242, 243, 244, 245, 246, 247, # 48 - 55
248, 249, 122, 94, 76, 126, 110, 111, # 56 - 63
124, 193, 194, 195, 196, 197, 198, 199, # 64 - 71
200, 201, 209, 210, 211, 212, 213, 214, # 72 - 79
215, 216, 217, 226, 227, 228, 229, 230, # 80 - 87
231, 232, 233, 74, 224, 90, 95, 109, # 88 - 95
121, 129, 130, 131, 132, 133, 134, 135, # 96 - 103
136, 137, 145, 146, 147, 148, 149, 150, # 104 - 111
151, 152, 153, 162, 163, 164, 165, 166, # 112 - 119
167, 168, 169, 192, 106, 208, 161, 7, # 120 - 127
32, 33, 34, 35, 36, 21, 6, 23, # 128 - 135
40, 41, 42, 43, 44, 9, 10, 27, # 136 - 143
48, 49, 26, 51, 52, 53, 54, 8, # 144 - 151
56, 57, 58, 59, 4, 20, 62, 225, # 152 - 159
65, 66, 67, 68, 69, 70, 71, 72, # 160 - 167
73, 81, 82, 83, 84, 85, 86, 87, # 168 - 175
88, 89, 98, 99, 100, 101, 102, 103, # 176 - 183
104, 105, 112, 113, 114, 115, 116, 117, # 184 - 191
118, 119, 120, 128, 138, 139, 140, 141, # 192 - 199
142, 143, 144, 154, 155, 156, 157, 158, # 200 - 207
159, 160, 170, 171, 172, 173, 174, 175, # 208 - 215
176, 177, 178, 179, 180, 181, 182, 183, # 216 - 223
184, 185, 186, 187, 188, 189, 190, 191, # 224 - 231
202, 203, 204, 205, 206, 207, 218, 219, # 232 - 239
220, 221, 222, 223, 234, 235, 236, 237, # 240 - 247
238, 239, 250, 251, 252, 253, 254, 255, # 248 - 255
]
_m = [None] * 256
for _i in range(len(__ASCII_TO_EBCDIC)):
_e = __ASCII_TO_EBCDIC[_i]
__ASCII_TO_EBCDIC[_i] = chr(_e)
_m[_e] = chr(_i)
for _i in range(len(_m)):
if _m[_i] is None:
print "No EBCDIC character for ASCII", `chr(_i)`
__EBCDIC_TO_ASCII = tuple(_m)
__translation = string.maketrans(string.join(__ASCII_TO_EBCDIC, ''),
string.join(__EBCDIC_TO_ASCII, ''))
def get_ascii(self, count):
buffer = self.buffer[:count]
return string.translate(buffer, self.__translation)
add_extractor_class(EBCDICExtractor)
def ascii_to_ucs2be(s):
L = map(None, s)
L.insert(0, '')
return string.join(L, '\0')
def ascii_to_ucs2le(s):
L = map(None, s)
L.append('')
return string.join(L, '\0')
def ascii_to_ucs4be(s):
L = map(None, s)
L.insert(0, '')
return string.join(L, '\0\0\0')
def ascii_to_ucs4le(s):
L = map(None, s)
L.append('')
return string.join(L, '\0\0\0')
class UCS2Extractor(Extractor):
Encodings = ("ucs-2", "utf-16", "iso-10646-ucs-2")
__WHITESPACE_BE = map(ascii_to_ucs2be, string.whitespace)
__WHITESPACE_LE = map(ascii_to_ucs2le, string.whitespace)
def __init__(self, buffer, values):
Extractor.__init__(self, buffer, values)
if values.byte_order not in (BIG_ENDIAN, LITTLE_ENDIAN):
raise ValueError, \
"UCS-2 encoded strings must have determinable byte order"
self.__byte_order = values.byte_order
if values.byte_order == BIG_ENDIAN:
self.__whitespace = self.__WHITESPACE_BE
self.__from_ascii = ascii_to_ucs2be
else:
self.__whitespace = self.__WHITESPACE_LE
self.__from_ascii = ascii_to_ucs2le
def skip_whitespace(self):
buffer = self.buffer
pos = 0
whitespace = self.__whitespace
while buffer[pos:pos+2] in whitespace:
pos = pos + 2
self.buffer = buffer[pos:]
return pos / 2
def get_ascii(self, count):
data = self.buffer[:count*2]
if self.__byte_order == BIG_ENDIAN:
zero_offset = 0
char_offset = 1
else:
zero_offset = 1
char_offset = 0
s = ''
try:
for i in range(0, count*2, 2):
if data[i+zero_offset] != '\0':
raise ConversionError("cannot convert %s to ASCII"
% `data[i:i+2]`)
s = s + data[i+char_offset]
except IndexError:
# just didn't have enough; somebody else's problem
pass
return s
def get_char_and_unicode(self, index=0):
if len(self.buffer) >= 2:
offset = index * 2
c = self.buffer[offset:offset + 2]
return c, ordwc(c, self.__byte_order)
else:
return None, None
def discard_chars(self, count):
self.buffer = self.buffer[count*2:]
add_extractor_class(UCS2Extractor)
def ordwc(wc, byte_order=None):
"""Return the ord() for a wide character."""
if byte_order is None:
byte_order = NATIVE_ENDIANNESS
width = len(wc)
if width == 2:
o1, o2 = map(ord, wc)
if byte_order == BIG_ENDIAN:
o = (o1 << 8) | o2
else:
o = (o2 << 8) | o1
elif width == 4:
o1, o2, o3, o4 = map(ord, wc)
if byte_order == BIG_ENDIAN:
o = (((((o1 << 8) | o2) << 8) | o3) << 8) | o4
else:
o = (((((o4 << 8) | o3) << 8) | o2) << 8) | o1
else:
raise ValueError, "wide-character string has bad length"
return o
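# Example: ordwc("\x00\x3c", BIG_ENDIAN) == 0x3c and ordwc("\x3c\x00", LITTLE_ENDIAN) == 0x3c;
# a string whose length is neither 2 nor 4 raises ValueError.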
def ordwstr(wstr, byte_order=None, charsize=2):
assert charsize in (2, 4), "wide character size must be 2 or 4"
ords = []
for i in range(0, len(wstr), charsize):
ords.append(ordwc(wstr[i:i+charsize], byte_order))
return ords
def dump_info(values, labels=None):
if labels is None:
labels = FieldLabels
format = "%%%ds: %%s" % max(map(len, FieldLabels.__dict__.values()))
for field_name in FieldNames:
value = getattr(values, field_name)
label = getattr(FieldLabels, field_name)
if value is not None:
print format % (label, value)
def main():
import getopt
#
reqs = Record() # required values (for output)
#
get_defaults = 1
full_report = 0
debugging = 0
program = os.path.basename(sys.argv[0])
opts, args = getopt.getopt(sys.argv[1:], "ad",
["all", "docelem", "encoding", "public-id",
"standalone", "system-id", "version"])
if opts:
get_defaults = 0
for opt, arg in opts:
if opt in ("-a", "--all"):
full_report = 1
elif opt == "-d":
debugging = debugging + 1
elif opt == "--docelem":
reqs.doc_elem = 1
elif opt == "--encoding":
reqs.encoding = 1
elif opt == "--public-id":
reqs.public_id = 1
elif opt == "--standalone":
reqs.standalone = 1
elif opt == "--system-id":
reqs.system_id = 1
elif opt == "--version":
reqs.xml_version = 1
if get_defaults:
full_report = 1
#
if len(args) > 1:
sys.stderr.write(program + ": too many input sources specified")
sys.exit(2)
if args:
if os.path.exists(args[0]):
fp = open(args[0])
else:
import urllib
fp = urllib.urlopen(args[0])
else:
fp = sys.stdin
#
buffer = fp.read(10240)
fp.close()
try:
values = get_xml_info(buffer)
except Error, e:
sys.stderr.write("parse failed: %s\n" % e.args[0])
if debugging:
raise
sys.exit(1)
#
# Make the report:
#
if full_report:
dump_info(values)
else:
for field_name in FieldNames:
if getattr(reqs, field_name):
value = getattr(values, field_name)
if value is None:
print
else:
print value
if __name__ == "__main__":
main()
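# Usage sketch (hypothetical file name; the module is Python 2 only, as its syntax shows):
#   python xmlinfo.py --encoding --docelem document.xml
# With no arguments it reads the document from stdin; -a/--all prints the full report.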
|
import pathlib
import shutil
import tempfile
from anymail.message import AnymailMessage
from django.conf import settings
from django.core.management import BaseCommand, call_command
from django.template import loader as template_loader
from django.utils.translation import gettext_lazy as _, activate, get_language
from mtp_common.tasks import default_from_address, prepare_context
from prison.models import PrisonerCreditNoticeEmail
class Command(BaseCommand):
"""
Emails a PDF bundle of credit notices to prisons
"""
help = __doc__.strip().splitlines()[0]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.subject = _('These prisoners’ accounts have been credited')
self.from_address = default_from_address()
self.verbosity = 1
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--prison', help='NOMIS id of prison, defaults to all prisons.')
parser.add_argument('--date', help='Credited date, defaults to yesterday.')
def handle(self, prison=None, date=None, **options):
self.verbosity = options.get('verbosity', self.verbosity)
if not prison:
credit_notice_emails = PrisonerCreditNoticeEmail.objects.all()
else:
credit_notice_emails = PrisonerCreditNoticeEmail.objects.filter(prison=prison)
if not credit_notice_emails.exists():
if prison:
self.stderr.write('No email address found for %s' % prison)
else:
self.stderr.write('No known email addresses')
return
if not get_language():
language = getattr(settings, 'LANGUAGE_CODE', 'en')
activate(language)
bundle_dir = pathlib.Path(tempfile.mkdtemp())
try:
for credit_notice_email in credit_notice_emails:
path = bundle_dir / ('prison-credits-%s.pdf' % credit_notice_email.prison.nomis_id)
self.handle_prison(credit_notice_email, path, date, **options)
finally:
if bundle_dir.exists():
shutil.rmtree(str(bundle_dir))
def handle_prison(self, credit_notice_email, path, date, **options):
call_command(
'create_prisoner_credit_notices',
path, credit_notice_email.prison.nomis_id,
date=date, **options
)
if not path.exists():
if self.verbosity:
self.stdout.write('Nothing to send to %s' % credit_notice_email)
return
template_context = prepare_context()
text_body = template_loader.get_template('credit/prisoner-notice-email.txt').render(template_context)
html_body = template_loader.get_template('credit/prisoner-notice-email.html').render(template_context)
email = AnymailMessage(
subject=str(self.subject),
body=text_body.strip('\n'),
from_email=self.from_address,
to=[credit_notice_email.email],
tags=['prisoner-notice'],
)
email.attach_alternative(html_body, 'text/html')
email.attach_file(str(path), mimetype='application/pdf')
if self.verbosity:
self.stdout.write('Sending prisoner notice email to %s' % credit_notice_email)
email.send()
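# Usage sketch (the management command name comes from this file's location under
# <app>/management/commands/, so the name below is only illustrative):
#   ./manage.py send_credit_notice_emails --prison ABC --date 2020-05-01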
|
import itertools
class SeleneModelGenerator:
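"""
Builds a textual multi-agent model of the Selene voting protocol for a given
number of tellers, voters and candidates: a Teller, an ElectionAuthority, a
web bulletin board (WBB), the Voters and a Coercer, plus the REDUCTION,
PERSISTENT and COALITION sections selected by the formula index.
"""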
def __init__(self, teller_count: int, voter_count: int, cand_count: int, formula: int):
self._teller_count: int = teller_count
self._voter_count: int = voter_count
self._cand_count: int = cand_count
self._formula: int = formula
def generate(self):
model = ""
model += self._generate_teller()
model += "\n"
model += self._generate_ea()
model += "\n"
model += self._generate_wbb()
model += "\n"
model += self._generate_voter()
model += "\n"
model += self._generate_coercer()
model += "\n"
model += self._generate_reduction()
model += self._generate_persistent()
model += self._generate_coalition()
return model
def _generate_teller(self):
teller = f"Agent Teller[{self._teller_count}]:\n"
teller += "init: t_init\n"
votes = [0 for _ in range(self._voter_count)]
for voter_id in range(1, self._voter_count + 1):
for cand_id in range(1, self._cand_count + 1):
votes_copy = votes[:]
votes_copy[voter_id - 1] = cand_id
teller += f"shared sendVote_Voter{voter_id}_{cand_id}: t_init -> t_init{self._trackers_to_str(votes_copy)} [v_Voter{voter_id}={cand_id}]\n"
teller += self._recursive_teller(votes_copy)
votes = [[i + 1 for i in range(self._cand_count)] for _ in range(self._voter_count)]
for vot in itertools.product(*votes):
teller += f"shared finishVoting: t_init{self._trackers_to_str(vot)} -> t_finish{self._trackers_to_str(vot)}\n"
teller += f"shared decryptVotes: t_finish{self._trackers_to_str(vot)} -> t_decrypt{self._trackers_to_str(vot)}\n"
teller += f"shared sendToWBB{self._trackers_to_str(vot)}: t_decrypt{self._trackers_to_str(vot)} -> t_send\n"
teller += "shared publishVotes: t_send -> t_publish\n"
return teller
def _recursive_teller(self, votes: list):
result = ""
for voter_id in range(1, self._voter_count + 1):
if votes[voter_id - 1] != 0:
continue
for cand_id in range(1, self._cand_count + 1):
votes_copy = votes[:]
votes_copy[voter_id - 1] = cand_id
result += f"shared sendVote_Voter{voter_id}_{cand_id}: t_init{self._trackers_to_str(votes)} -> t_init{self._trackers_to_str(votes_copy)} [v_Voter{voter_id}={cand_id}]\n"
result += self._recursive_teller(votes_copy)
return result
def _generate_ea(self):
ea = "Agent ElectionAuthority[1]:\n"
ea += "init: ea_init\n"
trackers = [i for i in range(1, self._voter_count + 1)]
for perm in itertools.permutations(trackers):
ea += f"shared generateTrackers{self._trackers_to_str(perm)}: ea_init -> ea_gen{self._trackers_to_str(perm)} {self._trackers_to_props(perm)}\n"
ea += f"shared publishTrackers: ea_gen{self._trackers_to_str(perm)} -> ea_pub_t{self._trackers_to_str(perm)}\n"
ea += f"shared startVoting: ea_pub_t{self._trackers_to_str(perm)} -> ea_start{self._trackers_to_str(perm)}\n"
ea += f"shared finishVoting: ea_start{self._trackers_to_str(perm)} -> ea_finish{self._trackers_to_str(perm)}\n"
ea += f"shared publishVotes: ea_finish{self._trackers_to_str(perm)} -> ea_pub_v{self._trackers_to_str(perm)} [published=true]\n"
for v_id in range(1, self._voter_count + 1):
ea += f"shared sendTracker_Voter{v_id}_{perm[v_id - 1]}: ea_pub_v{self._trackers_to_str(perm)} -> ea_pub_v{self._trackers_to_str(perm)} [t_Voter{v_id}={perm[v_id - 1]}]\n"
ea += f"shared allTrackerSend: ea_pub_v{self._trackers_to_str(perm)} -> ea_send [end=True]\n"
return ea
def _generate_wbb(self):
wbb = "Agent WBB[1]:\n"
wbb += "init: wbb_init\n"
trackers = [i for i in range(1, self._voter_count + 1)]
votes = [[i + 1 for i in range(self._cand_count)] for _ in range(self._voter_count)]
uniqe_tr = set()
for perm in itertools.permutations(trackers):
wbb += f"shared generateTrackers{self._trackers_to_str(perm)}: wbb_init -> wbb_gen{self._trackers_to_str(perm)} {self._trackers_to_props(list(perm))}\n"
for vot in itertools.product(*votes):
wbb += f"shared sendToWBB{self._trackers_to_str(vot)}: wbb_gen{self._trackers_to_str(perm)} -> wbb_send{self._tr_vote_comb_to_str(list(perm), list(vot))} {self._tr_vote_comb_to_props(list(perm), list(vot))}\n"
for voter in range(1, self._voter_count + 1):
for candidate in range(1, self._cand_count + 1):
if vot[perm[voter - 1] - 1] == candidate:
uniqe_tr.add(f"shared coercerWBB_{voter}_{candidate}: wbb_send{self._tr_vote_comb_to_str(list(perm), list(vot))} -> wbb_send{self._tr_vote_comb_to_str(list(perm), list(vot))}\n")
for v_id in range(1, self._voter_count + 1):
uniqe_tr.add(f"shared Voter{v_id}_WBB_{voter}_{candidate}: wbb_send{self._tr_vote_comb_to_str(list(perm), list(vot))} -> wbb_send{self._tr_vote_comb_to_str(list(perm), list(vot))}\n")
for tr in uniqe_tr:
wbb += tr
return wbb
def _tr_vote_comb_to_str(self, trackers, votes):
result = ""
for i in range(len(trackers)):
result += f"_{i + 1}_{votes[trackers[i] - 1]}"
return result
def _tr_vote_comb_to_props(self, trackers, votes):
result = f"[wbb_t1={votes[trackers[0] - 1]}"
for i in range(1, len(trackers)):
result += f", wbb_t{i + 1}={votes[trackers[i] - 1]}"
result += "]"
return result
def _trackers_to_str(self, trackers):
result = ""
for tr in trackers:
result += f"_{tr}"
return result
def _trackers_to_props(self, trackers):
result = f"[t1={trackers[0]}"
for i in range(1, len(trackers)):
result += f", t{i + 1}={trackers[i]}"
result += "]"
return result
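# For example, _trackers_to_str([1, 2]) returns "_1_2" and
# _trackers_to_props([1, 2]) returns "[t1=1, t2=2]".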
def _generate_voter(self):
voter = f"Agent Voter[{self._voter_count}]:\n"
voter += "init: v_init\n"
for request in range(self._cand_count + 1):
voter += f"shared requestVoteFor{request}_aID: v_init -> v_request_r{request} [req_aID={request}]\n"
voter += f"shared startVoting: v_request_r{request} -> v_start_r{request}\n"
voter += f"createCommitment: v_start_r{request} -> v_commit_r{request}\n"
for vote in range(1, self._cand_count + 1):
voter += f"fillVote{vote}: v_commit_r{request} -> v_fill_r{request}_v{vote} [v_aID={vote}]\n"
voter += f"encryptVote: v_fill_r{request}_v{vote} -> v_encrypt_r{request}_v{vote}\n"
voter += f"shared sendVote_aID_{vote}: v_encrypt_r{request}_v{vote} -> v_send_r{request}_v{vote}\n"
voter += f"shared finishVoting: v_send_r{request}_v{vote} -> v_finish_r{request}_v{vote}\n"
voter += f"shared publishVotes: v_finish_r{request}_v{vote} -> v_publish_r{request}_v{vote}\n"
for false_term in range(self._voter_count + 1):
voter += f"computeFalseAlphaTerm_{false_term}: v_publish_r{request}_v{vote} -> v_false_a_r{request}_v{vote}_f{false_term} [false_a_aID={false_term}]\n"
voter += f"computeFalseTracker: v_false_a_r{request}_v{vote}_f{false_term} -> v_false_tr_r{request}_v{vote}_f{false_term} [false_tr_aID={false_term}]\n"
for tracker in range(1, self._voter_count + 1):
voter += f"shared sendTracker_aID_{tracker}: v_false_tr_r{request}_v{vote}_f{false_term} -> v_send_tr_r{request}_v{vote}_f{false_term}_t{tracker} [v_t_aID={tracker}]\n"
voter += f"shared allTrackerSend: v_send_tr_r{request}_v{vote}_f{false_term}_t{tracker} -> v_wbb_r{request}_v{vote}_f{false_term}_t{tracker}\n"
for wbb_tracker in range(1, self._voter_count + 1):
for wbb_vote in range(1, self._cand_count + 1):
voter += f"shared aID_WBB_{wbb_tracker}_{wbb_vote}: v_wbb_r{request}_v{vote}_f{false_term}_t{tracker} -> v_verif_r{request}_v{vote}_f{false_term}_t{tracker}_wt{wbb_tracker}_wv{wbb_vote}\n"
voter += f"verifyVote: v_verif_r{request}_v{vote}_f{false_term}_t{tracker}_wt{wbb_tracker}_wv{wbb_vote} -> v_show_r{request}_v{vote}_f{false_term}_t{tracker} [verify_aID={wbb_tracker == tracker and wbb_vote == vote}]\n"
if false_term != 0 and false_term != tracker:
voter += f"shared showTracker{false_term}_aID: v_show_r{request}_v{vote}_f{false_term}_t{tracker} -> v_punish\n"
voter += f"shared showTracker{tracker}_aID: v_show_r{request}_v{vote}_f{false_term}_t{tracker} -> v_punish\n"
voter += f"shared punish_aID: v_punish -> v_end [v_pun_aID=True]\n"
voter += f"shared not_punish_aID: v_punish -> v_end [v_pun_aID=False]\n"
return voter
def _recursive_coercer(self, req: list):
result = ""
for voter_id in range(1, self._voter_count + 1):
if req[voter_id - 1] != -1:
continue
for cand_id in range(0, self._cand_count + 1):
req_copy = req[:]
req_copy[voter_id - 1] = cand_id
result += f"shared requestVoteFor{cand_id}_Voter{voter_id}: c_req{self._trackers_to_str(req)} -> c_req{self._trackers_to_str(req_copy)} [c_req_Voter{voter_id}={cand_id}]\n"
result += self._recursive_coercer(req_copy)
return result
def _recursive_coercer_wbb(self, wbb, req):
result = ""
for tracker_id in range(1, self._voter_count + 1):
if wbb[tracker_id - 1] != 0:
continue
for cand_id in range(1, self._cand_count + 1):
wbb_copy = wbb[:]
wbb_copy[tracker_id - 1] = cand_id
result += f"shared coercerWBB_{tracker_id}_{cand_id}: c_wbb{self._trackers_to_str(req)}{self._trackers_to_str(wbb)} -> c_wbb{self._trackers_to_str(req)}{self._trackers_to_str(wbb_copy)} [c_wbb_t{tracker_id}={cand_id}]\n"
result += self._recursive_coercer_wbb(wbb_copy, req)
return result
def _generate_coercer(self):
coercer = "Agent Coercer[1]:\n"
coercer += "init: c_init\n"
requests = [-1 for _ in range(self._voter_count)]
for voter_id in range(1, self._voter_count + 1):
for cand_id in range(0, self._cand_count + 1):
req_copy = requests[:]
req_copy[voter_id - 1] = cand_id
coercer += f"shared requestVoteFor{cand_id}_Voter{voter_id}: c_init -> c_req{self._trackers_to_str(req_copy)} [c_req_Voter{voter_id}={cand_id}]\n"
coercer += self._recursive_coercer(req_copy)
requests = [[i for i in range(self._cand_count + 1)] for _ in range(self._voter_count)]
for req in itertools.product(*requests):
coercer += f"shared publishTrackers: c_req{self._trackers_to_str(req)} -> c_pubt{self._trackers_to_str(req)}\n"
coercer += f"shared startVoting: c_pubt{self._trackers_to_str(req)} -> c_start{self._trackers_to_str(req)}\n"
coercer += f"shared finishVoting: c_start{self._trackers_to_str(req)} -> c_finish{self._trackers_to_str(req)}\n"
coercer += f"shared publishVotes: c_finish{self._trackers_to_str(req)} -> c_pubv{self._trackers_to_str(req)}\n"
wbb = [0 for _ in range(self._voter_count)]
for tracker_id in range(1, self._voter_count + 1):
for cand_id in range(1, self._cand_count + 1):
wbb_copy = wbb[:]
wbb_copy[tracker_id - 1] = cand_id
coercer += f"shared coercerWBB_{tracker_id}_{cand_id}: c_pubv{self._trackers_to_str(req)} -> c_wbb{self._trackers_to_str(req)}{self._trackers_to_str(wbb_copy)} [c_wbb_t{tracker_id}={cand_id}]\n"
coercer += self._recursive_coercer_wbb(wbb_copy, req)
wbb = [[i for i in range(self._cand_count + 1)] for _ in range(self._voter_count)]
for w in itertools.product(*wbb):
trackers = [0 for _ in range(self._voter_count)]
for voter_id in range(1, self._voter_count + 1):
for tracker_id in range(1, self._voter_count + 1):
tr_copy = trackers[:]
tr_copy[voter_id - 1] = tracker_id
coercer += f"shared showTracker{tracker_id}_Voter{voter_id}: c_wbb{self._trackers_to_str(req)}{self._trackers_to_str(w)} -> c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr_copy)} [c_voter{voter_id}_bad={req[voter_id - 1] != 0 and req[voter_id-1] != w[tr_copy[voter_id - 1] - 1]}, c_some_bad={tr_copy[voter_id - 1] in trackers}, c_vote_Voter{voter_id}={w[tr_copy[voter_id - 1] - 1]}]\n"
coercer += self._recursive_coercer_tr(tr_copy, req, w)
trackers = [[i for i in range(1, self._voter_count + 1)] for _ in range(self._voter_count)]
for tr in itertools.product(*trackers):
for voter_id in range(1, self._voter_count + 1):
coercer += f"shared punish_Voter{voter_id}: c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr)} -> c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr)} [c_pun_Voter{voter_id}=True]\n"
coercer += f"shared not_punish_Voter{voter_id}: c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr)} -> c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr)} [c_pun_Voter{voter_id}=False]\n"
return coercer
def _recursive_coercer_tr(self, tr, req, w):
result = ""
for voter_id in range(1, self._voter_count + 1):
if tr[voter_id - 1] != 0:
continue
for tracker_id in range(1, self._voter_count + 1):
tr_copy = tr[:]
tr_copy[voter_id - 1] = tracker_id
result += f"shared showTracker{tracker_id}_Voter{voter_id}: c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr)} -> c_show{self._trackers_to_str(req)}{self._trackers_to_str(w)}{self._trackers_to_str(tr_copy)} [c_voter{voter_id}_bad={req[voter_id - 1] != 0 and req[voter_id-1] != w[tr_copy[voter_id - 1] - 1]}, c_some_bad={tr_copy[voter_id - 1] in tr}, c_vote_Voter{voter_id}={w[tr_copy[voter_id - 1] - 1]}]\n"
result += self._recursive_coercer_tr(tr_copy, req, w)
return result
def _generate_reduction(self):
if self._formula == 0:
reduction = "REDUCTION: [c_pun_Voter1]\n"
elif self._formula == 1:
reduction = "REDUCTION: [v_Voter1, c_vote_Voter1]\n"
elif self._formula == 2:
reduction = "REDUCTION: [verify_Voter1]\n"
elif self._formula == 3 or self._formula == 4:
reduction = ""  # formulas 3 and 4 add no REDUCTION line
return reduction
def _generate_persistent(self):
if self._formula == 0:
persistent = "PERSISTENT: [c_pun_Voter1]\n"
elif self._formula == 1:
persistent = "PERSISTENT: [v_Voter1, c_vote_Voter1]\n"
elif self._formula == 2:
persistent = "PERSISTENT: [verify_Voter1]\n"
elif self._formula == 3 or self._formula == 4:
persistent = ""  # formulas 3 and 4 add no PERSISTENT line
return persistent
def _generate_coalition(self):
if self._formula == 2:
coalition = "COALITION: [Voter1]\n"
else:
coalition = "COALITION: [Coercer1]\n"
return coalition
if __name__ == "__main__":
teller_count = int(input("Teller Count: "))
voter_count = int(input("Voter Count: "))
cand_count = int(input("Candidates Count: "))
formula = int(input("Formula: "))
selene_model_generator = SeleneModelGenerator(teller_count, voter_count, cand_count, formula)
model = selene_model_generator.generate()
file = open(f"Selene_{teller_count}_{voter_count}_{cand_count}_{formula}.txt", "w")
file.write(model)
file.close()
|
#
# PySNMP MIB module SONUS-RTCP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SONUS-RTCP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:02:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Counter32, IpAddress, TimeTicks, iso, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, NotificationType, Counter64, ObjectIdentity, Integer32, MibIdentifier, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "IpAddress", "TimeTicks", "iso", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "NotificationType", "Counter64", "ObjectIdentity", "Integer32", "MibIdentifier", "Unsigned32")
TextualConvention, DateAndTime, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DateAndTime", "DisplayString")
sonusEventDescription, sonusSlotIndex, sonusShelfIndex, sonusEventLevel, sonusEventClass = mibBuilder.importSymbols("SONUS-COMMON-MIB", "sonusEventDescription", "sonusSlotIndex", "sonusShelfIndex", "sonusEventLevel", "sonusEventClass")
sonusResourcesMIBs, = mibBuilder.importSymbols("SONUS-SMI", "sonusResourcesMIBs")
SonusBoolean, SonusShelfIndex = mibBuilder.importSymbols("SONUS-TC", "SonusBoolean", "SonusShelfIndex")
sonusRtcpMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7))
if mibBuilder.loadTexts: sonusRtcpMIB.setLastUpdated('200104180000Z')
if mibBuilder.loadTexts: sonusRtcpMIB.setOrganization('Sonus Networks, Inc.')
sonusRtcpMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1))
sonusRtcpShelfAdmnTable = MibTable((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1), )
if mibBuilder.loadTexts: sonusRtcpShelfAdmnTable.setStatus('current')
sonusRtcpShelfAdmnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1), ).setIndexNames((0, "SONUS-RTCP-MIB", "sonusRtcpShelfAdmnIndex"))
if mibBuilder.loadTexts: sonusRtcpShelfAdmnEntry.setStatus('current')
sonusRtcpShelfAdmnIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 1), SonusShelfIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnIndex.setStatus('current')
sonusRtcpShelfAdmnSrInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 40)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnSrInterval.setStatus('current')
sonusRtcpShelfAdmnEstablishInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnEstablishInterval.setStatus('current')
sonusRtcpShelfAdmnLossTrapHistoryEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 50))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnLossTrapHistoryEntries.setStatus('current')
sonusRtcpShelfAdmnAbsenceTrapHistoryEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 50))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnAbsenceTrapHistoryEntries.setStatus('current')
sonusRtcpShelfAdmnLossTrapHistoryTableReset = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("unknown", 1), ("reset", 2))).clone('unknown')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnLossTrapHistoryTableReset.setStatus('current')
sonusRtcpShelfAdmnAbsenceTrapHistoryTableReset = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("unknown", 1), ("reset", 2))).clone('unknown')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sonusRtcpShelfAdmnAbsenceTrapHistoryTableReset.setStatus('current')
sonusRtcpSlotLinkLossTrapStatusTable = MibTable((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4), )
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatusTable.setStatus('current')
sonusRtcpSlotLinkLossTrapStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1), ).setIndexNames((0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkLossTrapStatShelfIndex"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkLossTrapStatSlotIndex"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkLossTrapStatSrcIpAddress"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkLossTrapStatDestIpAddress"))
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatusEntry.setStatus('current')
sonusRtcpSlotLinkLossTrapStatShelfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 1), SonusShelfIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatShelfIndex.setStatus('current')
sonusRtcpSlotLinkLossTrapStatSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatSlotIndex.setStatus('current')
sonusRtcpSlotLinkLossTrapStatSrcIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatSrcIpAddress.setStatus('current')
sonusRtcpSlotLinkLossTrapStatDestIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatDestIpAddress.setStatus('current')
sonusRtcpSlotLinkLossTrapStatCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatCount.setStatus('current')
sonusRtcpSlotLinkLossTrapStatTotalCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatTotalCount.setStatus('current')
sonusRtcpSlotLinkLossTrapStatStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 7), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatStartTime.setStatus('current')
sonusRtcpSlotLinkLossTrapStatLastTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 8), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatLastTime.setStatus('current')
sonusRtcpSlotLinkLossTrapStatActive = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 4, 1, 9), SonusBoolean()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkLossTrapStatActive.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatusTable = MibTable((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5), )
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatusTable.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1), ).setIndexNames((0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkAbsenceTrapStatShelfIndex"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkAbsenceTrapStatSlotIndex"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkAbsenceTrapStatSrcIpAddress"), (0, "SONUS-RTCP-MIB", "sonusRtcpSlotLinkAbsenceTrapStatDestIpAddress"))
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatusEntry.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatShelfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 1), SonusShelfIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatShelfIndex.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatSlotIndex.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatSrcIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatSrcIpAddress.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatDestIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatDestIpAddress.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatCount.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatTotalCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatTotalCount.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 7), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatStartTime.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatLastTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 8), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatLastTime.setStatus('current')
sonusRtcpSlotLinkAbsenceTrapStatActive = MibTableColumn((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 1, 5, 1, 9), SonusBoolean()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpSlotLinkAbsenceTrapStatActive.setStatus('current')
sonusRtcpMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2))
sonusRtcpMIBNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 0))
sonusRtcpMIBNotificationsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 1))
sonusRtcpLocalIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpLocalIpAddr.setStatus('current')
sonusRtcpRemoteIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sonusRtcpRemoteIpAddr.setStatus('current')
sonusRtcpPacketLossThresholdExceededNotification = NotificationType((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 0, 1)).setObjects(("SONUS-COMMON-MIB", "sonusShelfIndex"), ("SONUS-COMMON-MIB", "sonusSlotIndex"), ("SONUS-COMMON-MIB", "sonusEventDescription"), ("SONUS-COMMON-MIB", "sonusEventClass"), ("SONUS-COMMON-MIB", "sonusEventLevel"))
if mibBuilder.loadTexts: sonusRtcpPacketLossThresholdExceededNotification.setStatus('current')
sonusRtcpPacketLossThresholdClearedNotification = NotificationType((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 0, 2)).setObjects(("SONUS-COMMON-MIB", "sonusShelfIndex"), ("SONUS-COMMON-MIB", "sonusSlotIndex"), ("SONUS-COMMON-MIB", "sonusEventDescription"), ("SONUS-COMMON-MIB", "sonusEventClass"), ("SONUS-COMMON-MIB", "sonusEventLevel"))
if mibBuilder.loadTexts: sonusRtcpPacketLossThresholdClearedNotification.setStatus('current')
sonusRtcpNoRtpOrRtcpPacketsReceivedNotification = NotificationType((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 0, 3)).setObjects(("SONUS-COMMON-MIB", "sonusShelfIndex"), ("SONUS-COMMON-MIB", "sonusSlotIndex"), ("SONUS-COMMON-MIB", "sonusEventDescription"), ("SONUS-COMMON-MIB", "sonusEventClass"), ("SONUS-COMMON-MIB", "sonusEventLevel"))
if mibBuilder.loadTexts: sonusRtcpNoRtpOrRtcpPacketsReceivedNotification.setStatus('current')
sonusRtcpNoRtpOrRtcpPacketsClearedNotification = NotificationType((1, 3, 6, 1, 4, 1, 2879, 2, 2, 7, 2, 0, 4)).setObjects(("SONUS-COMMON-MIB", "sonusShelfIndex"), ("SONUS-COMMON-MIB", "sonusSlotIndex"), ("SONUS-COMMON-MIB", "sonusEventDescription"), ("SONUS-COMMON-MIB", "sonusEventClass"), ("SONUS-COMMON-MIB", "sonusEventLevel"))
if mibBuilder.loadTexts: sonusRtcpNoRtpOrRtcpPacketsClearedNotification.setStatus('current')
mibBuilder.exportSymbols("SONUS-RTCP-MIB", sonusRtcpRemoteIpAddr=sonusRtcpRemoteIpAddr, sonusRtcpSlotLinkLossTrapStatDestIpAddress=sonusRtcpSlotLinkLossTrapStatDestIpAddress, sonusRtcpSlotLinkAbsenceTrapStatSlotIndex=sonusRtcpSlotLinkAbsenceTrapStatSlotIndex, sonusRtcpPacketLossThresholdExceededNotification=sonusRtcpPacketLossThresholdExceededNotification, sonusRtcpLocalIpAddr=sonusRtcpLocalIpAddr, sonusRtcpSlotLinkAbsenceTrapStatDestIpAddress=sonusRtcpSlotLinkAbsenceTrapStatDestIpAddress, sonusRtcpShelfAdmnAbsenceTrapHistoryTableReset=sonusRtcpShelfAdmnAbsenceTrapHistoryTableReset, sonusRtcpSlotLinkAbsenceTrapStatTotalCount=sonusRtcpSlotLinkAbsenceTrapStatTotalCount, sonusRtcpSlotLinkAbsenceTrapStatLastTime=sonusRtcpSlotLinkAbsenceTrapStatLastTime, sonusRtcpSlotLinkAbsenceTrapStatusTable=sonusRtcpSlotLinkAbsenceTrapStatusTable, sonusRtcpNoRtpOrRtcpPacketsReceivedNotification=sonusRtcpNoRtpOrRtcpPacketsReceivedNotification, sonusRtcpSlotLinkLossTrapStatCount=sonusRtcpSlotLinkLossTrapStatCount, sonusRtcpMIBObjects=sonusRtcpMIBObjects, sonusRtcpShelfAdmnEntry=sonusRtcpShelfAdmnEntry, sonusRtcpSlotLinkLossTrapStatActive=sonusRtcpSlotLinkLossTrapStatActive, sonusRtcpMIB=sonusRtcpMIB, sonusRtcpSlotLinkAbsenceTrapStatCount=sonusRtcpSlotLinkAbsenceTrapStatCount, sonusRtcpSlotLinkLossTrapStatSlotIndex=sonusRtcpSlotLinkLossTrapStatSlotIndex, sonusRtcpSlotLinkAbsenceTrapStatStartTime=sonusRtcpSlotLinkAbsenceTrapStatStartTime, sonusRtcpMIBNotificationsObjects=sonusRtcpMIBNotificationsObjects, sonusRtcpSlotLinkLossTrapStatLastTime=sonusRtcpSlotLinkLossTrapStatLastTime, sonusRtcpPacketLossThresholdClearedNotification=sonusRtcpPacketLossThresholdClearedNotification, PYSNMP_MODULE_ID=sonusRtcpMIB, sonusRtcpShelfAdmnLossTrapHistoryTableReset=sonusRtcpShelfAdmnLossTrapHistoryTableReset, sonusRtcpSlotLinkAbsenceTrapStatSrcIpAddress=sonusRtcpSlotLinkAbsenceTrapStatSrcIpAddress, sonusRtcpSlotLinkAbsenceTrapStatShelfIndex=sonusRtcpSlotLinkAbsenceTrapStatShelfIndex, sonusRtcpSlotLinkLossTrapStatSrcIpAddress=sonusRtcpSlotLinkLossTrapStatSrcIpAddress, sonusRtcpShelfAdmnIndex=sonusRtcpShelfAdmnIndex, sonusRtcpSlotLinkLossTrapStatTotalCount=sonusRtcpSlotLinkLossTrapStatTotalCount, sonusRtcpSlotLinkLossTrapStatusEntry=sonusRtcpSlotLinkLossTrapStatusEntry, sonusRtcpSlotLinkLossTrapStatShelfIndex=sonusRtcpSlotLinkLossTrapStatShelfIndex, sonusRtcpSlotLinkLossTrapStatusTable=sonusRtcpSlotLinkLossTrapStatusTable, sonusRtcpShelfAdmnEstablishInterval=sonusRtcpShelfAdmnEstablishInterval, sonusRtcpShelfAdmnAbsenceTrapHistoryEntries=sonusRtcpShelfAdmnAbsenceTrapHistoryEntries, sonusRtcpSlotLinkAbsenceTrapStatusEntry=sonusRtcpSlotLinkAbsenceTrapStatusEntry, sonusRtcpMIBNotifications=sonusRtcpMIBNotifications, sonusRtcpShelfAdmnLossTrapHistoryEntries=sonusRtcpShelfAdmnLossTrapHistoryEntries, sonusRtcpSlotLinkLossTrapStatStartTime=sonusRtcpSlotLinkLossTrapStatStartTime, sonusRtcpShelfAdmnTable=sonusRtcpShelfAdmnTable, sonusRtcpShelfAdmnSrInterval=sonusRtcpShelfAdmnSrInterval, sonusRtcpNoRtpOrRtcpPacketsClearedNotification=sonusRtcpNoRtpOrRtcpPacketsClearedNotification, sonusRtcpMIBNotificationsPrefix=sonusRtcpMIBNotificationsPrefix, sonusRtcpSlotLinkAbsenceTrapStatActive=sonusRtcpSlotLinkAbsenceTrapStatActive)
|
import psycopg2
from logger import Logger
from dataclasses import dataclass
from constants.app_constants import Database
logger = Logger.get_info_logger()
@dataclass
class DatabaseServer(object):
"""
Gets data from the Postgres database server.
"""
@classmethod
def connect_database_and_fetch_data(cls):
connection = None
cursor = None
try:
connection = psycopg2.connect(
user=Database.DB_USER,
password=Database.DB_PASSWORD,
host=Database.DB_HOST,
port=Database.DB_PORT,
database=Database.DB_NAME
)
cursor = connection.cursor()
postgres_select_query = "SELECT row_to_json(products) FROM products" # NOQA
# Index products data
products_data = cls._index_products_data(
cursor=cursor,
sql_query=postgres_select_query
)
# Index extra information from 'product_info' table
# Extra information includes => brand_name, discount, etc
postgres_select_query = "SELECT row_to_json(products_extra_info) FROM products_extra_info" # NOQA
products_extra_info = cls._index_products_extra_info(
cursor=cursor,
sql_query=postgres_select_query
)
return products_data, products_extra_info
except (Exception, psycopg2.Error) as exc:
logger.info(
"Error reported from PostgreSQL: {}".format(exc),
exc_info=True
)
finally:
# Close the cursor and the database connection if they were opened
if cursor:
cursor.close()
if connection:
connection.close()
logger.info("Database connection is closed now.")
@staticmethod
def _index_products_data(cursor, sql_query: str):
cursor.execute(sql_query)
logger.info("Selecting rows from PRODUCTS table...")
products_data = cursor.fetchall()
return products_data
@staticmethod
def _index_products_extra_info(cursor, sql_query: str):
cursor.execute(sql_query)
logger.info("Selecting rows from PRODUCTS_EXTRA_INFO table...")
products_extra_info = cursor.fetchall()
return products_extra_info
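# Usage sketch (assumes the Database constants and the Logger configuration are in place):
#   products, extra_info = DatabaseServer.connect_database_and_fetch_data()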
|