"""
Bacteria Bomb Model
Created by Cameron Leighton
"""
#import the required modules
import csv
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import matplotlib.animation
import matplotlib.backends.backend_tkagg
import agentframework
import tkinter
import time
#creates the figure and the axes
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
#create the number of bacteria being modelled
num_of_bacteria = 500
#create an empty list to append the bacteria data to
bacteria = []
#create an empty list to append the environment to
environment = []
#create an empty list to append the bacteria that have hit the ground to
fallen = []
#create the environment
with open ('City.csv') as f:
#open CSV
readCSV = csv.reader(f,delimiter = ',')
#for each row in CSV it creates a new list
for row in readCSV:
rowlist = []
#for each value in the rows of the CSV it appends the integer
for value in row:
# print(float(value))
rowlist.append(int(value))
#the rowlist is then appended into the environment
environment.append(rowlist)
#this gives values to the conditions we will append to each bacterium, such as the x,y position, height (t) and whether or not it has fallen
t = None
x = None
y = None
has_fallen = 0
#append the x,y,t & has_fallen data to the bacteria for each individual bacterium
for i in range(num_of_bacteria):
bacteria.append(agentframework.Bacteria(bacteria,environment,has_fallen,fallen,t,x,y))
carry_on = True
# starts a clock for testing the time the model takes to run
start = time.perf_counter()
# defines the update in the model for each time the model moves
def update(frame_number):
# counter = 0
fig.clear()
global carry_on
#each time the model moves, the bacteria will do 3 things depending on whether conditions are met
for i in range(num_of_bacteria):
#here a switch is used so that if the bacterium has hit the ground the loop won't run
if ((len(bacteria))) != 0 and bacteria[i].has_fallen == 0:
#moves the bacteria
bacteria[i].move()
#makes the bacteria fall to the ground
bacteria[i].fall()
#changes the state of the bacteria if they hit the ground
bacteria[i].landed()
#plots the environment using matplotlib and shows the environment on the plot
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.imshow(environment)
#plots the bacteria; this was used in the testing process to make sure the bacteria moved and fell
for i in range(num_of_bacteria):
#simple switch again for the bacteria
if bacteria[i].has_fallen == 0:
#if the bacteria are still airborne then they are plotted in white
matplotlib.pyplot.scatter(bacteria[i]._x,bacteria[i]._y, marker = "*", color = 'white')
else:
#if the bacteria have landed they are plotted by a yellow cross
matplotlib.pyplot.scatter(bacteria[i]._x,bacteria[i]._y, marker = "x", color = 'yellow')
#create the stopping condition
def gen_function(b = [0]):
for i in range(num_of_bacteria):
global carry_on #Not actually needed as we're not assigning, but clearer
#while the list size of bacteria doesn't equal the list size of fallen and carry_on is true, the model will run
while (len(bacteria) != len(fallen)) and carry_on:
yield len(bacteria)
#if the list of fallen does = list size of bacteria then the model stops
if (len(fallen)) == (len(bacteria)):
carry_on = False
#writes the x and y data of every landed bacterium to a csv file so it can be inputted into the density program
with open('fallen.csv', mode='w') as fallen_list:
fallen_writer = csv.writer(fallen_list, delimiter=',')
#writes and prints the x and y of every landed bacterium
for landed in fallen:
fallen_writer.writerow([landed._x, landed._y])
print(landed._x, landed._y)
#prints to say all the bacteria have landed
print ("all the bacteria have landed")
#ends the clock
end = time.perf_counter()
#prints the run time of the model
print("time = " + str(end - start))
#creates the tkinter animations and the gui
def run():
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
canvas.draw()
root = tkinter.Tk()
root.wm_title("Bacteria Bomb")
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
model_menu = tkinter.Menu(menu_bar)
menu_bar.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
tkinter.mainloop()
|
# Load the website on local with Python WEBSERVER
from http import server, HTTPStatus
import sys
if __name__ == "__main__":
if(len(sys.argv) < 2):
print("Error: Missing argument...\nUsage: python webserver.py [PORT NUMBER]\n")
else:
handler = server.SimpleHTTPRequestHandler
adresse = 'localhost'
try:
port = int(sys.argv[1]) # Turn the port given as input into Integer
except ValueError:
print("Error: Port number invalid...\n")
else:
print("Serving on address: http://localhost:%d/" %(port,))
serveur = server.HTTPServer((adresse,port), handler)
serveur.serve_forever()
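# Example invocation (illustrative, using the file name from the usage message above):
#   python webserver.py 8000
# SimpleHTTPRequestHandler then serves the current working directory at http://localhost:8000/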
|
import mock
import unittest
import datetime
from . import mock_boto3
import timewarp.ec2
import timewarp.exceptions
class TestEc2Adapter(unittest.TestCase):
def setUp(self):
self.boto3 = mock_boto3.Boto3Mock()
self.invalidInstanceId = "abcdef"
def tearDown(self):
self.boto3.teardown()
def test_invalidInstanceId(self):
self.boto3.Instance(self.invalidInstanceId).load.side_effect = ValueError
with self.assertRaises(timewarp.exceptions.NoSuchVirtualMachine):
vm = timewarp.ec2.VirtualMachine(self.invalidInstanceId)
def test_missingInstanceId(self):
with self.assertRaises(timewarp.exceptions.NoSuchVirtualMachine):
vm = timewarp.ec2.VirtualMachine(None)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
"""5-7. Favorite Fruit: Make a list of your favorite fruits, and then write a series of
independent if statements that check for certain fruits in your list.
• Make a list of your three favorite fruits and call it favorite_fruits.
• Write five if statements. Each should check whether a certain kind of fruit
is in your list. If the fruit is in your list, the if block should print a statement,
such as You really like bananas!"""
frutas = ['Sandia', 'Melon', 'Piña']
if 'Sandia' in frutas:
print("Realmente te gusta la sandia!")
if 'Melon' in frutas:
print("Realmente te gusta el melon!")
if 'Piña' in frutas:
print("Realmente te gusta la piña!")
if 'Papaya' in frutas:
print("Realmente te gusta la papaya!")
if 'Guayaba' in frutas:
print("Realmente te gusta la guayaba!")
|
from classier.utils.PersistentDict import PersistentDict
import concurrent.futures
import threading
import subprocess
import os
TEST_FILE = "test_file.json"
if os.path.exists(TEST_FILE):
subprocess.call(["rm", TEST_FILE])
def write_to_test(val):
print(f"{threading.get_ident()} is writing {val}")
PersistentDict(TEST_FILE)['concurrency_test'] = val
print(f"{threading.get_ident()} is done writing {val}")
with concurrent.futures.ThreadPoolExecutor() as executor:
tasks = [executor.submit(write_to_test, i) for i in range(1000)]
results = [task.result() for task in tasks]
with open(TEST_FILE, "r") as f:
print(f"final value: {f.read()}")
subprocess.call(["rm", TEST_FILE])
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# headers for comments
FROM_SENTINEL_COMMENT_HDR = "From Sentinel"
FROM_SOAR_COMMENT_HDR = "From IBM SOAR"
SENT_TO_SENTINEL_HDR = "Sent to Sentinel"
SENTINEL_INCIDENT_NUMBER = "sentinel_incident_number"
|
import os
os.environ["JSII_DEBUG"] = "1"
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environments module."""
# TODO(b/155801943): Bring parallel_py_environment here once we're py3-only.
from tf_agents.environments import batched_py_environment
from tf_agents.environments import py_environment
from tf_agents.environments import random_py_environment
from tf_agents.environments import random_tf_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import trajectory_replay
from tf_agents.environments import utils
from tf_agents.environments import wrappers
|
#!/usr/bin/env python
'''
Sharan Multani
smultani@carbonblack.com
Script to list all devices in your org that have checked in after a specified time.
'''
import sys, datetime as dt
from cbapi.example_helpers import build_cli_parser, get_cb_defense_object
from cbapi.psc.defense import Device
def main():
parser = build_cli_parser("List devices")
device_options = parser.add_mutually_exclusive_group(required=False)
device_options.add_argument("-i", "--id", type=int, help="Device ID of sensor")
device_options.add_argument("-n", "--hostname", help="Hostname")
args = parser.parse_args()
cb = get_cb_defense_object(args)
'''
Create time object
This is used while comparing against the device check-in time
'''
time = dt.datetime(2019, 1, 1, 1, 1)
if args.id:
devices = [cb.select(Device, args.id)]
elif args.hostname:
devices = list(cb.select(Device).where("hostNameExact:{0}".format(args.hostname)))
else:
devices = list(cb.select(Device))
print("{0:9} {1:40}{2:18}{3}".format("ID", "Hostname", "IP Address", "Last Check-In Time"))
for device in devices:
#Use the time object to compare against device check-in time
if device.lastContact > time:
print("{0:9} {1:40s}{2:18s}{3}".format(device.deviceId, device.name or "None", device.lastInternalIpAddress or "Unknown", device.lastContact))
if __name__ == "__main__":
sys.exit(main())
|
# Generated by Django 2.1.5 on 2019-01-18 13:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0056_auto_20190114_0857'),
]
operations = [
migrations.RemoveField(
model_name='channel',
name='user',
),
migrations.RemoveField(
model_name='check',
name='user',
),
]
|
# Generated by Django 2.2.6 on 2019-12-17 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0044_alter_field_hist_freq_verbose_name_on_recordentry'),
]
operations = [
migrations.AddField(
model_name='eventtag',
name='name',
field=models.CharField(blank=True, max_length=2048, null=True),
),
]
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entity_sentiment_gcs")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Analyzing Entity Sentiment (GCS)
# description: Analyzing Entity Sentiment in text file stored in Cloud Storage
# usage: python3 samples/v1/language_entity_sentiment_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity-sentiment.txt"]
# [START language_entity_sentiment_gcs]
from google.cloud import language_v1
def sample_analyze_entity_sentiment(gcs_content_uri):
"""
Analyzing Entity Sentiment in text file stored in Cloud Storage
Args:
gcs_content_uri Google Cloud Storage URI where the file content is located.
e.g. gs://[Your Bucket]/[Path to File]
"""
client = language_v1.LanguageServiceClient()
# gcs_content_uri = 'gs://cloud-samples-data/language/entity-sentiment.txt'
# Available types: PLAIN_TEXT, HTML
type_ = language_v1.Document.Type.PLAIN_TEXT
# Optional. If not specified, the language is automatically detected.
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type})
# Loop through entities returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Get the aggregate sentiment expressed for this entity in the provided document.
sentiment = entity.sentiment
print(u"Entity sentiment score: {}".format(sentiment.score))
print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude))
# Loop over the metadata associated with entity. For many known entities,
# the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
# Some entity types may have additional metadata, e.g. ADDRESS entities
# may have metadata for the address street_name, postal_code, et al.
for metadata_name, metadata_value in entity.metadata.items():
print(u"{} = {}".format(metadata_name, metadata_value))
# Loop over the mentions of this entity in the input document.
# The API currently supports proper noun mentions.
for mention in entity.mentions:
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)
# Get the language of the text, which will be the same as
# the language specified in the request or, if not specified,
# the automatically-detected language.
print(u"Language of the text: {}".format(response.language))
# [END language_entity_sentiment_gcs]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_content_uri",
type=str,
default="gs://cloud-samples-data/language/entity-sentiment.txt",
)
args = parser.parse_args()
sample_analyze_entity_sentiment(args.gcs_content_uri)
if __name__ == "__main__":
main()
|
import requests
from flask import current_app
from app import config
class DigitalOcean(object):
base = 'https://api.digitalocean.com/v2/'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + config.DO_TOKEN
}
def make_req(self, method, endpoint, data=None):
url = self.base + endpoint
current_app.logger.info(f'{method.upper()} - {url}')
if method == 'post':
r = requests.post(url, timeout=8, headers=self.headers, json=data)
elif method == 'get':
r = requests.get(url, timeout=8, headers=self.headers)
elif method == 'delete':
r = requests.delete(url, timeout=8, headers=self.headers)
else:
return 'method not defined'
r.raise_for_status()
return r
# Account meta
def show_account(self):
r = self.make_req('get', 'account')
return r.json()['account']
# Domains and records
def create_record(self, name, ip_addr, type='A'):
data = {
'type': type,
'name': name,
'data': ip_addr
}
r = self.make_req('post', f'domains/{config.DO_DOMAIN}/records', data)
return r.json()['domain_record']
def delete_record(self, record_id):
if record_id:
route = f'domains/{config.DO_DOMAIN}/records/{record_id}'
r = self.make_req('delete', route)
if r.status_code == 204:
return True
else:
return False
else:
return False
# SSH Keys
def create_key(self, name, public_key):
data = {
'name': name,
'public_key': public_key.strip()
}
return self.make_req('post', 'account/keys', data)
def list_keys(self):
return self.make_req('get', 'account/keys')
# Volumes
def create_volume(self, name, region):
data = {
'name': name,
'size_gigabytes': int(config.DO_DROPLET_STORAGE_GB),
'region': region,
'filesystem_type': 'ext4'
}
r = self.make_req('post', 'volumes', data)
return r.json()['volume']
def list_volumes(self):
r = self.make_req('get', 'volumes')
return r.json()['volumes']
def check_volume_exists(self, name, region):
r = self.make_req('get', f'volumes?name={name}&region={region}')
if r.json()['volumes'] == list():
return (False, None)
else:
return (True, r.json()['volumes'][0]['id'])
def show_volume(self, volume_id):
r = self.make_req('get', f'volumes/{volume_id}')
return r.json()['volume']
# Droplets
def create_droplet(self, name, region, extra_vols=[]):
# First check that droplet name does not exist
de = self.check_droplet_exists(name)
if de[0]:
return self.show_droplet(de[1])
data = {
'name': name,
'region': region,
'size': config.DO_DROPLET_SIZE,
'image': config.DO_DROPLET_IMAGE,
'ssh_keys': [
int(config.DO_SSH_KEY)
],
'backups': False,
'ipv6': True,
'user_data': f'#!/bin/bash\nwget https://raw.githubusercontent.com/lalanza808/docker-monero-node/master/cloud-init.sh -q -O - | DOMAIN={name}.node.{config.DO_DOMAIN} ACME_EMAIL={config.ADMIN_EMAIL} GRAF_PASS={config.GRAF_PASS} GRAF_USER={config.GRAF_USER} bash',
'private_networking': None,
'volumes': extra_vols,
'tags': []
}
r = self.make_req('post', 'droplets', data)
return r.json()['droplet']
def destroy_droplet(self, droplet_id):
if droplet_id:
route = f'droplets/{droplet_id}/destroy_with_associated_resources/dangerous'
self.headers['X-Dangerous'] = 'true'
r = self.make_req('delete', route)
if r.status_code == 202:
return True
else:
return False
else:
return False
def show_droplet(self, droplet_id):
if droplet_id:
r = self.make_req('get', f'droplets/{droplet_id}')
return r.json()['droplet']
else:
return None
def check_droplet_exists(self, name):
r = self.make_req('get', 'droplets')
droplets = r.json()['droplets']
for d in droplets:
if d['name'] == name:
return (True, d['id'])
return (False, None)
# Pricing
def get_droplet_price_usd_per_hour(self, size):
sizes = {
's-1vcpu-1gb': .00744,
's-1vcpu-2gb': .015,
's-2vcpu-2gb': .022,
's-2vcpu-4gb': .030,
's-4vcpu-8gb': .060,
's-8vcpu-16gb': .119,
}
return sizes[size]
def get_volume_price_usd_per_hour(self, size_gb):
return round(0.105 * size_gb / 730, 3)
do = DigitalOcean()
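# A minimal usage sketch (added; not part of the original app): the pricing helpers can be
# exercised without any network call or Flask app context, assuming config defines
# DO_DROPLET_SIZE as one of the keys in the price table above.
if __name__ == '__main__':
    hourly = (do.get_droplet_price_usd_per_hour(config.DO_DROPLET_SIZE)
              + do.get_volume_price_usd_per_hour(int(config.DO_DROPLET_STORAGE_GB)))
    # 730 hours/month matches the convention used in get_volume_price_usd_per_hour
    print(f'estimated droplet + volume cost: ${hourly:.5f}/hour (~${hourly * 730:.2f}/month)')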
|
from twisted.conch import recvline
from twisted.conch.insults import insults
from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol
from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm
from twisted.internet import protocol
from twisted.application import internet, service
from twisted.cred import checkers, portal
clients = [] # We can probably search through the connections... but...
CTRL_C = '\x03'
CTRL_D = '\x04'
CTRL_BACKSLASH = '\x1c'
CTRL_L = '\x0c'
class DemoRecvLine(recvline.HistoricRecvLine):
"""Simple echo protocol.
Accepts lines of input and writes them back to its connection. If
a line consisting solely of \"quit\" is received, the connection
is dropped.
"""
def handle_CTRLD(self):
self.terminal.write("Received Control-D!")
self.terminal.nextLine()
def handle_CTRLC(self):
self.terminal.write("Received Control-C!")
self.terminal.nextLine()
def connectionMade(self):
recvline.HistoricRecvLine.connectionMade(self)
self.keyHandlers[CTRL_C] = self.handle_INT
self.keyHandlers[CTRL_D] = self.handle_EOF
self.keyHandlers[CTRL_L] = self.handle_FF
self.keyHandlers[CTRL_BACKSLASH] = self.handle_QUIT
for client in clients:
client.terminal.nextLine()
client.terminal.write("A new user has joined.")
client.terminal.nextLine()
client.drawInputLine()
clients.append(self)
#def connectionMade(self):
#self.interpreter = ManholeInterpreter(self, self.namespace)
def handle_INT(self):
self.terminal.nextLine()
self.terminal.write("KeyboardInterrupt")
self.terminal.nextLine()
self.terminal.write(self.ps[self.pn])
self.lineBuffer = []
self.lineBufferIndex = 0
def handle_EOF(self):
if self.lineBuffer:
self.terminal.write('\a')
else:
self.handle_QUIT()
def handle_FF(self):
"""
Handle a 'form feed' byte - generally used to request a screen
refresh/redraw.
"""
self.terminal.eraseDisplay()
self.terminal.cursorHome()
self.drawInputLine()
def handle_QUIT(self):
self.terminal.loseConnection()
def connectionLost(self, reason):
if self in clients:
clients.remove(self)
for client in clients:
client.terminal.nextLine()
client.terminal.write("A new user has disconnected.")
client.terminal.nextLine()
client.drawInputLine()
recvline.HistoricRecvLine.connectionLost(self, reason)
def lineReceived(self, line):
if line == "quit":
self.terminal.loseConnection()
self.terminal.write("You say, \"%s\"" % line)
self.terminal.nextLine()
self.drawInputLine()
for client in clients:
if client is self: continue
client.terminal.nextLine()
client.terminal.write("User says: %s" % line)
client.terminal.nextLine()
client.drawInputLine()
def makeService(factory, port, *args, **kw):
f = protocol.ServerFactory()
f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, insults.ServerProtocol, factory, *args, **kw)
tsvc = internet.TCPServer(port, f)
return tsvc
#m = service.MultiService()
#tsvc.setServiceParent(m)
#csvc.setServiceParent(m)
#return m
application = service.Application("Insults RecvLine Demo")
makeService(DemoRecvLine, 6023).setServiceParent(application)
|
#!/usr/bin/env python
import os,sys,damask
from optparse import OptionParser
import numpy as np
scriptID = '$Id: out_size.py 153 2015-11-06 14:32:50Z chakra34 $'.replace('\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
prints the size of the file (as ASCII table).
""", version = scriptID)
(options, filenames) = parser.parse_args()
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
readonly = True,
buffered = False,
)
except: continue
damask.util.report(scriptName,name)
table.head_read()
table.data_readArray()
print(table.data.shape[0], table.data.shape[1])
for i in range(table.data.shape[1]):
print(np.amax(table.data[:,i]))
table.close()
|
# Generated by Django 3.1.5 on 2021-02-01 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PRODUCTOS', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='productos',
name='pro_precio',
field=models.DecimalField(decimal_places=2, max_digits=8),
),
]
|
from django.contrib.auth.models import Permission
from django.db import models
from django.utils.translation import gettext_lazy as _
from .settings import MODEL_TREE, TREE_ITEMS_ALIASES
class CharFieldNullable(models.CharField):
"""We use custom char field to put nulls in SiteTreeItem 'alias' field.
That allows 'unique_together' directive in Meta to work properly, so
we don't have two site tree items with the same alias in the same site tree.
"""
def get_prep_value(self, value):
if value is not None:
if value.strip() == '':
return None
return self.to_python(value)
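# Illustrative note (added): because get_prep_value() stores '' as NULL, two tree items in the
# same tree may both leave 'alias' blank without violating the ('tree', 'alias') unique_together
# constraint, since the database does not treat NULLs as equal.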
class TreeBase(models.Model):
title = models.CharField(
_('Title'), max_length=100, help_text=_('Site tree title for presentational purposes.'), blank=True)
alias = models.CharField(
_('Alias'), max_length=80,
help_text=_('Short name to address site tree from templates.<br /><b>Note:</b> change with care.'),
unique=True, db_index=True)
class Meta:
abstract = True
verbose_name = _('Site Tree')
verbose_name_plural = _('Site Trees')
def get_title(self) -> str:
return self.title or self.alias
def __str__(self) -> str:
return self.alias
class TreeItemBase(models.Model):
PERM_TYPE_ANY = 1
PERM_TYPE_ALL = 2
PERM_TYPE_CHOICES = {
PERM_TYPE_ANY: _('Any'),
PERM_TYPE_ALL: _('All'),
}
title = models.CharField(
_('Title'), max_length=100,
help_text=_('Site tree item title. Can contain template variables E.g.: {{ mytitle }}.'))
hint = models.CharField(
_('Hint'), max_length=200,
help_text=_('Some additional information about this item that is used as a hint.'), blank=True, default='')
url = models.CharField(
_('URL'), max_length=200,
help_text=_('Exact URL or URL pattern (see "Additional settings") for this item.'), db_index=True)
urlaspattern = models.BooleanField(
_('URL as Pattern'),
help_text=_('Whether the given URL should be treated as a pattern.<br />'
'<b>Note:</b> Refer to Django "URL dispatcher" documentation (e.g. "Naming URL patterns" part).'),
db_index=True, default=False)
tree = models.ForeignKey(
MODEL_TREE, related_name='%(class)s_tree', on_delete=models.CASCADE, verbose_name=_('Site Tree'),
help_text=_('Site tree this item belongs to.'), db_index=True)
hidden = models.BooleanField(
_('Hidden'), help_text=_('Whether to show this item in navigation.'), db_index=True, default=False)
alias = CharFieldNullable(
_('Alias'), max_length=80,
help_text=_(
'Short name to address site tree item from a template.<br />'
'<b>Reserved aliases:</b> "%s".' % '", "'.join(TREE_ITEMS_ALIASES)
),
db_index=True, blank=True, null=True)
description = models.TextField(
_('Description'),
help_text=_('Additional comments on this item.'), blank=True, default='')
inmenu = models.BooleanField(
_('Show in menu'),
help_text=_('Whether to show this item in a menu.'), db_index=True, default=True)
inbreadcrumbs = models.BooleanField(
_('Show in breadcrumb path'),
help_text=_('Whether to show this item in a breadcrumb path.'), db_index=True, default=True)
insitetree = models.BooleanField(
_('Show in site tree'),
help_text=_('Whether to show this item in a site tree.'), db_index=True, default=True)
access_loggedin = models.BooleanField(
_('Logged in only'),
help_text=_('Check it to grant access to this item to authenticated users only.'),
db_index=True, default=False)
access_guest = models.BooleanField(
_('Guests only'),
help_text=_('Check it to grant access to this item to guests only.'), db_index=True, default=False)
access_restricted = models.BooleanField(
_('Restrict access to permissions'),
help_text=_('Check it to restrict user access to this item, using Django permissions system.'),
db_index=True, default=False)
access_permissions = models.ManyToManyField(
Permission, verbose_name=_('Permissions granting access'), blank=True)
access_perm_type = models.IntegerField(
_('Permissions interpretation'),
help_text=_('<b>Any</b> — user should have any of chosen permissions. '
'<b>All</b> — user should have all chosen permissions.'),
choices=PERM_TYPE_CHOICES.items(), default=PERM_TYPE_ANY)
# These two are for 'adjacency list' model.
# This is the current approach of tree representation for sitetree.
parent = models.ForeignKey(
'self', related_name='%(class)s_parent', on_delete=models.CASCADE, verbose_name=_('Parent'),
help_text=_('Parent site tree item.'), db_index=True, null=True, blank=True)
sort_order = models.IntegerField(
_('Sort order'),
help_text=_('Item position among other site tree items under the same parent.'), db_index=True, default=0)
def save(self, force_insert=False, force_update=False, **kwargs):
# Ensure that item is not its own parent, since this breaks
# the sitetree (and possibly the entire site).
if self.parent == self:
self.parent = None
# Set item's sort order to its primary key.
id_ = self.id
if id_ and self.sort_order == 0:
self.sort_order = id_
super().save(force_insert, force_update, **kwargs)
# Set item's sort order to its primary key if not already set.
if self.sort_order == 0:
self.sort_order = self.id
self.save()
class Meta:
abstract = True
verbose_name = _('Site Tree Item')
verbose_name_plural = _('Site Tree Items')
unique_together = ('tree', 'alias')
def __str__(self) -> str:
return self.title
class Tree(TreeBase):
"""Built-in tree class. Default functionality."""
class TreeItem(TreeItemBase):
"""Built-in tree item class. Default functionality."""
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import sys,unittest
sys.path.append("..")
from qiling import *
from qiling.exception import *
from test_elf import *
from test_posix import *
from test_qltool import *
if __name__ == "__main__":
unittest.main()
|
'''
XSSCon - 2019/2020
This project was created by menkrep1337 with 407Aex team.
Copyright under the MIT license
'''
import requests, json
##### Colors #######
N = '\033[0m'
W = '\033[1;37m'
B = '\033[1;34m'
M = '\033[1;35m'
R = '\033[1;31m'
G = '\033[1;32m'
Y = '\033[1;33m'
C = '\033[1;36m'
##### Styling ######
underline = "\033[4m"
##### Default ######
agent = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
line="—————————————————"
#####################
def session(proxies,headers,cookie):
r=requests.Session()
r.proxies=proxies
r.headers=headers
r.cookies.update(json.loads(cookie))
return r
logo=G+"""__ ______ ____ ____
\ \/ / ___/ ___| / ___|___ _ __
\ /\___ \___ \| | / _ \| '_ \ %s
/ \ ___) |__) | |__| (_) | | | | %s
/_/\_\____/____/ \____\___/|_| |_|
"""%(R+"{v0.5 Final}"+G,underline+C+"https://github.com/menkrep1337/XSSCon"+N+G)
|
"""
This script evaluates the size of the LWW linkable ring signatures
implemented in koppercoin.crypto.lww_signature
"""
from koppercoin.crypto.lww_signature import *
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import jsonpickle
from pympler import asizeof
import gc
# The ringsizes which will be tested
ringsizes = range(1, 21)
memsizes = pd.DataFrame(np.zeros(len(ringsizes)), index=ringsizes)
for ringsize in ringsizes:
print("Running LWW-Signature on Ringsize " + str(ringsize) + " from " + str(list(ringsizes)))
# generate key of other participants in the anonymity set
public_keys = [keygen()[1] for j in range(ringsize)]
m = "some message"
(sec, pub) = keygen()
public_keys.append(pub)
# generate the signature
sig = ringsign(public_keys, sec, m)
# Save its size
memsizes[0][ringsize] = asizeof.asizeof(sig)
print("Size of memsizes object is " + str(asizeof.asizeof(memsizes)) + " Bytes")
print("Number of gc-tracked objects: " + str(len(gc.get_objects())))
# Save the data in complicated JSON-format
with open('memsizes_LWWsig.json', 'w') as f:
json_obj = jsonpickle.encode(memsizes)
f.write(json_obj)
print("Running postprocessing steps")
# Save the data in handy .csv
memsizes.to_csv('memsizes_LWWsig.csv')
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# plot it
memsizes.plot(style='bo', legend=None)
plt.xlabel('Size of the Ring')
plt.ylabel('Memory Size in Bytes')
plt.title('Memory Measurements for LWW-signatures')
#plt.legend(loc='upper left')
plt.savefig('memsizes_LWWsig.png')
|
class Cocinero():
def prepararPlatillo(self):
self.cuchillo = Cuchillo()
self.cuchillo.cortarVegetales()
self.estufa = Estufa()
self.estufa.boilVegetables()
self.freidora = Freidora()
self.freidora.freirVegetales()
class Cuchillo():
def cortarVegetales(self):
print("Todos los vegetales fueron cortados")
class Estufa():
def boilVegetables(self):
print("Todos los vegetales fueron hervidos")
class Freidora():
def freirVegetales(self):
print("Todos los vegetales fueron mezclados y freidos")
if __name__ == "__main__":
cocinero = Cocinero()
cocinero.prepararPlatillo()
|
from __future__ import absolute_import, unicode_literals
import os
from .base import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
import dj_database_url
DATABASES['default'] = dj_database_url.config()
ALLOWED_HOSTS = [
os.environ['HOST_URL'],
os.environ['ACCESS_URL']
]
# Redirect to https in production
SECURE_SSL_REDIRECT = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
COMPRESS_CSS_HASHING_METHOD = 'content'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_REGION_NAME = os.environ['AWS_S3_REGION_NAME']
DEFAULT_FILE_STORAGE = os.environ['DEFAULT_FILE_STORAGE']
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_PORT = os.environ['EMAIL_PORT']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = os.environ['EMAIL_USE_TLS'] == 'True'
BASE_URL = os.environ['BASE_URL']
# Detailed logging using heroku logs --tail --app simple-data-tools on heroku cli
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
try:
from .local import *
except ImportError:
pass
|
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
def compute_scores_thresholding(gt, exp, threshold):
ret = np.max(exp) * threshold
binary_exp_array = exp > ret
TP = (binary_exp_array * gt).sum()
predict_pos = binary_exp_array.sum()
actual_pos = gt.sum()
precision = TP / predict_pos
recall = TP / actual_pos
f1_score = (2*precision*recall) / (precision + recall + 1e-6)
return precision, recall, f1_score
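# (Added note) e.g. with threshold=0.25 the explanation is binarized at a quarter of its maximum
# value; precision is then the fraction of above-threshold pixels that fall inside gt, and recall
# the fraction of gt pixels that are above the threshold.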
def compute_score_general(gt, exp):
auc_score = roc_auc_score(gt.flatten(), exp.flatten())
ap_score = average_precision_score(gt.flatten(), exp.flatten())
return auc_score, ap_score
class PointGame():
"""Pointing Game Evaluation Method.
More details can be found in the original paper:
https://arxiv.org/abs/1608.00507.
Note that the bounding box of annotations is required for the evaluation.
This method does not need models either. For API compatibility, we implement
it within the same functions as other evaluators.
"""
def __init__(self):
pass
def evaluate(self, bbox: tuple, exp_array: np.ndarray, threshold=0.25):
"""Main function, to evaluate whether the explanation is aligned with
the annotated bounding box.
Args:
bbox (tuple): A tuple of four integers: (x1, y1, x2, y2), where
(x1, y1) is the coordinates of the top-left point w.r.t.
width and height respectively;
(x2, y2) is the coordinates of the bottom-right point w.r.t.
width and height respectively;
exp_array (np.ndarray): An explanation of type np.ndarray.
threshold (float, optional): fraction of the explanation's maximum value used to binarize it. Defaults to 0.25.
Returns:
dict: containing 'precision', 'recall', 'f1_score' and 'auc_score',
'ap_score', where the first three depend on the threshold and the
last two do not.
"""
gt = np.zeros_like(exp_array, dtype=np.uint8)
x1, y1, x2, y2 = bbox
gt[y1:y2, x1:x2] = 1
# depends on the threshold
precision, recall, f1_score = compute_scores_thresholding(gt, exp_array, threshold)
r = {'precision': precision, 'recall': recall, 'f1_score': f1_score}
# independent of threshold
auc_score, ap_score = compute_score_general(gt, exp_array)
r.update( {'auc_score': auc_score, 'ap_score': ap_score} )
return r
class PointGameSegmentation():
"""Pointing Game Evaluation Method.
More details can be found in the original paper:
https://arxiv.org/abs/1608.00507.
Note that the pixelwise annotation is required for the evaluation.
This method does not need models either. For API compatibility, we implement
it within the same functions as other evaluators.
"""
def __init__(self):
pass
def evaluate(self, seg_gt: np.ndarray, exp_array: np.ndarray, threshold=0.25):
"""Main function, to evaluate whether the explanation is aligned with
the annotated segmentation.
Args:
seg_gt (np.ndarray): binary values are supported only currently.
exp_array (np.ndarray): An explanation of type np.ndarray.
threshold (float, optional): fraction of the explanation's maximum value used to binarize it. Defaults to 0.25.
Returns:
dict: containing 'precision', 'recall', 'f1_score' and 'auc_score',
'ap_score', where the first three depend on the threshold and the
last two do not.
"""
gt = seg_gt
# depends on the threshold
precision, recall, f1_score = compute_scores_thresholding(gt, exp_array, threshold)
r = {'precision': precision, 'recall': recall, 'f1_score': f1_score}
# independent of threshold
auc_score, ap_score = compute_score_general(gt, exp_array)
r.update( {'auc_score': auc_score, 'ap_score': ap_score} )
return r
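# A minimal usage sketch (added; hypothetical data): a synthetic 8x8 saliency map whose mass
# lies entirely inside the annotated box should score perfect precision/recall at the default
# threshold.
if __name__ == '__main__':
    exp = np.zeros((8, 8), dtype=np.float32)
    exp[2:5, 2:5] = 1.0            # explanation mass inside the box
    bbox = (2, 2, 5, 5)            # (x1, y1, x2, y2)
    print(PointGame().evaluate(bbox, exp, threshold=0.25))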
|
from __future__ import print_function
import os
import sys
import json
import unittest
from what import What
from kuyruk import Kuyruk, Config
config = Config()
config.from_pyfile('/tmp/kuyruk_config.py')
class LoaderTestCase(unittest.TestCase):
def test_load_single_file(self):
self._test_function_name(
'onefile.py',
'loader',
'onefile.print_message',
)
def test_load_directory(self):
self._test_function_name(
'main.py',
'loader/appdirectory',
'tasks.print_message',
)
def test_load_package(self):
self._test_function_name(
'-m apppackage.main',
'loader',
'apppackage.tasks.print_message',
)
def test_script_in_package(self):
self._test_function_name(
'-m apppackage.scripts.send_message',
'loader',
'apppackage.tasks.print_message',
)
def _test_function_name(self, args, cwd, name):
with Kuyruk(config=config).channel() as ch:
print(cwd, args, name)
ch.queue_delete("kuyruk")
# Every call sends a task to the queue
run_python(args, cwd=cwd)
# Can we load the task by name?
got = get_name()
assert got == name, got
def run_python(args, cwd):
dirname = os.path.dirname(__file__)
cwd = os.path.join(dirname, cwd)
What(sys.executable, *args.split(' '), cwd=cwd).expect_exit(0)
def get_name():
with Kuyruk(config=config).channel() as ch:
message = ch.basic_get("kuyruk")
desc = json.loads(message.body)
return '.'.join([desc['module'], desc['function']])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_cell
"""
from __future__ import print_function
import pytest
import numpy as np
from tunacell.base.cell import Cell, filiate_from_bpointer
from tunacell.base.colony import Colony, build_recursively_from_cells
## Fixture for cell
@pytest.fixture
def cells():
"""Builds a list of 10 cells with ids from '0' to '9'"""
cells = [Cell(identifier='0'), ]
for index in range(1, 10):
cells.append(Cell(identifier='{}'.format(index)))
return cells
@pytest.fixture
def binary_division_cells(cells):
# we want to label cells sequentially given their depth in filiation
# e.g. [[0], [1 2], [3 4 5 6], ...]
# first define how to slice an array of [0 1 2 3 4 5 6 ...]
sls = []
for k in range(1, 5):
sls.append(slice(sum((2**i for i in range(k-1))), sum((2**i for i in range(k)))))
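# (added note) for k = 1..4 these are slices [0:1], [1:3], [3:7] and [7:15], i.e. rows of
# 1, 2, 4 and 8 cells; with only 10 cells the last row is truncated to cells[7:10]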
# second, slice the array of 10 cells
ll = [cells[sl] for sl in sls]
# third associate parent
for irow, row in enumerate(ll[1:], start=1):
for icol, col in enumerate(row):
col.bpointer = ll[irow - 1][icol // 2].identifier
return cells
## Fixtures for colonies
@pytest.fixture
def tree():
"""Example from `treelib`_
.. _treelib: https://github.com/caesar0301/treelib/blob/master/examples/family_tree.py
"""
tree = Colony()
tree.create_node("Harry", "harry") # root node
tree.create_node("Jane", "jane", parent="harry")
tree.create_node("Bill", "bill", parent="harry")
tree.create_node("Diane", "diane", parent="jane")
tree.create_node("Mary", "mary", parent="diane")
tree.create_node("Mark", "mark", parent="jane")
return tree
## Test functions for cell
def test_cell_parent(cells):
root = cells[0]
for index in range(1, 10):
cell = cells[index]
cell.parent = root
for index in range(1, 10):
assert cells[index].parent == root
def test_cell_childs(cells):
root = cells[0]
for index in range(1, 10):
cell = cells[index]
root.childs = cell
assert root.childs == cells[1:]
def test_cell_division_timing(cells):
root = cells[0]
couples = [(int(root.identifier), t) for t in [0, 1, 2]]
root.data = np.array(couples, dtype=[('cellID', 'u2',), ('time', 'f8')])
for index in range(1, 10):
cell = cells[index]
times = [4, 5, 6]
couples = [(cell.identifier, t) for t in times]
cell.data = np.array(couples, dtype=[('cellID', 'u2',), ('time', 'f8')])
cell.parent = root
cell.set_division_event()
assert root.division_time == 3
for index in range(1, 10):
assert cells[index].birth_time == 3
def test_cell_build_filiation(binary_division_cells):
cells = binary_division_cells
# make filiation in place
filiate_from_bpointer(cells)
assert cells[1] in cells[0].childs
assert cells[2] in cells[0].childs
assert cells[1].parent == cells[0]
assert cells[2].parent == cells[0]
assert cells[3] in cells[1].childs
assert cells[4] in cells[1].childs
assert cells[3].parent == cells[1]
assert cells[4].parent == cells[1]
assert cells[5] in cells[2].childs
assert cells[6] in cells[2].childs
assert cells[5].parent == cells[2]
assert cells[6].parent == cells[2]
def test_colony_full_decomposition(tree):
tree.decompose(independent=False, seed=42)
assert tree._decomposition['independent'] is False
assert tree._decomposition['seed'] == 42
assert tree.idseqs == [['harry', 'bill'],
['harry', 'jane', 'diane', 'mary'],
['harry', 'jane', 'mark']]
def test_colony_independent_decomposition(tree):
tree.decompose(independent=True, seed=42)
assert tree._decomposition['independent'] is True
assert tree._decomposition['seed'] == 42
assert tree.idseqs == [['harry', 'jane', 'mark'], ['diane', 'mary'], ['bill']]
def test_colony_recursive_constructor(binary_division_cells):
"""This is effectively used in Container class"""
cells = binary_division_cells
filiate_from_bpointer(cells)
colonies = build_recursively_from_cells(cells)
assert len(colonies) == 1
colony = colonies[0]
assert colony.depth() == 3
assert colony.level('9') == 3
|
import time
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from cache_helpers.decorators import cache_page_forever, cache_result
from cache_helpers.views import CachePageMixin
from myapp.caches import get_cache_key_by_path
@cache_result(3)
def expensive_func(foo=None):
print('Expensive stuff start!!!', foo)
time.sleep(1)
print('Expensive stuff end!!!')
return 'ok'
class Mixin(object):
@method_decorator(cache_result(3))
def expensive_method(self, id, foo=None):
print('Expensive stuff start!!!', id, foo)
time.sleep(1)
print('Expensive stuff end!!!')
return 'ok'
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
print(self.expensive_method(3, foo='bar'))
# print(self.expensive_method.foo)
kwargs.update({
'user': self.request.user,
'lang': self.request.LANGUAGE_CODE,
})
return kwargs
class IndexView(Mixin, TemplateView):
template_name = 'myapp/index.html'
extra_context = {
'title': 'Index',
}
def my_cache(*args, **kwargs):
return cache_page_forever(15, key_func=get_cache_key_by_path)
class FartView(CachePageMixin, Mixin, TemplateView):
cache_timeout = 2
template_name = 'myapp/index.html'
extra_context = {
'title': 'Fart',
}
def cache_key_func(self, request, *args, **kwargs):
return request.path
@method_decorator(my_cache(), name='dispatch')
@method_decorator(never_cache, name='dispatch')
class FooView(Mixin, TemplateView):
template_name = 'myapp/index.html'
extra_context = {
'title': 'Foo',
}
@my_cache()
def bar_view(request, id, *args, **kwargs):
print(expensive_func(5))
return render(request, 'myapp/index.html', context={
'title': 'Bar',
'user': request.user,
'lang': request.LANGUAGE_CODE,
})
|
import datetime as dt
import numpy as np
import pandas as pd
import os
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Database Setup
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the tables
measurement = Base.classes.Measurement
station = Base.classes.Station
# Create session from Python to the DB
session = Session(engine)
# Flask Setup
app = Flask(__name__)
# Flask Routes
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start/<start><br/>"
f"/api/v1.0/startend/<start><end><br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Query the dates and temp from the last year
precip = session.query(measurement.date, measurement.prcp).filter(measurement.date > '2017-04-14').order_by(measurement.date).all()
# Create a dictionary from the row data and append it to a list of precipitation records
precip_data = []
for data in precip:
precip_dict = {}
precip_dict["date"] = measurement.date
precip_dict["prcp"] = measurement.prcp
precip_data.append(precip_dict)
return jsonify(precip)
@app.route("/api/v1.0/stations")
def stations():
# Return a json list of stations
station_list = session.query(station.station, station.name).all()
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
# Return a json list of tobs from the last year
tobs_data = session.query(measurement.date, measurement.tobs).filter(measurement.date > '2017-04-14').order_by(measurement.date).all()
return jsonify(tobs_data)
@app.route("/api/v1.0/start/<start>")
def start_date(start):
# Return a json list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
temp_min = session.query(func.min(measurement.tobs)).filter(measurement.date >= start).all()
temp_max = session.query(func.max(measurement.tobs)).filter(measurement.date >= start).all()
temp_avg = session.query(func.avg(measurement.tobs)).filter(measurement.date >= start).all()
# return temp_min, temp_max, temp_avg
return jsonify(temp_min, temp_max, temp_avg)
""" think there's a problem in my querying of the data. I can get values without func.min, but when I add that in my results are blank"""
@app.route("/api/v1.0/startend/<start><end>")
def start_end(start_end):
# Return a json list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
temp_min = session.query(func.min(measurement.tobs)).filter(measurement.date >= start).filter(measurement.date <= end).all()
temp_max = session.query(func.max(measurement.tobs)).filter(measurement.date >= start).filter(measurement.date <= end).all()
temp_avg = session.query(func.avg(measurement.tobs)).filter(measurement.date >= start).filter(measurement.date <= end).all()
# return temp_min, temp_max, temp_avg
return jsonify(temp_min, temp_max, temp_avg)
if __name__ == '__main__':
app.run(debug=True)
|
from django.db.models.query import QuerySet
from django.contrib import admin
from .models import Image
# https://docs.djangoproject.com/en/2.2/ref/contrib/admin/actions/#writing-action-functions
def delete_everywhere(modeladmin, request, queryset: QuerySet):
"""
Delete object both in Django and in MinIO too.
:param modeladmin: unused
:param request: unused
:param queryset: A QuerySet containing the set of objects selected by the user
:return:
"""
for obj in queryset:
obj.delete()
delete_everywhere.short_description = "Delete selected objects in Django and MinIO"
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('id', 'image',)
readonly_fields = ('id', )
model = Image
actions = [delete_everywhere, ]
|
from argparse import ArgumentParser
from pathlib import Path
import sys
from typing import Callable
import os
os.environ["MKL_THREADING_LAYER"] = "GNU"
import torch
import math
from tqdm import tqdm
# Setup path
new_path = Path().absolute()
sys.path.append(str(new_path))
import src.util.CudaUtil as CU
from src.controller.ASADControllerModels import (
get_dec_arch,
get_cls_arch,
)
from src.util.AuxUtil import AuxModelInfo, save_aux, load_aux_best
from src.generator.Generator import Generator
def _get_config() -> dict:
# Setup parser
parser = ArgumentParser()
parser.add_argument("--generator_name", type=str)
parser.add_argument("--decoder_name", type=str)
parser.add_argument("--classifier_name", type=str)
parser.add_argument("--epochs", type=int)
parser.add_argument("--iter_per_epoch", type=int)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lambda_mse", type=int)
return vars(parser.parse_args())
def main():
"""
Train decoder specified by the `decoder_name` parameter using generator
specified by the `generator_name` parameter.
Requires classifier named `classifier_name`
associated with the decoder.
! Needs to be extended when new generators are added.
* Now supports the following generators: unetgan, stylegan2ada
"""
config = _get_config()
# Dynamic import of generator
# * Add new generators here
if config["generator_name"] == "unetgan":
from environment.agent_unetgan import get_generator
from src.generator.UNetGANGenerator import UNetGANGenerator as Gen
elif config["generator_name"] == "stylegan2ada":
from environment.agent_stylegan2ada import get_generator
from src.generator.StyleGAN2ADAGenerator import StyleGAN2ADAGenerator as Gen
elif config["generator_name"] == "styleswin":
from environment.agent_styleswin import get_generator
from src.generator.StyleSwinGenerator import StyleSwinGenerator as Gen
else:
raise ValueError("Unknown generator_name: {}".format(config["generator_name"]))
# Get generator
G = get_generator()
_setup_decoder(config, Gen(None), G)
def _setup_decoder(
config: dict, gen: Generator, G: Callable[[torch.Tensor], torch.Tensor]
) -> None:
# Setup GPU
device = CU.get_default_device()
# Init decoder model
name = config["decoder_name"]
model = CU.to_device(get_dec_arch(gen), device)
# Init and load pretrained classifier model
cls_name = config["classifier_name"]
cls_model = get_cls_arch()
cls_model.load_state_dict(load_aux_best(cls_name).state)
cls_model = CU.to_device(cls_model, device)
cls_model.eval()
# Fit decoder
_fit_decoder(config, model, cls_model, device, name, gen, G)
def _decoder_loss(
control_vectors: torch.Tensor,
codes: torch.Tensor,
cls_model: Callable[[torch.Tensor], torch.Tensor],
G: Callable[[torch.Tensor], torch.Tensor],
config: dict,
) -> torch.Tensor:
# To avoid taking log of zero
eps = 0.000001
# According to ASAD paper
sum_dim = (1, 2, 3)
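# Added commentary (not from the original authors): minimising the two log terms drives the
# classifier output towards 0 on G(codes - control_vectors) and towards 1 on
# G(codes + control_vectors), so the learned control vectors toggle the attribute in both
# directions, while the two MSE terms keep the edited images close to the unedited G(codes).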
G_minus_n = G(codes - control_vectors)
c1 = torch.mean(torch.log(cls_model(G_minus_n) + eps))
mse1 = torch.mean(torch.sum((G_minus_n - G(codes)) ** 2, sum_dim))
# Take resolution into account to balance MSE vs classification loss
lambda_mse_res = ((256 / G_minus_n.shape[-1]) ** 2) / config["lambda_mse"]
del G_minus_n
CU.empty_cache()
C = c1 + torch.mean(torch.log(1 - cls_model(G(codes + control_vectors)) + eps))
MSE = mse1 + torch.mean(
torch.sum((G(codes + control_vectors) - G(codes)) ** 2, sum_dim)
)
return C + MSE * lambda_mse_res
def _fit_decoder(
config: dict,
model: Callable[[torch.Tensor], torch.Tensor],
cls_model: Callable[[torch.Tensor], torch.Tensor],
device: torch.device,
name: str,
gen: Generator,
G: Callable[[torch.Tensor], torch.Tensor],
) -> None:
CU.empty_cache()
# Losses
tr_losses = []
val_losses = []
# Create optimizers
opt = torch.optim.Adam(model.parameters())
# Setup batches
n_batches = math.floor(config["iter_per_epoch"] / config["batch_size"])
tr_percent = 0.8
tr_batches = math.floor(n_batches * tr_percent)
# Start training loop
for epoch in range(config["epochs"]):
# Create latent seed codes
print(f"Generating latent seed codes for epoch {epoch}...")
all_codes = gen.random_latent_code(n_batches * config["batch_size"]).astype(
"float32"
)
print(f"Successfully generated {all_codes.shape[0]} latent seed codes!")
avg_loss = 0
for batch_nr in tqdm(range(n_batches)):
# Get batch
codes = all_codes[
batch_nr * config["batch_size"] : (batch_nr + 1) * config["batch_size"]
]
# Send data to GPU
codes = CU.to_device(torch.from_numpy(codes), device)
if batch_nr + 1 >= tr_batches:
# Validate decoder
val_loss = _validate_decoder(config, model, cls_model, codes, G)
# Record losses
avg_loss += val_loss
val_losses.append((val_loss, batch_nr))
else:
# Train decoder
tr_loss = _train_decoder(
config, model, cls_model, device, codes, opt, G
)
# Record losses
tr_losses.append((tr_loss, batch_nr))
# Log losses & scores (last batch)
avg_loss = avg_loss / (n_batches - tr_batches)
print(
"Epoch [{}/{}], train loss: {:.4f}, val loss: {:.4f}".format(
epoch + 1, config["epochs"], tr_loss, avg_loss
)
)
# Save result
save_aux(
name,
AuxModelInfo(
model.state_dict(),
epoch + 1,
batch_nr + 1,
tr_batches,
tr_loss,
avg_loss,
),
)
pass
def _train_decoder(
config: dict,
model: Callable[[torch.Tensor], torch.Tensor],
cls_model: Callable[[torch.Tensor], torch.Tensor],
device: torch.device,
codes: torch.Tensor,
opt,
G: Callable[[torch.Tensor], torch.Tensor],
):
# Clear model gradients
opt.zero_grad()
CU.empty_cache()
# Enable autocast to reduce memory usage
with torch.autocast(str(device)):
# Get predictions
control_vectors = model(codes)
# Calc loss
loss = _decoder_loss(control_vectors, codes, cls_model, G, config)
# Update weights
loss.backward()
opt.step()
return loss.item()
def _validate_decoder(
config: dict,
model: Callable[[torch.Tensor], torch.Tensor],
cls_model: Callable[[torch.Tensor], torch.Tensor],
codes: torch.Tensor,
G: Callable[[torch.Tensor], torch.Tensor],
):
# Clear model gradients
model.eval()
CU.empty_cache()
with torch.no_grad():
# Get predictions
control_vectors = model(codes)
# Calc loss
loss = _decoder_loss(control_vectors, codes, cls_model, G, config)
model.train()
return loss.item()
if __name__ == "__main__":
main()
|
from time import time
from functools import partial
from qcodes import VisaInstrument, InstrumentChannel, ChannelList
from qcodes.utils import validators as vals
class DACException(Exception):
pass
class DacReader(object):
@staticmethod
def _dac_parse(resp):
"""
Parses responses from the DAC. They should take the form of "<cmd><resp>!"
This command returns the value of resp.
"""
resp = resp.strip()
if resp[-1] != "!":
raise DACException("Unexpected terminator on response: {}. Should end with \"!\"".format(resp))
return resp.strip()[1:-1]
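# Worked example (added note): a response of "B2!" received after selecting slot 2
# passes the terminator check and is parsed to the string "2".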
def _dac_v_to_code(self, volt):
"""
Convert a voltage to the internal dac code (number between 0-65536)
based on the minimum/maximum values of a given channel.
Midrange is 32768.
"""
if volt < self.min_val or volt >= self.max_val:
raise ValueError('Cannot convert voltage {} V '.format(volt) +
'to a voltage code, value out of range '
'({} V - {} V).'.format(self.min_val,
self.max_val))
frac = (volt - self.min_val) / (self.max_val - self.min_val)
val = int(round(frac * 65536))
# extra check to be absolutely sure that the instrument does not
# receive an out-of-bounds value
if val > 65535 or val < 0:
raise ValueError('Voltage ({} V) resulted in the voltage code {}'
', which is not within the allowed range.'
''.format(volt, val))
return val
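# Worked example (added note): with min_val = -5 V and max_val = +5 V, a request of 0 V gives
# frac = (0 - (-5)) / (5 - (-5)) = 0.5, so the code is round(0.5 * 65536) = 32768 (midrange),
# while -5 V maps to code 0; values at or rounding up to +5 V are rejected by the range checks.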
def _dac_code_to_v(self, code):
"""
Convert an internal dac code (number between 0-65536) back to a voltage
based on the minimum/maximum values of a given channel.
Midrange is 32768.
"""
frac = code/65536.0
return (frac * (self.max_val - self.min_val)) + self.min_val
def _set_slot(self):
"""
Set the active DAC slot
"""
resp = self.ask_raw("B{};".format(self._slot))
if int(self._dac_parse(resp)) != self._slot:
raise DACException("Unexpected return from DAC when setting slot: "
"{}. DAC slot may not have been set.".format(resp))
def _set_channel(self):
"""
Set the active DAC channel
"""
resp = self.ask_raw("B{};C{};".format(self._slot, self._channel))
if resp.strip() != "B{}!C{}!".format(self._slot, self._channel):
raise DACException("Unexpected return from DAC when setting channel: "
"{}. DAC channel may not have been set.".format(resp))
def _query_address(self, addr, count=1, versa_eeprom=False):
"""
Query the value at the dac address given.
Args:
addr (int): The address to query.
count (int): The number of bytes to query.
versa_eeprom(bool): do we want to read from the versadac (slot) EEPROM
"""
# Check if we actually have anything to query
if count == 0:
return 0
# Validate address
addr = int(addr)
if addr < 0 or addr > 1107296266:
raise DACException("Invalid address {}.".format(addr))
# Choose a poke command depending on whether we are querying a
# VERSADAC eeprom or main memory
# If we are writing to a VERSADAC, we must also set the slot.
if versa_eeprom:
self._set_slot()
query_command = "e;"
else:
query_command = "p;"
# Read a number of bytes from the device and convert to an int
val = 0
for i in range(count):
# Set DAC to point to address
ret = int(self._dac_parse(self.ask_raw("A{};".format(addr))))
if ret != addr:
raise DACException("Failed to set EEPROM address {}.".format(addr))
val += int(self._dac_parse(self.ask_raw(query_command))) << (32*(count-i-1))
addr += 1
return val
def _write_address(self, addr, val, versa_eeprom=False):
"""
Write a value to a given DAC address
Args:
addr (int): The address to query.
val (int): The value to write.
versa_eeprom(bool): do we want to write to the versadac (slot) EEPROM
"""
# Validate address
addr = int(addr)
if addr < 0 or addr > 1107296266:
raise DACException("Invalid address {}.".format(addr))
# Validate value
val = int(val)
if val < 0 or val >= 2**32:
raise DACException("Writing invalid value ({}) to address {}.".format(val, addr))
# Choose a poke command depending on whether we are querying a
# VERSADAC eeprom or main memory. If we are writing to a versadac channel
# we must also set the slot
if versa_eeprom:
query_command = "e;"
write_command = "E"
self._set_slot()
else:
query_command = "p;"
write_command = "P"
# Write the value to the DAC
# Set DAC to point to address
ret = int(self._dac_parse(self.ask_raw("A{};".format(addr))))
if ret != addr:
raise DACException("Failed to set EEPROM address {}.".format(addr))
self.ask_raw("{}{};".format(write_command, val))
# Check the write was successful
if int(self._dac_parse(self.ask_raw(query_command))) != val:
raise DACException("Failed to write value ({}) to address {}.".format(val, addr))
class DacChannel(InstrumentChannel, DacReader):
"""
A single DAC channel of the DECADAC
"""
_CHANNEL_VAL = vals.Ints(0, 3)
def __init__(self, parent, name, channel, min_val=-5, max_val=5):
super().__init__(parent, name)
# Validate slot and channel values
self._CHANNEL_VAL.validate(channel)
self._channel = channel
self._slot = self._parent._slot
# Calculate base address for querying channel parameters
# Note that the following values can be found using these offsets
# 0: Interrupt Period
# 4: DAC High Limit
# 5: DAC Low Limit
# 6: Slope (double)
# 8: DAC Value (double)
self._base_addr = 1536 + (16*4)*self._slot + 16*self._channel
# Store min/max voltages
assert(min_val < max_val)
self.min_val = min_val
self.max_val = max_val
# Add channel parameters
# Note we will use the older addresses to read the value from the dac rather than the newer
# 'd' command for backwards compatibility
self._volt_val = vals.Numbers(self.min_val, self.max_val)
self.add_parameter("volt", get_cmd=partial(self._query_address, self._base_addr+9, 1),
get_parser=self._dac_code_to_v,
set_cmd=self._set_dac, set_parser=self._dac_v_to_code, vals=self._volt_val,
label="channel {}".format(channel+self._slot*4), unit="V")
# The limit commands are used to sweep dac voltages. They are not safety features.
self.add_parameter("lower_ramp_limit", get_cmd=partial(self._query_address, self._base_addr+5),
get_parser=self._dac_code_to_v,
set_cmd="L{};", set_parser=self._dac_v_to_code, vals=self._volt_val,
label="Lower_Ramp_Limit", unit="V")
self.add_parameter("upper_ramp_limit", get_cmd=partial(self._query_address, self._base_addr+4),
get_parser=self._dac_code_to_v,
set_cmd="U{};", set_parser=self._dac_v_to_code, vals=self._volt_val,
label="Upper_Ramp_Limit", unit="V")
self.add_parameter("update_period", get_cmd=partial(self._query_address, self._base_addr),
get_parser=int, set_cmd="T{};", set_parser=int, vals=vals.Ints(50, 65535),
label="Update_Period", unit="us")
self.add_parameter("slope", get_cmd=partial(self._query_address, self._base_addr+6, 2),
get_parser=int, set_cmd="S{};", set_parser=int, vals=vals.Ints(-(2**32), 2**32),
label="Ramp_Slope")
# Manual parameters to control whether DAC channels should ramp to voltages or jump
self._ramp_val = vals.Numbers(0, 10)
self.add_parameter("enable_ramp", get_cmd=None, set_cmd=None, initial_value=False,
vals=vals.Bool())
self.add_parameter("ramp_rate", get_cmd=None, set_cmd=None, initial_value=0.1,
vals=self._ramp_val, unit="V/s")
# Add ramp function to the list of functions
self.add_function("ramp", call_cmd=self._ramp, args=(self._volt_val, self._ramp_val))
# If we have access to the VERSADAC (slot) EEPROM, we can set the initial
# value of the channel.
# NOTE: these values will be overwritten by a K3 calibration
if self._parent._VERSA_EEPROM_available:
_INITIAL_ADDR = [6, 8, 32774, 32776]
self.add_parameter("initial_value",
get_cmd=partial(self._query_address, _INITIAL_ADDR[self._channel], versa_eeprom=True),
get_parser=self._dac_code_to_v,
set_cmd=partial(self._write_address, _INITIAL_ADDR[self._channel], versa_eeprom=True),
set_parser=self._dac_v_to_code, vals=vals.Numbers(self.min_val, self.max_val))
def _ramp(self, val, rate, block=True):
"""
Ramp the DAC to a given voltage.
Params:
val (float): The voltage to ramp to in volts
rate (float): The ramp rate in units of volts/s
block (bool): Should the call block until the ramp is complete?
"""
# We need to know the current dac value (in raw units), as well as the update rate
c_volt = self.volt.get() # Current Voltage
if c_volt == val: # If we are already at the right voltage, we don't need to ramp
return
c_val = self._dac_v_to_code(c_volt) # Current voltage in DAC units
e_val = self._dac_v_to_code(val) # Endpoint in DAC units
t_rate = 1/(self.update_period.get() * 1e-6) # Number of refreshes per second
secs = abs((c_volt - val)/rate) # Number of seconds to ramp
# The formula to calculate the slope is: Number of DAC steps divided by the number of time
# steps in the ramp multiplied by 65536
slope = int(((e_val - c_val)/(t_rate*secs))*65536)
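# Worked example (all values assumed for illustration): with min_val=-5,
# max_val=5 and update_period=1000 us (t_rate = 1000 refreshes/s), ramping
# from 0 V (code 32768) to 1 V (code 39322) at 0.1 V/s gives secs = 10 and
# slope = int((6554 / (1000 * 10)) * 65536) = 42952.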
# Now let's set up our limits and ramp slope
if slope > 0:
self.upper_ramp_limit.set(val)
else:
self.lower_ramp_limit.set(val)
self.slope.set(slope)
# Block until the ramp is complete if block is True
if block:
while self.slope.get() != 0:
pass
def _set_dac(self, code):
"""
Set the voltage on the dac channel, ramping if the enable_ramp
parameter is set for this channel.
Params:
code (int): the DAC code to set the voltage to
"""
if self.enable_ramp.get():
self._ramp(self._dac_code_to_v(code), rate=self.ramp_rate.get())
else:
code = int(code)
self._set_channel()
self.ask_raw("U65535;L0;D{};".format(code))
def write(self, cmd):
"""
Overload write to set channel prior to any channel operations.
Since all commands are echoed back, we must keep track of responses
as well, otherwise commands receive the wrong response.
"""
self._set_channel()
return self.ask_raw(cmd)
def ask(self, cmd):
"""
Overload ask to set channel prior to operations
"""
self._set_channel()
return self.ask_raw(cmd)
class DacSlot(InstrumentChannel, DacReader):
"""
A single DAC Slot of the DECADAC
"""
_SLOT_VAL = vals.Ints(0, 4)
SLOT_MODE_DEFAULT = "Coarse"
def __init__(self, parent, name, slot, min_val=-5, max_val=5):
super().__init__(parent, name)
# Validate slot and channel values
self._SLOT_VAL.validate(slot)
self._slot = slot
# Store whether we have access to the VERSADAC EEPROM
self._VERSA_EEPROM_available = self._parent._VERSA_EEPROM_available
# Create a list of channels in the slot
channels = ChannelList(self, "Slot_Channels", parent.DAC_CHANNEL_CLASS)
for i in range(4):
channels.append(parent.DAC_CHANNEL_CLASS(self, "Chan{}".format(i), i,
min_val=min_val, max_val=max_val))
self.add_submodule("channels", channels)
# Set the slot mode. Valid modes are:
# Off: Channel outputs are disconnected from the input, grounded with 10MOhm.
# Fine: 2-channel mode. Channels 0 and 1 are output, use 2 and 3 for fine
# adjustment of Channels 0 and 1 respectively
# Coarse: All 4 channels are used as output
# FineCald: Calibrated 2-channel mode, with 0 and 1 output, 2 and 3 used
# automatically for fine adjustment. This mode only works for calibrated
# DecaDACs.
# Unfortunately there is no known way of reading the slot mode, hence it is
# set during initialization
if self._parent._cal_supported:
slot_modes = {"Off": 0, "Fine": 1, "Coarse": 2, "FineCald": 3}
else:
slot_modes = {"Off": 0, "Fine": 1, "Coarse": 2}
self.add_parameter('slot_mode', get_cmd="m;", get_parser=self._dac_parse, set_cmd="M{};",
val_mapping=slot_modes)
# Enable all slots in coarse mode.
self.slot_mode.set(self.SLOT_MODE_DEFAULT)
def write(self, cmd):
"""
Overload write to set channel prior to any channel operations.
Since all commands are echoed back, we must keep track of responses
as well, otherwise commands receive the wrong response.
"""
self._set_slot()
return self.ask_raw(cmd)
def ask(self, cmd):
"""
Overload ask to set channel prior to operations
"""
self._set_slot()
return self.ask_raw(cmd)
class Decadac(VisaInstrument, DacReader):
"""
The qcodes driver for the Decadac.
Each slot on the Decadac is to be treated as a separate
four-channel instrument.
Tested with a Decadac firmware revision number 14081 (Decadac 139).
The message strategy is the following: always keep the queue empty, so
that self.visa_handle.ask(XXX) will return the answer to XXX and not
some previous event.
Attributes:
_ramp_state (bool): If True, ramp state is ON. Default False.
_ramp_time (int): The ramp time in ms. Default 100 ms.
"""
DAC_CHANNEL_CLASS = DacChannel
DAC_SLOT_CLASS = DacSlot
def __init__(self, name, address, min_val=-5, max_val=5, **kwargs):
"""
Creates an instance of the Decadac instrument corresponding to one slot
on the physical instrument.
Args:
name (str): What this instrument is called locally.
address (str): The address of the DAC. For a serial port this is ASRLn::INSTR
where n is replaced with the address set in the VISA control panel.
Baud rate and other serial parameters must also be set in the VISA control
panel.
min_val (number): The minimum value in volts that can be output by the DAC.
This value should correspond to the DAC code 0.
max_val (number): The maximum value in volts that can be output by the DAC.
This value should correspond to the DAC code 65536.
"""
super().__init__(name, address, **kwargs)
# Do feature detection
self._feature_detect()
# Create channels
channels = ChannelList(self, "Channels", self.DAC_CHANNEL_CLASS, snapshotable=False)
slots = ChannelList(self, "Slots", self.DAC_SLOT_CLASS)
for i in range(5): # Create the 5 DAC slots
slots.append(self.DAC_SLOT_CLASS(self, "Slot{}".format(i), i, min_val, max_val))
channels.extend(slots[i].channels)
slots.lock()
channels.lock()
self.add_submodule("slots", slots)
self.add_submodule("channels", channels)
self.connect_message()
def set_all(self, volt):
"""
Set all dac channels to a specific voltage. If channels are set to ramp then the ramps
will occur in sequence, not simultaneously.
Args:
volt(float): The voltage to set all gates to.
"""
for chan in self.channels:
chan.volt.set(volt)
def ramp_all(self, volt, ramp_rate):
"""
Ramp all dac channels to a specific voltage at the given rate simultaneously. Note
that the ramps are not synchronized due to communications time and DAC ramps starting
as soon as the commands are in.
Args:
volt(float): The voltage to ramp all channels to.
ramp_rate(float): The rate in volts per second to ramp
"""
# Start all channels ramping
for chan in self.channels:
chan._ramp(volt, ramp_rate, block=False)
# Wait for all channels to complete ramping.
# The slope is reset to 0 once ramping is complete.
for chan in self.channels:
while chan.slope.get():
pass
def get_idn(self):
"""
Attempt to identify the dac. Since we don't have standard SCPI commands, ``*IDN`` will
do nothing on this DAC.
Returns:
A dict containing a serial and hardware version
"""
self._feature_detect()
return {"serial": self.serial_no, "hardware_version": self.version}
def connect_message(self, idn_param='IDN', begin_time=None):
"""
Print a connect message, taking into account the lack of a standard ``*IDN`` on
the Harvard DAC
Args:
begin_time (number): time.time() when init started.
Default is self._t0, set at start of Instrument.__init__.
"""
# Compute the elapsed time since initialisation began (self._t0 is set in
# Instrument.__init__).
t = time() - (begin_time or self._t0)
con_msg = ("Connected to Harvard DecaDAC "
"(hw ver: {}, serial: {}) in {:.2f}s".format(
self.version, self.serial_no, t))
print(con_msg)
def __repr__(self):
"""Simplified repr giving just the class and name."""
return '<{}: {}>'.format(type(self).__name__, self.name)
def _feature_detect(self):
"""
Detect which features are available on the DAC by querying various parameters.
"""
# Check whether EEPROM is installed
try:
if self._query_address(1107296256) == 21930:
self._EEPROM_available = True
else:
self._EEPROM_available = False
except DACException:
self._EEPROM_available = False
# Check whether we can set startup values for the DAC.
# This requires access to the EEPROM on each slot
try:
# Let's temporarily pretend to be slot 0
self._slot = 0
self._query_address(6, versa_eeprom=True)
del self._slot
except DACException:
self._VERSA_EEPROM_available = False
# Check whether calibration is supported
try:
if self._dac_parse(self.ask("k;")):
self._cal_supported = True
except DACException:
self._cal_supported = False
# Finally try and read the DAC version and S/N. This is only possible if the EEPROM
# is queryable.
if self._EEPROM_available:
self.version = self._query_address(1107296266)
self.serial_no = self._query_address(1107296264)
else:
self.version = 0
self.serial_no = 0
def write(self, cmd):
"""
Since all commands are echoed back, we must keep track of responses
as well, otherwise commands receive the wrong response. Hence
all writes must also read a response.
"""
return self.ask(cmd)
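# Illustrative usage sketch (not part of the driver; the VISA address is a
# placeholder and serial parameters are assumed to be configured in the VISA
# control panel):
#
#   dac = Decadac('dac', 'ASRL1::INSTR', min_val=-5, max_val=5)
#   dac.channels[0].enable_ramp.set(True)
#   dac.channels[0].ramp_rate.set(0.5)   # V/s
#   dac.channels[0].volt.set(1.0)        # ramps because enable_ramp is True
#   dac.set_all(0)                       # return every channel to 0 V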
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs training and eval on CQL-SAC on D4RL using the Actor-Learner API.
All default hyperparameters in train_eval come from the CQL paper:
https://arxiv.org/abs/2006.04779
"""
import os
from typing import Callable, Dict, Optional, Tuple, Union
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import reverb
import rlds
import tensorflow as tf
from tf_agents.agents.cql import cql_sac_agent
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import tf_py_environment
from tf_agents.examples.cql_sac.kumar20.d4rl_utils import load_d4rl
from tf_agents.metrics import py_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.replay_buffers import rlds_to_reverb
from tf_agents.specs import tensor_spec
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import strategy_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
FLAGS = flags.FLAGS
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
_REVERB_PORT = flags.DEFINE_integer(
'reverb_port', None,
'Port for reverb server, if None, use a randomly chosen unused port.')
flags.DEFINE_string('env_name', 'antmaze-medium-play-v0',
'Name of the environment.')
_DATASET_NAME = flags.DEFINE_string(
'dataset_name', 'd4rl_antmaze/medium-play-v0',
'RLDS dataset name. Please select the RLDS dataset '
'corresponding to D4RL environment chosen for training.')
flags.DEFINE_integer('learner_iterations_per_call', 500,
'Iterations per learner run call.')
flags.DEFINE_integer('policy_save_interval', 10000, 'Policy save interval.')
flags.DEFINE_integer('eval_interval', 10000, 'Evaluation interval.')
flags.DEFINE_integer('summary_interval', 1000, 'Summary interval.')
flags.DEFINE_integer('num_gradient_updates', 1000000,
'Total number of train iterations to perform.')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_param', None, 'Gin binding parameters.')
_DATA_TAKE = flags.DEFINE_integer(
'data_take', None, 'Number of steps to take for training '
'from RLDS dataset. If not specified, all steps are used '
'for training.')
_SEQUENCE_LENGTH = 2
_STRIDE_LENGTH = 1
@gin.configurable
def train_eval(
root_dir: str,
env_name: str,
dataset_name: str,
load_dataset_fn: Optional[Callable[[str], tf.data.Dataset]] = rlds.load,
# Training params
tpu: bool = False,
use_gpu: bool = False,
num_gradient_updates: int = 1000000,
actor_fc_layers: Tuple[int, ...] = (256, 256),
critic_joint_fc_layers: Tuple[int, ...] = (256, 256, 256),
# Agent params
batch_size: int = 256,
bc_steps: int = 0,
actor_learning_rate: types.Float = 3e-5,
critic_learning_rate: types.Float = 3e-4,
alpha_learning_rate: types.Float = 3e-4,
reward_scale_factor: types.Float = 1.0,
cql_alpha_learning_rate: types.Float = 3e-4,
cql_alpha: types.Float = 5.0,
cql_tau: types.Float = 10.0,
num_cql_samples: int = 10,
reward_noise_variance: Union[types.Float, tf.Variable] = 0.0,
include_critic_entropy_term: bool = False,
use_lagrange_cql_alpha: bool = True,
log_cql_alpha_clipping: Optional[Tuple[types.Float, types.Float]] = None,
softmax_temperature: types.Float = 1.0,
# Data and Reverb Replay Buffer params
reward_shift: types.Float = 0.0,
action_clipping: Optional[Tuple[types.Float, types.Float]] = None,
data_shuffle_buffer_size: int = 100,
data_prefetch: int = 10,
data_take: Optional[int] = None,
pad_end_of_episodes: bool = False,
reverb_port: Optional[int] = None,
min_rate_limiter: int = 1,
# Others
policy_save_interval: int = 10000,
eval_interval: int = 10000,
summary_interval: int = 1000,
learner_iterations_per_call: int = 1,
eval_episodes: int = 10,
debug_summaries: bool = False,
summarize_grads_and_vars: bool = False,
seed: Optional[int] = None) -> None:
"""Trains and evaluates CQL-SAC.
Args:
root_dir: Training eval directory
env_name: Environment to train on.
dataset_name: RLDS dataset name for the environment to train on.
load_dataset_fn: A function that will return an instance of a
tf.data.Dataset for RLDS data to be used for training.
tpu: Whether to use TPU for training.
use_gpu: Whether to use GPU for training.
num_gradient_updates: Number of gradient updates for training.
actor_fc_layers: Optional list of fully_connected parameters for actor
distribution network, where each item is the number of units in the layer.
critic_joint_fc_layers: Optional list of fully connected parameters after
merging observations and actions in critic, where each item is the number
of units in the layer.
batch_size: Batch size for sampling data from Reverb Replay Buffer.
bc_steps: Number of behavioral cloning steps.
actor_learning_rate: The learning rate for the actor network. It is used in
Adam optimiser for actor network.
critic_learning_rate: The learning rate for the critic network. It is used
in Adam optimiser for critic network.
alpha_learning_rate: The learning rate to tune the entropy alpha. It is used
in the Adam optimiser for alpha.
reward_scale_factor: Multiplicative scale for the reward.
cql_alpha_learning_rate: The learning rate to tune cql_alpha.
cql_alpha: The weight on CQL loss. This can be a tf.Variable.
cql_tau: The threshold for the expected difference in Q-values which
determines the tuning of cql_alpha.
num_cql_samples: Number of samples for importance sampling in CQL.
reward_noise_variance: The noise variance to introduce to the rewards.
include_critic_entropy_term: Whether to include the entropy term in the
target for the critic loss.
use_lagrange_cql_alpha: Whether to use a Lagrange threshold to tune
cql_alpha during training.
log_cql_alpha_clipping: (Minimum, maximum) values to clip log CQL alpha.
softmax_temperature: Temperature value which weights Q-values before the
`cql_loss` logsumexp calculation.
reward_shift: Shift rewards for each experience sample by the value provided.
action_clipping: (Minimum, maximum) values to clip actions for each experience
sample.
data_shuffle_buffer_size: Shuffle buffer size for the interleaved dataset.
data_prefetch: Number of data points to prefetch for training from Reverb
Replay Buffer.
data_take: Number of steps to take for training from RLDS dataset. If not
specified, all steps are used for training.
pad_end_of_episodes: Whether to pad end of episodes.
reverb_port: Port to start the Reverb server. If not provided, a randomly
chosen port is used.
min_rate_limiter: Reverb min rate limiter.
policy_save_interval: How often, in train steps, the trigger will save.
eval_interval: Number of train steps in between evaluations.
summary_interval: Number of train steps in between summaries.
learner_iterations_per_call: Iterations per learner run call.
eval_episodes: Number of episodes evaluated per run call.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
seed: Optional seed for tf.random.
"""
logging.info('Training CQL-SAC on: %s', env_name)
tf.random.set_seed(seed)
np.random.seed(seed)
# Load environment.
env = load_d4rl(env_name)
tf_env = tf_py_environment.TFPyEnvironment(env)
strategy = strategy_utils.get_strategy(tpu, use_gpu)
# Create dataset of TF-Agents trajectories from RLDS D4RL dataset.
#
# The RLDS dataset will be converted to trajectories and pushed to Reverb.
rlds_data = load_dataset_fn(dataset_name)
trajectory_data_spec = rlds_to_reverb.create_trajectory_data_spec(rlds_data)
table_name = 'uniform_table'
table = reverb.Table(
name=table_name,
max_size=data_shuffle_buffer_size,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(min_rate_limiter),
signature=tensor_spec.add_outer_dim(trajectory_data_spec))
reverb_server = reverb.Server([table], port=reverb_port)
reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(
trajectory_data_spec,
sequence_length=_SEQUENCE_LENGTH,
table_name=table_name,
local_server=reverb_server)
rb_observer = reverb_utils.ReverbAddTrajectoryObserver(
reverb_replay.py_client,
table_name,
sequence_length=_SEQUENCE_LENGTH,
stride_length=_STRIDE_LENGTH,
pad_end_of_episodes=pad_end_of_episodes)
def _transform_episode(episode: tf.data.Dataset) -> tf.data.Dataset:
"""Apply reward_shift and action_clipping to RLDS episode.
Args:
episode: An RLDS episode dataset of RLDS steps datasets.
Returns:
An RLDS episode after applying action clipping and reward shift.
"""
def _transform_step(
rlds_step: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
"""Apply reward_shift and action_clipping to RLDS step.
Args:
rlds_step: An RLDS step is a dictionary of tensors containing is_first,
is_last, observation, action, reward, is_terminal, and discount.
Returns:
An RLDS step after applying action clipping and reward shift.
"""
rlds_step[rlds.REWARD] = rlds_step[rlds.REWARD] + reward_shift
if action_clipping:
rlds_step[rlds.ACTION] = tf.clip_by_value(
rlds_step[rlds.ACTION],
clip_value_min=action_clipping[0],
clip_value_max=action_clipping[1])
return rlds_step
episode[rlds.STEPS] = episode[rlds.STEPS].map(_transform_step)
return episode
if data_take:
rlds_data = rlds_data.take(data_take)
if reward_shift or action_clipping:
rlds_data = rlds_data.map(_transform_episode)
rlds_to_reverb.push_rlds_to_reverb(rlds_data, rb_observer)
def _experience_dataset() -> tf.data.Dataset:
"""Reads and returns the experiences dataset from Reverb Replay Buffer."""
return reverb_replay.as_dataset(
sample_batch_size=batch_size,
num_steps=_SEQUENCE_LENGTH).prefetch(data_prefetch)
# Create agent.
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
with strategy.scope():
train_step = train_utils.create_train_step()
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
fc_layer_params=actor_fc_layers,
continuous_projection_net=tanh_normal_projection_network
.TanhNormalProjectionNetwork)
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
agent = cql_sac_agent.CqlSacAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.keras.optimizers.Adam(
learning_rate=actor_learning_rate),
critic_optimizer=tf.keras.optimizers.Adam(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.keras.optimizers.Adam(
learning_rate=alpha_learning_rate),
cql_alpha=cql_alpha,
num_cql_samples=num_cql_samples,
include_critic_entropy_term=include_critic_entropy_term,
use_lagrange_cql_alpha=use_lagrange_cql_alpha,
cql_alpha_learning_rate=cql_alpha_learning_rate,
target_update_tau=5e-3,
target_update_period=1,
random_seed=seed,
cql_tau=cql_tau,
reward_noise_variance=reward_noise_variance,
num_bc_steps=bc_steps,
td_errors_loss_fn=tf.math.squared_difference,
gamma=0.99,
reward_scale_factor=reward_scale_factor,
gradient_clipping=None,
log_cql_alpha_clipping=log_cql_alpha_clipping,
softmax_temperature=softmax_temperature,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step)
agent.initialize()
# Create learner.
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
collect_env_step_metric = py_metrics.EnvironmentSteps()
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
agent,
train_step,
interval=policy_save_interval,
metadata_metrics={
triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
}),
triggers.StepPerSecondLogTrigger(train_step, interval=100)
]
cql_learner = learner.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn=_experience_dataset,
triggers=learning_triggers,
summary_interval=summary_interval,
strategy=strategy)
# Create actor for evaluation.
tf_greedy_policy = greedy_policy.GreedyPolicy(agent.policy)
eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_greedy_policy, use_tf_function=True)
eval_actor = actor.Actor(
env,
eval_greedy_policy,
train_step,
metrics=actor.eval_metrics(eval_episodes),
summary_dir=os.path.join(root_dir, 'eval'),
episodes_per_run=eval_episodes)
# Run.
dummy_trajectory = trajectory.mid((), (), (), 0., 1.)
num_learner_iterations = int(num_gradient_updates /
learner_iterations_per_call)
for _ in range(num_learner_iterations):
# Mimic collecting environment steps since we loaded a static dataset.
for _ in range(learner_iterations_per_call):
collect_env_step_metric(dummy_trajectory)
cql_learner.run(iterations=learner_iterations_per_call)
if eval_interval and train_step.numpy() % eval_interval == 0:
eval_actor.run_and_log()
def main(_):
logging.set_verbosity(logging.INFO)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
train_eval(
root_dir=FLAGS.root_dir,
dataset_name=_DATASET_NAME.value,
env_name=FLAGS.env_name,
tpu=FLAGS.tpu,
use_gpu=FLAGS.use_gpu,
num_gradient_updates=FLAGS.num_gradient_updates,
policy_save_interval=FLAGS.policy_save_interval,
eval_interval=FLAGS.eval_interval,
summary_interval=FLAGS.summary_interval,
learner_iterations_per_call=FLAGS.learner_iterations_per_call,
reverb_port=_REVERB_PORT.value,
data_take=_DATA_TAKE.value)
if __name__ == '__main__':
app.run(main)
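# Illustrative invocation (the module name is a placeholder; flag values are
# examples only):
#
#   python cql_sac_train_eval.py \
#     --root_dir=/tmp/cql_sac \
#     --env_name=antmaze-medium-play-v0 \
#     --dataset_name=d4rl_antmaze/medium-play-v0 \
#     --eval_interval=10000 \
#     --data_take=100000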
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
class HelloView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
content = {'message': 'Hello, World!'}
return Response(content)
def post(self, request):
if not request.data:
return Response({"message": "Please provide username and password"}, status='400')
|
from pathlib import Path
import torch
import torch.nn as nn
import torch.utils.data as data
from PIL import Image
from torchvision import transforms
import torchvision.models.resnet as resnet
import argparse
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
class TrainDataset(data.Dataset):
def __init__(self, datapath, transform=None):
self.datalist = list(datapath.glob("*.png"))
self.transform = transform
def __len__(self):
return len(self.datalist)
def __getitem__(self, index):
p = self.datalist[index]
img = Image.open(p)
rgb = img.convert("RGB")
hsv = img.convert("HSV")
if self.transform:
rgb, hsv = self.transform(rgb), self.transform(hsv)
return rgb, hsv
class ResNext_block(nn.Module):
def __init__(self):
super(ResNext_block, self).__init__()
self.conv1 = nn.Conv2d(64, 32, 1, 1)
self.conv2 = nn.Conv2d(32, 32, 3, 1, 1)
self.conv3 = nn.Conv2d(32, 64, 1, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
y = self.conv1(x)
y = self.relu(y)
y = self.conv2(y)
y = self.relu(y)
y = self.conv3(y)
y += x
y = self.relu(y)
return y
class Rgb2hsv(nn.Module):
def __init__(self):
super(Rgb2hsv, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 1, 1)
self.conv2 = nn.Conv2d(32, 64, 1, 1)
self.conv3 = nn.Conv2d(64, 64, 1, 1)
self.conv4 = nn.Conv2d(64, 64, 1, 1)
self.conv5 = nn.Conv2d(64, 32, 1, 1)
self.conv6 = nn.Conv2d(32, 3, 1, 1)
self.res_blocks = nn.Sequential(*[ResNext_block() for _ in range(5)])
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, rgb):
hsv = self.conv1(rgb)
hsv = self.relu(hsv)
hsv = self.conv2(hsv)
hsv = self.relu(hsv)
hsv = self.conv3(hsv)
hsv = self.relu(hsv)
hsv = self.res_blocks(hsv)
hsv = self.conv4(hsv)
hsv = self.relu(hsv)
hsv = self.conv5(hsv)
hsv = self.relu(hsv)
hsv = self.conv6(hsv)
hsv = self.relu(hsv)
return hsv
def get_dataset(args):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
dataset = TrainDataset(args.dataset, transform)
loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
return loader
def train(args):
loader = get_dataset(args)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.autograd.set_detect_anomaly(True)
log_dir = Path('./rgb2hsv/log')
if not log_dir.exists():
log_dir.mkdir(parents=True)
writer = SummaryWriter(str(log_dir))
model = nn.DataParallel(Rgb2hsv()).to(device)
l1 = nn.L1Loss().to(device)
# opti = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
opti = torch.optim.SGD(model.parameters(), lr=args.lr)
for epoch in range(args.epoch):
print(f"epoch: {epoch + 1}")
model.train()
max_iter = len(loader.dataset) // args.batch_size
for iter, (rgb, hsv) in enumerate(tqdm(loader, ncols=80)):
if iter >= max_iter:
break
rgb, hsv = rgb.to(device), hsv.to(device)
opti.zero_grad()
loss = l1(model(rgb), hsv)
loss.backward()
opti.step()
writer.add_scalar("l1", loss.item(), iter+epoch*max_iter)
save(model, epoch)
def save(model, epoch):
p = Path("./rgb2hsv/")
if not p.exists():
p.mkdir()
torch.save({
"rgb2hsv": model.state_dict(),
"epoch": epoch
}, str(p / f"rgb2hsv_{epoch}_epoch.pkl"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="./dataset/rgb_train")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--epoch", type=int, default=50)
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--beta1", type=float, default=0.5, help="Adam optimizer parameter")
parser.add_argument("--beta2", type=float, default=0.999, help="Adam optimizer parameter")
args = parser.parse_args()
args.dataset = Path(args.dataset)
train(args)
if __name__ == "__main__":
main()
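# Illustrative inference sketch (not part of the training script; the checkpoint
# name is an assumption based on save()). The model was saved wrapped in
# nn.DataParallel, so it is reloaded the same way before loading the state dict:
#
#   device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#   model = nn.DataParallel(Rgb2hsv()).to(device)
#   ckpt = torch.load("./rgb2hsv/rgb2hsv_49_epoch.pkl", map_location=device)
#   model.load_state_dict(ckpt["rgb2hsv"])
#   model.eval()
#   with torch.no_grad():
#       hsv_pred = model(rgb_batch)  # rgb_batch: normalized (N, 3, H, W) tensor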
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from glob import glob
from itertools import product
from os.path import normpath, realpath
from pathlib import Path
from subprocess import PIPE, STDOUT, run
from sys import argv
from vang.pio.shell import run_commands
def get_work_dirs(find, root):
return [
Path(realpath(p)).parent for p in glob(
normpath(f'{root}/**/{find}'),
recursive=True,
)
]
def get_command(commands):
return [' && '.join(commands)]
def execute_in_parallel(root, commands, find='.git/'):
commands_and_work_dirs = tuple(
product(
get_command(commands),
get_work_dirs(find, root),
))
yield from run_commands(commands_and_work_dirs, max_processes=25,
check=False)
def execute_in_sequence(root, commands, find='.git/', timeout=None):
command = get_command(commands)[0]
for cwd in get_work_dirs(find, root):
yield run(
command,
cwd=cwd,
stdout=PIPE,
stderr=STDOUT,
check=False,
timeout=timeout,
shell=True)
def main(root, commands, find='.git/', sequence=False):
execute = execute_in_sequence if sequence else execute_in_parallel
for process in execute(root, commands, find):
print(process.stdout.decode(errors="ignore").strip())
def parse_args(args):
parser = ArgumentParser(
description='Execute commands recursively (default in git repositories)'
)
parser.add_argument('commands', nargs='+', help='Commands')
parser.add_argument('-r', '--root', help='The root directory', default='.')
parser.add_argument(
'-f',
'--find',
help='The file/dir to be in the directory in which to execute, '
'default ".git/"',
default='.git/')
parser.add_argument(
'-s',
'--sequence',
help='Run the commands in sequence, i.e. not in parallel',
action='store_true')
return parser.parse_args(args)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
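# Illustrative invocations (the script name is a placeholder):
#
#   ./execute_recursively.py 'git fetch --all' 'git status -s' -r ~/src
#   ./execute_recursively.py 'mvn -q clean install' -f pom.xml -r ~/projects -s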
|
#! /usr/bin/env python3
import sys
import mysql.connector
from mysql.connector import errorcode
class EventDB(object):
db_targets = {}
def Connect(self, conf):
db_dsn = {
'user': conf['db_user'],
'password': conf['db_pass'],
'host': conf['db_host'],
'database': conf['db_name'],
'port': conf['db_port'],
}
try:
self.cnx = mysql.connector.connect(**db_dsn)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise Exception("Database access denied")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise Exception("Specified database does not exist")
else:
raise Exception("An error occured on database connection")
self.cnx = None
def _get_new_cursor(self):
if self.cnx is None:
raise Exception("Database is not connected")
return self.cnx.cursor()
def Close(self):
if self.cnx is not None:
self.cnx.close()
def ListSchemes(self, summary):
cursor = self._get_new_cursor()
cursor.execute("SELECT * FROM schemes WHERE %s LIKE exec_cond",
[summary])
row_ids = cursor.column_names
ret = {}
for row_raw in cursor:
row = dict(zip(row_ids, row_raw))
ret[row['id']] = row
cursor.close()
return ret
def ListNoticesBySource(self, source):
cursor = self._get_new_cursor()
cursor.execute("SELECT * FROM notices WHERE source = %s AND fired = 0",
[source])
row_ids = cursor.column_names
ret = {}
for row_raw in cursor:
row = dict(zip(row_ids, row_raw))
ret[row['id']] = row
cursor.close()
return ret
def CancelNotice(self, id):
cursor = self._get_new_cursor()
cursor.execute("UPDATE notices SET fired = 2 WHERE id = %s", [id])
self.cnx.commit()
if cursor.rowcount != 1:
print("Delete failed {}".format(id))
cursor.close()
return None
cursor.close()
return id
def AddNotice(self, val):
cursor = self._get_new_cursor()
cursor.execute("INSERT INTO notices (target, content, tid, source, url, description) VALUES (FROM_UNIXTIME(%s), %s, %s, %s, %s, %s)",
[val['target'], val['content'], val['tid'], val['source'],
val['url'], val['description']])
self.cnx.commit()
if cursor.rowcount != 1:
print("Add new notice failed")
cursor.close()
return None
lid = cursor.lastrowid
cursor.close()
return lid
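# Illustrative usage sketch (connection values are placeholders):
#
#   db = EventDB()
#   db.Connect({'db_user': 'events', 'db_pass': 'secret', 'db_host': 'localhost',
#               'db_name': 'events', 'db_port': 3306})
#   pending = db.ListNoticesBySource('rss')
#   db.Close()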
|
import os
import lib.snip as snip
import lib.translate as translate
from pynput import keyboard
def pull_and_process():
try:
area = snip.GetArea()
image = area.snip()
translated_string = translate.translate(image, language[choice])
print(translated_string)
except SystemError:
print("Bad box selection. Requires selecting with top left corner, then bottom right corner.\r\n")
except KeyboardInterrupt:
print("Goodbye")
exit()
class RunApp:
def __init__(self):
with keyboard.GlobalHotKeys({'<ctrl>+<alt>+t': self.on_activate_t}) as self.l:
self.l.join()
def on_activate_t(self):
print("Now select your text.")
pull_and_process()
n = 1
language = ['rus', 'chi_sim', 'chi_tra', 'kor', 'fra', 'Arabic']
loop = True
print("Easy OCR + Translate through a terminal - Without Cut and Paste!")
print("Choose your language")
for lan in language:
print(str(n) + ": " + str(lan))
n = n + 1
while loop:
try:
choice = int(input()) - 1
loop = False
except ValueError:
print("Bad selection, try again.")
loop = True
os.system("clear")
print("You chose " + language[choice])
print("Press \"Ctrl+Alt+T\" to begin.")
while True:
try:
app = RunApp()
except KeyboardInterrupt:
exit()
|
NUMBER_OF_ROWS = 8
NUMBER_OF_COLUMNS = 8
DIMENSION_OF_EACH_SQUARE = 64
BOARD_COLOR_1 = "#DDB88C"
BOARD_COLOR_2 = "#A66D4F"
X_AXIS_LABELS = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')
Y_AXIS_LABELS = (1, 2, 3, 4, 5, 6, 7, 8)
SHORT_NAME = {
'R':'Rook', 'N':'Knight', 'B':'Bishop',
'Q':'Queen', 'K':'King', 'P':'Pawn'
}
# remember capital letters - White pieces, Small letters - Black pieces
START_PIECES_POSITION = {
"A8": "r", "B8": "n", "C8": "b", "D8": "q", "E8": "k", "F8": "b", "G8": "n", "H8": "r",
"A7": "p", "B7": "p", "C7": "p", "D7": "p", "E7": "p", "F7": "p", "G7": "p", "H7": "p",
"A2": "P", "B2": "P", "C2": "P", "D2": "P", "E2": "P", "F2": "P", "G2": "P", "H2": "P",
"A1": "R", "B1": "N", "C1": "B", "D1": "Q", "E1": "K", "F1": "B", "G1": "N", "H1": "R"
}
ORTHOGONAL_POSITIONS = ((-1,0),(0,1),(1,0),(0, -1))
DIAGONAL_POSITIONS = ((-1,-1),(-1,1),(1,-1),(1,1))
KNIGHT_POSITIONS = ((-2,-1),(-2,1),(-1,-2),(-1,2),(1,-2),(1,2),(2,-1),(2,1))
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests the database migration from legacy calculations."""
from __future__ import annotations
from uuid import uuid4
from aiida.common import timezone
from aiida.storage.psql_dos.migrations.utils.calc_state import STATE_MAPPING, StateMapping
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_legacy_jobcalcstate(perform_migrations: PsqlDostoreMigrator):
"""Test the migration that performs a data migration of legacy `JobCalcState`."""
# starting revision
perform_migrations.migrate_up('django@django_0037')
# setup the database
user_model = perform_migrations.get_current_table('db_dbuser')
node_model = perform_migrations.get_current_table('db_dbnode')
with perform_migrations.session() as session:
user = user_model(
email='user@aiida.net',
first_name='John',
last_name='Doe',
institution='EPFL',
)
session.add(user)
session.commit()
nodes: dict[int, StateMapping] = {}
for state, mapping in STATE_MAPPING.items():
node = node_model(
uuid=str(uuid4()),
node_type='process.calculation.calcjob.CalcJobNode.',
attributes={'state': state},
label='test',
description='',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
)
session.add(node)
session.commit()
nodes[node.id] = mapping
# final revision
perform_migrations.migrate_up('django@django_0038')
node_model = perform_migrations.get_current_table('db_dbnode')
with perform_migrations.session() as session:
for node_id, mapping in nodes.items():
attributes = session.get(node_model, node_id).attributes
assert attributes.get('process_state', None) == mapping.process_state
assert attributes.get('process_status', None) == mapping.process_status
assert attributes.get('exit_status', None) == mapping.exit_status
assert attributes.get('process_label', None) == 'Legacy JobCalculation'
assert attributes.get('state', None) is None
assert attributes.get('exit_message', None) is None or isinstance(attributes.get('exit_message'), int)
|
import torch
import numpy as np
import unittest
import fastNLP.modules.utils as utils
class TestUtils(unittest.TestCase):
def test_case_1(self):
a = torch.tensor([
[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]
])
utils.orthogonal(a)
def test_case_2(self):
a = np.random.rand(100, 100)
utils.mst(a)
|
from gaiasdk import sdk
import logging
def MyAwesomeJob(args):
logging.info("This output will be streamed back to gaia and will be displayed in the pipeline logs.")
# Just raise an exception to tell Gaia if a job failed.
# raise Exception("Oh no, this job failed!")
def main():
logging.basicConfig(level=logging.INFO)
myjob = sdk.Job("MyAwesomeJob", "Do something awesome", MyAwesomeJob)
sdk.serve([myjob])
|
"""
Exports an experiment to a directory.
"""
import os
import mlflow
import shutil
import tempfile
import click
from mlflow_export_import.common import filesystem as _filesystem
from mlflow_export_import.common import mlflow_utils
from mlflow_export_import.common.search_runs_iterator import SearchRunsIterator
from mlflow_export_import.run.export_run import RunExporter
from mlflow_export_import import utils, click_doc
client = mlflow.tracking.MlflowClient()
class ExperimentExporter():
def __init__(self, client=None, export_metadata_tags=False, notebook_formats=[], filesystem=None):
self.client = client or mlflow.tracking.MlflowClient()
self.fs = filesystem or _filesystem.get_filesystem()
print("Filesystem:",type(self.fs).__name__)
self.run_exporter = RunExporter(self.client, export_metadata_tags, notebook_formats, self.fs)
def export_experiment(self, exp_id_or_name, output):
exp = mlflow_utils.get_experiment(self.client, exp_id_or_name)
exp_id = exp.experiment_id
print(f"Exporting experiment '{exp.name}' (ID {exp.experiment_id}) to '{output}'")
if output.endswith(".zip"):
self.export_experiment_to_zip(exp_id, output)
else:
self.fs.mkdirs(output)
self.export_experiment_to_dir(exp_id, output)
def export_experiment_to_dir(self, exp_id, exp_dir):
exp = self.client.get_experiment(exp_id)
dct = {"experiment": utils.strip_underscores(exp)}
run_ids = []
failed_run_ids = []
j = -1
for j,run in enumerate(SearchRunsIterator(self.client, exp_id)):
run_dir = os.path.join(exp_dir, run.info.run_id)
print(f"Exporting run {j+1}: {run.info.run_id}")
res = self.run_exporter.export_run(run.info.run_id, run_dir)
if res:
run_ids.append(run.info.run_id)
else:
failed_run_ids.append(run.info.run_id)
dct["export_info"] = { "export_time": utils.get_now_nice(), "num_runs": (j+1) }
dct["run_ids"] = run_ids
dct["failed_run_ids"] = failed_run_ids
path = os.path.join(exp_dir,"manifest.json")
utils.write_json_file(self.fs, path, dct)
if len(failed_run_ids) == 0:
print(f"All {len(run_ids)} runs succesfully exported")
else:
print(f"{len(run_ids)/j} runs succesfully exported")
print(f"{len(failed_run_ids)/j} runs failed")
def export_experiment_to_zip(self, exp_id, zip_file):
temp_dir = tempfile.mkdtemp()
try:
self.export_experiment_to_dir(exp_id, temp_dir)
utils.zip_directory(zip_file, temp_dir)
finally:
shutil.rmtree(temp_dir)
@click.command()
@click.option("--experiment", help="Experiment name or ID.", required=True, type=str)
@click.option("--output-dir", help="Output directory.", required=True)
@click.option("--export-metadata-tags", help=click_doc.export_metadata_tags, type=bool, default=False, show_default=True)
@click.option("--notebook-formats", help=click_doc.notebook_formats, default="", show_default=True)
def main(experiment, output_dir, export_metadata_tags, notebook_formats): # pragma: no cover
print("Options:")
for k,v in locals().items():
print(f" {k}: {v}")
exporter = ExperimentExporter(None, export_metadata_tags, utils.string_to_list(notebook_formats))
exporter.export_experiment(experiment, output_dir)
if __name__ == "__main__":
main()
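# Illustrative invocations (the module name and values are placeholders):
#
#   python export_experiment.py --experiment sklearn_wine --output-dir out/sklearn_wine
#   python export_experiment.py --experiment 3 --output-dir out/exp3.zip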
|
import pytest
def test_something(random_number_generator):
a = random_number_generator()
b = 10
assert a + b >= 10
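# The test above relies on a `random_number_generator` fixture defined elsewhere
# (typically in conftest.py). A minimal sketch of such a fixture, assuming it
# should return a callable that produces non-negative floats, could be:
#
#   import random
#   import pytest
#
#   @pytest.fixture
#   def random_number_generator():
#       return lambda: random.uniform(0, 1)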
|
import re
from collections import defaultdict
d = open("input.txt").read().splitlines()
rules = defaultdict(list)
i = 0
while len(d[i]) > 0:
for x in re.findall(r'\d+-\d+', d[i]):
a, b = [int(x) for x in x.split('-')]
rules[d[i].split(':')[0]].append(range(a, b+1))
i += 1
yt = [int(x) for x in d[i + 2].split(',')]
ot = [[int(x) for x in d[j].split(',')] for j in range(i + 5, len(d))]
# Part 1
valid = lambda x: any(x in sr for r in rules.values() for sr in r)
print(sum(0 if valid(x) else x for t in ot for x in t))
# Part 2
ft = [yt]
for t in ot:
if all(valid(x) for x in t):
ft.append(t)
valid_val = lambda x, y: any(x in r for r in rules[y])
valid_att = lambda a, i: all(valid_val(x[i], a) for x in ft)
poss_att = dict()
for a in rules.keys():
poss_att[a] = [i for i in range(len(yt)) if valid_att(a, i)]
used, ans = set(), 1
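# Greedy assignment: visit fields from most to least constrained (fewest candidate
# positions first); with only valid tickets kept, each field is left with exactly
# one unused position, so the assignment is forced at every step.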
for a in sorted(poss_att, key=lambda x: len(poss_att[x])):
for i in [x for x in poss_att[a] if x not in used]:
used.add(i)
if a.startswith('departure'):
ans *= yt[i]
print(ans)
|
from csdl import Model
import csdl
import numpy as np
class ExampleSimple(Model):
"""
:param var: x
:param var: y
:param var: c
:param var: d
"""
def define(self):
x = self.declare_variable('x')
y = self.declare_variable('y')
a = x + y
b = x + y
c = 2 * a
d = 2 * b
self.register_output('c', c)
self.register_output('d', d)
|
#!/usr/bin/env python
import subprocess
import os
import sys
if len(sys.argv) ==1:
print '\nUsage: awskill [instance ID or cluster name]\n'
print '\nSpecify instance ID or cluster name that will be terminated, which can be found using "awsls" or "awsls -c"\n'
sys.exit()
instanceID=sys.argv[1]
#====================
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
#====================
def exec_remote_cmd(cmd):
with hide('output','running','warnings'), settings(warn_only=True):
return run(cmd)
#==============================
def query_yes_no(question, default="no"):
valid = {"yes": True, "y": True, "ye": True,"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
#List instances given a users tag
keyPath=subprocess.Popen('echo $KEYPAIR_PATH',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(keyPath) == 0:
print '\nError: KEYPAIR_PATH not specified as environment variable. Exiting\n'
sys.exit()
if keyPath.split('/')[-1].split('.')[-1] != 'pem':
print '\nError: Keypair specified is invalid, it needs to have .pem extension. Found .%s extension instead. Exiting\n' %(keyPath.split('/')[-1].split('.')[-1])
sys.exit()
tag=keyPath.split('/')[-1].split('.')[0]
if os.path.exists('%s/.starcluster' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip())):
clusterflag=1
if not os.path.exists('%s/.starcluster' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip())):
clusterflag=0
os.makedirs('%s/.starcluster' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip()))
if not os.path.exists('%s/.starcluster/config' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip())):
AWS_ACCESS_KEY_ID=subprocess.Popen('echo $AWS_ACCESS_KEY_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_SECRET_ACCESS_KEY=subprocess.Popen('echo $AWS_SECRET_ACCESS_KEY',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_ACCOUNT_ID=subprocess.Popen('echo $AWS_ACCOUNT_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_DEFAULT_REGION=subprocess.Popen('echo $AWS_DEFAULT_REGION',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
cmd='####################################\n'
cmd+='## StarCluster Configuration File ##\n'
cmd+='####################################\n'
cmd+='[aws info]\n'
cmd+='AWS_USER_ID=%s\n' %(AWS_ACCOUNT_ID)
cmd+='AWS_ACCESS_KEY_ID =%s\n' %(AWS_ACCESS_KEY_ID)
cmd+='AWS_SECRET_ACCESS_KEY = %s\n' %(AWS_SECRET_ACCESS_KEY)
cmd+='AWS_REGION_NAME = %s\n' %(AWS_DEFAULT_REGION)
cmd+='AVAILABILITY_ZONE = %sa\n' %(AWS_DEFAULT_REGION)
cmd+='AWS_REGION_HOST = ec2.%s.amazonaws.com\n' %(AWS_DEFAULT_REGION)
cmd+='[global]\n'
cmd+='DEFAULT_TEMPLATE=cluster\n'
cmd+='[key %s]\n' %(keyPath.split('/')[-1].split('.')[0])
cmd+='KEY_LOCATION=%s\n' %(keyPath)
o1=open('%s/.starcluster/config' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip()),'w')
o1.write(cmd)
o1.close()
if instanceID.split('-')[0] == 'cluster':
if clusterflag==0:
print 'Error: Could not find starcluster installed. Exiting.\n'
sys.exit()
cmd='starcluster terminate %s -f'%(instanceID)
subprocess.Popen(cmd,shell=True).wait()
if instanceID.split('-')[0] != 'cluster':
#answer=query_yes_no("\nTerminate instance %s?" %(instanceID))
PublicIP=subprocess.Popen('aws ec2 describe-instances --instance-id %s --query "Reservations[*].Instances[*].{IPaddress:PublicIpAddress}" | grep IPaddress' %(instanceID),shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[-1].split('"')[1]
#Import Fabric modules now:
fabric_test=module_exists('fabric.api')
if fabric_test is False:
print 'Error: Could not find fabric installed and it is required. Install from here: http://www.fabfile.org/installing.html'
sys.exit()
from fabric.operations import run, put
from fabric.api import env,run,hide,settings
from fabric.context_managers import shell_env
from fabric.operations import put
env.host_string='ubuntu@%s' %(PublicIP)
env.key_filename = '%s' %(keyPath)
answer=True
if answer is True:
print '\nRemoving instance ...\n'
if os.path.exists('tmp4949585940.txt'):
os.remove('tmp4949585940.txt')
#Check if instance has volume mounted aws ec2 describe-instance-attribute --instance-id i-0d4524ffad3ac020b --attribute blockDeviceMapping
'''
numDevices=float(subprocess.Popen('aws ec2 describe-instances --instance-id %s --query "Reservations[*].Instances[*].BlockDeviceMappings" | grep DeviceName | wc -l' %(instanceID), shell=True, stdout=subprocess.PIPE).stdout.read().strip().split()[0])
if numDevices > 1:
counter=2
while counter <= numDevices:
mountpoint=subprocess.Popen('aws ec2 describe-instances --instance-id %s --query "Reservations[*].Instances[*].BlockDeviceMappings[%i]" | grep DeviceName' %(instanceID,counter-1), shell=True, stdout=subprocess.PIPE).stdout.read().strip().split(':')[-1].split('"')[1]
volID=subprocess.Popen('aws ec2 describe-instances --instance-id %s --query "Reservations[*].Instances[*].BlockDeviceMappings[%i]" | grep VolumeId' %(instanceID,counter-1), shell=True, stdout=subprocess.PIPE).stdout.read().strip().split(':')[-1].split('"')[1]
umount=exec_remote_cmd('sudo umount /dev/%s' %(mountpoint))
if len(umount.split()) >0:
print 'Error unmounting volume. Stop all running processes (shown below) and try again to terminate instance.\n'
lsof=exec_remote_cmd('lsof | grep /data')
if len(lsof)>0:
counter2=0
print 'COMMAND\t\tPROCESSID'
print '------------------------------'
while counter2 < len(lsof.split('\n')):
command=lsof.split('\n')[counter2].split()[0]
pid=lsof.split('\n')[counter2].split()[1]
print '%s\t\t%s' %(command,pid)
counter2=counter2+1
print ''
sys.exit()
vol=subprocess.Popen('aws ec2 detach-volume --volume-id %s ' %(volID),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
counter=counter+1
'''
cmd='aws ec2 terminate-instances --instance-ids %s > tmp4949585940.txt' %(instanceID)
subprocess.Popen(cmd,shell=True).wait()
os.remove('tmp4949585940.txt')
print 'Success!'
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 16:19, 16/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""The super-group for the compute CLI."""
import argparse
from googlecloudsdk.calliope import base
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.core import cli
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources
class Compute(base.Group):
"""Read and manipulate Google Compute Engine resources."""
def Filter(self, context, args):
http = cli.Http()
core_values = properties.VALUES.core
compute_values = properties.VALUES.compute
context['http'] = http
context['project'] = core_values.project.Get(required=True)
for api, param, prop in (
('compute', 'project', core_values.project),
('resourceviews', 'projectName', core_values.project),
('compute', 'zone', compute_values.zone),
('resourceviews', 'zone', compute_values.zone),
('compute', 'region', compute_values.region),
('resourceviews', 'region', compute_values.region)):
resources.SetParamDefault(
api=api,
collection=None,
param=param,
resolver=resolvers.FromProperty(prop))
utils.UpdateContextEndpointEntries(context)
Compute.detailed_help = {
'brief': 'Read and manipulate Google Compute Engine resources',
}
|
import os
import time
class Teste:
# Variable initialization
def __init__(self):
self.pid = os.getpid()
# Function that writes the PID of the Python process currently running (this program).
def escrita(self):
e = open('pid.txt', 'w')
e.writelines('{0}'.format(self.pid))
e.close()
# Function that counts to 3 (about 9 seconds in total).
def cont(self):
self.escrita()
for i in range(0,3,1):
if(i < 2):
print('2: I am alive')
else:
print('2: I gonna die now, bye')
time.sleep(3)
t = Teste()
t.cont()
|
# /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Cross-platform global process lock that prevents a scheduled task from being started more than once.
'''
import os
if os.name == 'nt':
import win32con, win32file, pywintypes
elif os.name == 'posix':
import fcntl
class AutoLock:
def __init__( self, mutexname ):
self.filepath = mutexname + '.lock'
self.f = None
def __enter__( self ):
try:
self.f = open( self.filepath, 'wb' )
except:
raise RuntimeError( "File Path Error" )
try:
if os.name == 'nt':
__overlapped = pywintypes.OVERLAPPED()
hfile = win32file._get_osfhandle( self.f.fileno() )
win32file.LockFileEx( hfile, win32con.LOCKFILE_EXCLUSIVE_LOCK | win32con.LOCKFILE_FAIL_IMMEDIATELY, 0, 0xffff0000, __overlapped )
elif os.name == 'posix':
fcntl.flock( self.f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB )
except:
raise SystemExit( 'Process Running' )
return self
def __exit__( self, type, value, trace ):
if self.f:
if os.name == 'nt':
__overlapped = pywintypes.OVERLAPPED()
hfile = win32file._get_osfhandle( self.f.fileno() )
win32file.UnlockFileEx( hfile, 0, 0xffff0000, __overlapped )
elif os.name == 'posix':
fcntl.flock( self.f.fileno(), fcntl.LOCK_UN )
self.f.close()
try:
os.remove( self.filepath )
except:
pass
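# Illustrative usage sketch (the mutex name and task function are placeholders);
# a second process attempting the same block exits with SystemExit('Process Running'):
#
#   with AutoLock('nightly_job'):
#       run_scheduled_task()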
|
x = 0
while 1:
print x
x = x + 1
if x > 16000:
x = 0
|
from .misc import SpaceFuncsException
class baseGeometryObject:
_AttributesDict = {'spaceDimension': '_spaceDimension'}
def __init__(self, *args, **kw):
self.__dict__.update(kw)
#__call__ = lambda self, *args, **kwargs: ChangeName(self, args[0]) #if len(args) == 1 and type(args[0]) == str else tmp(*args, **kwargs)
def plot(self, *args, **kw): # should be overwritten by derived classes
raise SpaceFuncsException('plotting for the object is not implemented yet')
def __getattr__(self, attr):
if attr in self._AttributesDict:
tmp = getattr(self, self._AttributesDict[attr])()
setattr(self, attr, tmp)
return tmp
else:
raise AttributeError('no such method "%s" for the object' % attr)
def _spaceDimension(self):
raise SpaceFuncsException('this function should be overwritten by derived class')
|
import re
import json
import urllib.request
import requests
site = 'https://blog.mhuig.top'
sitemaps = ['/post-sitemap.xml','/page-sitemap.xml']
result = []
bingUrllist = []
bingData = {}
i=0
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
for sitemap in sitemaps:
sitemap = site+sitemap
req = urllib.request.Request(url=sitemap, headers=headers)
html = urllib.request.urlopen(req).read().decode('utf-8')
data = re.findall(re.compile(r'(?<=<loc>).*?(?=</loc>)'), html)
result=result+data
with open('urls.txt', 'w') as file:
for data in result:
i=i+1
print(data, file=file)
        # Submit the first 10 URLs to Bing
if i <= 10:
bingUrllist.append(data)
        # Submit the first 100 URLs to Baidu and Google
if i == 100:
break
bingData["siteUrl"] = site
bingData["urlList"] = bingUrllist
with open("bing.json", "w") as f:
json.dump(bingData,f)
# with open('all-urls.txt', 'w') as file:
# for data in result:
# print(data, file=file)
|
#!/usr/bin/env python
from copy import deepcopy
from glob import glob
import os
import shutil
import yaml
base_path = os.path.join(os.path.dirname(__file__), "..")
generated_path = os.path.join(base_path, "generated")
# Helper method to allow for `literal` YAML syntax
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
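# Illustrative example of the representer above (added; assumed behavior, not
# part of the original script):
#   yaml.dump({"query": "line one\nline two"})
# renders the multiline value with the literal block style, e.g.
#   query: |-
#     line one
#     line two
# while single-line strings keep the default plain style.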
os.mkdir(generated_path)
# Grafana
# Copy files
grafana_dashboard_path = os.path.join(base_path, "..", "grafana", "generated-dashboards")
grafana_template_path = os.path.join(base_path, "templates", "grafana")
grafana_output_path = os.path.join(generated_path, "grafana")
print("Grafana")
print("=======")
print("Copying templates to output path")
print(f"{grafana_template_path} => {grafana_output_path}")
shutil.copytree(grafana_template_path, grafana_output_path)
# Load k8s dashboard template
dashboard_template_path = os.path.join(grafana_output_path, "dashboard.yaml")
with open(dashboard_template_path, "r") as template_file:
k8s_template = yaml.safe_load(template_file)
# Iterate over all dashboards
dashboards = glob(os.path.join(grafana_dashboard_path, "*.json"))
for json_dashboard in dashboards:
# Copy the template and update with the appropriate values
k8s_dashboard = deepcopy(k8s_template)
k8s_dashboard["metadata"]["name"] = os.path.splitext(os.path.basename(json_dashboard))[0]
k8s_dashboard["spec"]["name"] = os.path.basename(json_dashboard)
k8s_dashboard["spec"]["url"] = f"https://raw.githubusercontent.com/datastax/metrics-collector-for=apache-cassandra/master/dashboards/grafana/generated-dashboards/{os.path.basename(json_dashboard)}"
# Read in JSON dashboard
with open(json_dashboard, "r") as json_file:
k8s_dashboard["spec"]["json"] = json_file.read()
# Write out the k8s dashboard file
dashboard_filename = f"{k8s_dashboard['metadata']['name']}.dashboard.yaml"
dashboard_output_path = os.path.join(generated_path, "grafana", dashboard_filename)
print(f"Templating {json_dashboard} => {dashboard_output_path}")
with open(dashboard_output_path, "w") as k8s_file:
k8s_file.write(yaml.dump(k8s_dashboard))
# Delete original template from distribution
print("Removing template from generated directory")
print(dashboard_template_path)
os.remove(os.path.join(grafana_output_path, "dashboard.yaml"))
print("")
# Prometheus
key_mapping = {
'action': 'action',
'regex': 'regex',
'replacement': 'replacement',
'separator': 'separator',
'source_labels': 'sourceLabels',
'target_label': 'targetLabel'
}
# Copy files
prometheus_output_path = os.path.join(generated_path, "prometheus")
prometheus_template_path = os.path.join(base_path, "templates", "prometheus")
service_monitor_path = os.path.join(prometheus_output_path, "service_monitor.yaml")
prometheus_config_path = os.path.join(base_path, "..", "prometheus", "prometheus.yaml")
print("Prometheus")
print("=======")
print("Copying templates to output path")
print(f"{prometheus_template_path} => {prometheus_output_path}")
shutil.copytree(prometheus_template_path, prometheus_output_path)
# Load k8s service monitor template
with open(service_monitor_path, "r") as template_file:
k8s_service_monitor = yaml.safe_load(template_file)
# Load prometheus configuration file
with open(prometheus_config_path, "r") as prometheus_file:
prometheus_conf = yaml.safe_load(prometheus_file)
# Extract scrape configs
for scrape_config in prometheus_conf['scrape_configs']:
if scrape_config['job_name'] == "mcac":
# Extract relabel configs
for relabel_config in scrape_config['metric_relabel_configs']:
k8s_relabel_config = {}
# Rename keys and move to template
for pair in relabel_config.items():
if pair[0] in key_mapping:
k8s_relabel_config[key_mapping[pair[0]]] = pair[1]
else:
print(f"Missing mapping for {pair[0]}")
k8s_service_monitor['spec']['endpoints'][0]['metricRelabelings'].append(k8s_relabel_config)
# Write out templated k8s service monitor
with open(service_monitor_path, "w") as service_monitor_file:
print("Writing out service monitor configuration")
print(service_monitor_path)
yaml.dump(k8s_service_monitor, service_monitor_file)
|
import hashlib
import hmac
import re
import time
import urllib
import requests
import six
from broker.exceptions import BrokerApiException, BrokerRequestException
from broker import user_agent
class Request(object):
API_VERSION = 'v1'
QUOTE_API_VERSION = 'v1'
def __init__(self, api_key='', secret='', entry_point='', proxies=None):
if not entry_point.endswith('/'):
entry_point = entry_point + '/'
self.api_key = api_key
self.secret = secret
self.entry_point = entry_point
self.proxies = proxies
self.ping()
def _create_api_uri(self, path, version):
version_path = ''
if version:
version_path = version + '/'
return self.entry_point + version_path + path
def _create_quote_api_uri(self, path, version):
return self.entry_point + 'quote/' + version + '/' + path
def _generate_signature(self, data):
if six.PY2:
params_str = urllib.urlencode(data)
else:
params_str = urllib.parse.urlencode(data)
digest = hmac.new(self.secret.encode(encoding='UTF8'),
params_str.encode(encoding='UTF8'),
digestmod=hashlib.sha256).hexdigest()
return digest
def _get(self, path, signed=False, version=API_VERSION, **kwargs):
uri = self._create_api_uri(path, version)
return self._request('GET', uri, signed, **kwargs)
def _post(self, path, signed=False, version=API_VERSION, **kwargs):
uri = self._create_api_uri(path, version)
return self._request('POST', uri, signed, **kwargs)
def _put(self, path, signed=False, version=API_VERSION, **kwargs):
uri = self._create_api_uri(path, version)
return self._request('PUT', uri, signed, **kwargs)
def _delete(self, path, signed=False, version=API_VERSION, **kwargs):
uri = self._create_api_uri(path, version)
return self._request('DELETE', uri, signed, **kwargs)
def _quote_get(self, path, signed=False, version=QUOTE_API_VERSION, **kwargs):
uri = self._create_quote_api_uri(path, version)
return self._request('GET', uri, signed, **kwargs)
def _request(self, method, uri, signed, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 10
        data_type = 'data' if method == 'POST' else 'params'
        if data_type not in kwargs:
            kwargs[data_type] = {}
        kwargs[data_type]['timestamp'] = int(time.time() * 1000)
        self._process_parameters(kwargs[data_type])
        if signed:
            kwargs[data_type]['signature'] = self._generate_signature(kwargs[data_type])
kwargs['headers'] = {
'X-BH-APIKEY': self.api_key,
'User-Agent': user_agent
}
response = requests.request(method, uri, proxies=self.proxies, **kwargs)
return self._handle_response(response)
@classmethod
def _process_parameters(cls, parameters):
assert isinstance(parameters, dict)
processed_parameters = dict()
for name, value in parameters.items():
processed_parameters[cls._camelcase(name)] = '' if value is None else value
parameters.clear()
parameters.update(processed_parameters)
@classmethod
def _camelcase(cls, name):
name = re.sub(r"^[\-_.]", '', str(name))
if not name:
return name
return name[0].lower() + re.sub(r"[\-_.\s]([a-z])", lambda matched: matched.group(1).upper(), name[1:])
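    # Illustrative conversions performed by _camelcase (added examples, not
    # from the original code):
    #   'listen_key'      -> 'listenKey'
    #   'client_order_id' -> 'clientOrderId'
    #   '_timestamp'      -> 'timestamp'  (a leading separator is stripped first)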
@classmethod
def _handle_response(cls, response):
if not str(response.status_code).startswith('2') and not response.status_code == 400:
raise BrokerApiException(response)
try:
return response.json()
except ValueError:
raise BrokerRequestException('Invalid Response: %s' % response.text)
def ping(self):
return self._get('ping')
def time(self):
"""
Check server time
"""
return self._get('time')
def broker_info(self, trade_type=''):
"""
Broker information
"""
params = {
'type': trade_type
}
return self._get('brokerInfo', params=params)
def stream_get_listen_key(self):
"""
Start user data stream (USER_STREAM)
"""
return self._post('userDataStream', signed=True)
def stream_keepalive(self, listen_key):
"""
Keepalive user data stream (USER_STREAM)
"""
params = {
'listenKey': listen_key
}
return self._put('userDataStream', signed=True, params=params)
def stream_close(self, listen_key):
"""
Close user data stream (USER_STREAM)
"""
params = {
'listenKey': listen_key
}
return self._delete('userDataStream', signed=True, params=params)
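# Minimal usage sketch (added for illustration; the entry point URL and the
# credentials below are placeholders, and Request.__init__ calls ping(), so
# this performs a real HTTP request against whatever endpoint is supplied):
if __name__ == '__main__':
    client = Request(api_key='YOUR_API_KEY',
                     secret='YOUR_SECRET',
                     entry_point='https://api.example.com/openapi/')
    print(client.time())          # public endpoint, no signature required
    print(client.broker_info())   # broker metadata for all trade types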
|
# Generated by Django 2.2.14 on 2020-07-24 20:35
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth0login', '0002_auto_20200722_2011'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_type', models.IntegerField(default=0)),
('tag_de', models.CharField(max_length=200)),
('tag_en', models.CharField(max_length=200)),
('tag_fr', models.CharField(max_length=200)),
('tag_es', models.CharField(max_length=200)),
('tag_it', models.CharField(max_length=200)),
('created_at', models.DateTimeField(default=datetime.datetime.now)),
],
),
migrations.CreateModel(
name='EventLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.IntegerField(default=0)),
('event_subtype', models.IntegerField(default=0)),
('event_details', models.CharField(max_length=2048)),
('created_at', models.DateTimeField(default=datetime.datetime.now)),
('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# high-dimensional reacher
import numpy as np
import matplotlib.pyplot as plt
def smooth(y, radius=2, mode='two_sided'):
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
return np.convolve(y, convkernel, mode='same') / \
np.convolve(np.ones_like(y), convkernel, mode='same')
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / \
np.convolve(np.ones_like(y), convkernel, mode='full')
return out[:-radius+1]
def moving_sum(y, window=2):
c = y.cumsum()
c[window:] = c[window:] - c[:-window]
return c/float(window)
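# Illustrative example (added): despite its name, moving_sum returns a
# windowed mean, e.g. moving_sum(np.array([1., 1., 1., 1.]), window=2)
# gives array([0.5, 1., 1., 1.]) because the cumulative sum is divided
# by the window size.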
def plot(x, data, color, label):
y_m=np.mean(data, axis=0)
y_std=np.std(data, axis=0)
y_upper=y_m+y_std
y_lower=y_m-y_std
plt.fill_between(
x, list(y_lower), list(y_upper), interpolate=True, facecolor=color, linewidth=0.0, alpha=0.3
)
plt.plot(x, list(y_m), color=color, label=label)
file_pre = './'
y=np.load(file_pre+'eval_rewards.npy')
s=np.load(file_pre+'eval_success.npy')
fig, axs = plt.subplots(2, figsize=(8, 6))
x=np.arange(len(y))
axs[0].plot(x, smooth(y), label = 'SawyerPush', color='b')
axs[0].set_ylabel('Reward')
# plt.ylim(0)
axs[0].legend( loc=2)
axs[0].grid()
axs[1].set_xlabel('Episodes')
axs[1].set_ylabel('Average Success Rate')
axs[1].plot(x, moving_sum(s), label = 'SawyerPush', color='b')
axs[1].grid()
plt.savefig('reward.pdf')
plt.show()
|
#!/usr/bin/env python3
import numpy as np
import scipy.special
from functools import reduce
def peirce_dev(N: int, n: int = 1, m: int = 1) -> float:
"""Peirce's criterion
Returns the squared threshold error deviation for outlier identification
using Peirce's criterion based on Gould's methodology.
Arguments:
- int, total number of observations (N)
- int, number of outliers to be removed (n)
- int, number of model unknowns (m)
Returns:
float, squared error threshold (x2)
"""
# Assign floats to input variables:
N = float(N)
n = float(n)
m = float(m)
# Check number of observations:
if N > 1:
# Calculate Q (Nth root of Gould's equation B):
Q = (n ** (n / N) * (N - n) ** ((N - n) / N)) / N
#
# Initialize R values (as floats)
r_new = 1.0
r_old = 0.0 # <- Necessary to prompt while loop
#
# Start iteration to converge on R:
while abs(r_new - r_old) > (N * 2.0e-16):
# Calculate Lamda
# (1/(N-n)th root of Gould's equation A'):
ldiv = r_new ** n
if ldiv == 0:
ldiv = 1.0e-6
Lamda = ((Q ** N) / (ldiv)) ** (1.0 / (N - n))
# Calculate x-squared (Gould's equation C):
x2 = 1.0 + (N - m - n) / n * (1.0 - Lamda ** 2.0)
# If x2 goes negative, return 0:
if x2 < 0:
x2 = 0.0
r_old = r_new
else:
# Use x-squared to update R (Gould's equation D):
r_old = r_new
r_new = np.exp((x2 - 1) / 2.0) * scipy.special.erfc(
np.sqrt(x2) / np.sqrt(2.0)
)
else:
x2 = 0.0
return x2
def stats(values):
n = len(values)
sum = values.sum()
avg = sum / n
var = np.var(values)
std = np.std(values)
return {"n": n, "sum": sum,"avg": avg,"var": var,"std": std}
def separate_outliers(v):
    """Iteratively apply Peirce's criterion to split v into kept values and outliers."""
    result = None
    s = stats(v)
    nbrRemoved = 0
    while True:
        # Assume one more outlier than has been found so far
        k = nbrRemoved + 1
        r = np.sqrt(peirce_dev(s['n'], k))
        threshold = r * s['std']
        def outlierReduce(acc, x):
            if np.abs(x - s['avg']) < threshold:
                acc['trimmed'].append(x)
            else:
                acc['outliers'].append(x)
            return acc
        initial = {"original": v, 'trimmed': [], 'outliers': []}
        result = reduce(outlierReduce, v, initial)
        # Stop once fewer outliers are found than were assumed
        nbrRemoved = len(result['outliers'])
        if nbrRemoved < k:
            break
    return result
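# Small demonstration of the helpers above (added; the sample data and the
# guarded __main__ block are illustrative, not part of the original script):
if __name__ == "__main__":
    sample = np.array([101.2, 90.0, 99.0, 102.0, 103.0,
                       100.2, 89.0, 98.1, 101.5, 102.0])
    print(stats(sample))
    # The two low readings (89.0 and 90.0) end up in the 'outliers' list
    print(separate_outliers(sample))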
|
import random
chars1='abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
chars2 = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def start():
print('Welcome to the Random Password Generator. Version 1.00')
print("------------------------------------")
    length = int(input('How many characters would you like your password to be? '))
number = int(input('How many passwords would you like to generate? '))
want_numbers = input('Would you like to have numbers in your password? (yes/no) ').lower()
print('Here is a list of passwords generated')
if want_numbers == 'yes':
        for _ in range(number):
password = ''
for _ in range(length):
password += random.choice(chars1)
print(password)
elif want_numbers == 'no':
        for _ in range(number):
password = ''
for _ in range(length):
password += random.choice(chars2)
print(password)
start()
|
import pytest
from eth_utils import (
decode_hex,
encode_hex,
)
from eth_keys import keys
from p2p import ecies
# (privkey_hex, pubkey_hex, ecdh_expected) tuples with known-good values, to ensure our
# ECC backends are compatible with other clients'.
# Copied from
# https://github.com/ethereum/cpp-ethereum/blob/3c49a0/test/unittests/libp2p/rlpx.cpp#L427
# and
# https://github.com/ethereum/go-ethereum/blob/5c9346/crypto/ecies/ecies_test.go#L456
STATIC_ECDH_VALUES = [
("0x332143e9629eedff7d142d741f896258f5a1bfab54dab2121d3ec5000093d74b",
"0xf0d2b97981bd0d415a843b5dfe8ab77a30300daab3658c578f2340308a2da1a07f0821367332598b6aa4e180a41e92f4ebbae3518da847f0b1c0bbfe20bcf4e1", # noqa: E501
"0xee1418607c2fcfb57fda40380e885a707f49000a5dda056d828b7d9bd1f29a08",
),
("0x7ebbc6a8358bc76dd73ebc557056702c8cfc34e5cfcd90eb83af0347575fd2ad",
"0x83ede0f19c3c98649265956a4193677b14c338a22de2086a08d84e4446fe37e4e233478259ec90dbeef52f4f6c890f8c38660ec7b61b9d439b8a6d1c323dc025", # noqa: E501
"0x167ccc13ac5e8a26b131c3446030c60fbfac6aa8e31149d0869f93626a4cdf62",
),
]
def test_encrypt_decrypt():
msg = b'test yeah'
privkey = ecies.generate_privkey()
ciphertext = ecies.encrypt(msg, privkey.public_key)
decrypted = ecies.decrypt(ciphertext, privkey)
assert decrypted == msg
privkey2 = ecies.generate_privkey()
with pytest.raises(ecies.DecryptionError):
decrypted = ecies.decrypt(ciphertext, privkey2)
def test_decrypt_known_good_handshake():
# Data taken from https://gist.github.com/fjl/3a78780d17c755d22df2
privkey = keys.PrivateKey(
decode_hex("c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"))
auth_ciphertext = decode_hex(
"04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c5404a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e7723eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576dc017fdd3d581e83cfd26cf125b6d2bda1f1d56") # noqa: E501
auth_plaintext = decode_hex(
"884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa570034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae4064abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb1100") # noqa: E501
decrypted = ecies.decrypt(auth_ciphertext, privkey)
assert auth_plaintext == decrypted
@pytest.mark.parametrize("privkey_hex, pubkey_hex, ecdh_expected", STATIC_ECDH_VALUES)
def test_ecdh(privkey_hex, pubkey_hex, ecdh_expected):
privkey = keys.PrivateKey(decode_hex(privkey_hex))
pubkey = keys.PublicKey(decode_hex(pubkey_hex))
assert ecdh_expected == encode_hex(ecies.ecdh_agree(privkey, pubkey))
# FIXME: Document those values; this test was lifted from pydevp2p:
# https://github.com/ethereum/pydevp2p/blob/e1ef07a782b9369d18a8441c3b9bcf12456e0608/devp2p/tests/test_ecies.py#L31
def test_hmac_sha256():
k_mac = decode_hex("0x07a4b6dfa06369a570f2dcba2f11a18f")
indata = decode_hex("0x4dcb92ed4fc67fe86832")
hmac_expected = decode_hex("0xc90b62b1a673b47df8e395e671a68bfa68070d6e2ef039598bb829398b89b9a9")
hmac = ecies.hmac_sha256(k_mac, indata)
assert hmac_expected == hmac
# message tag generated by geth
tag_secret = decode_hex("0xaf6623e52208c596e17c72cea6f1cb09")
tag_input = decode_hex("0x3461282bcedace970df2")
tag_expected = decode_hex("0xb3ce623bce08d5793677ba9441b22bb34d3e8a7de964206d26589df3e8eb5183")
hmac = ecies.hmac_sha256(tag_secret, tag_input)
assert hmac == tag_expected
# FIXME: Document those values; this test was lifted from pydevp2p:
# https://github.com/ethereum/pydevp2p/blob/e1ef07a782b9369d18a8441c3b9bcf12456e0608/devp2p/tests/test_ecies.py#L46
def test_kdf():
input_ = decode_hex("0x961c065873443014e0371f1ed656c586c6730bf927415757f389d92acf8268df")
expected_key = decode_hex("0x4050c52e6d9c08755e5a818ac66fabe478b825b1836fd5efc4d44e40d04dabcc")
key = ecies.kdf(input_)
assert key == expected_key
|
import time
import pytest
import tapystry as tap
def test_simple():
def fn():
yield tap.Broadcast('key')
return 5
assert tap.run(fn) == 5
def test_receive():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver():
value = yield tap.Receive('key')
return value
def fn():
recv_strand = yield tap.CallFork(receiver)
# even though this is forked, it doesn't end up hanging
yield tap.CallFork(broadcaster, (5,))
value = yield tap.Join(recv_strand)
# join again should give the same thing, it's already done
value1 = yield tap.Join(recv_strand)
assert value1 == value
return value
assert tap.run(fn) == 5
def test_broadcast_receive_order():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver():
value = yield tap.Receive('key')
return value
def fn():
# fork in apparently wrong order!
broadcast_strand = yield tap.CallFork(broadcaster, (5,))
recv_strand = yield tap.CallFork(receiver)
yield tap.Join(broadcast_strand)
value = yield tap.Join(recv_strand)
return value
assert tap.run(fn) == 5
def test_never_receive():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver():
value = yield tap.Receive('key2')
return value
def fn():
recv_strand = yield tap.CallFork(receiver)
broadcast_strand = yield tap.CallFork(broadcaster, (5,))
yield tap.Join(broadcast_strand)
value = yield tap.Join(recv_strand)
return value
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
assert str(x.value).startswith("Hanging strands")
def test_bad_yield():
def fn():
yield 3
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
assert str(x.value).startswith("Strand yielded non-effect")
def test_immediate_return():
def fn():
if False:
yield
return 3
assert tap.run(fn) == 3
def test_never_join():
def broadcaster(value):
yield tap.Broadcast('key', value)
yield tap.Broadcast('key2', value)
def fn():
yield tap.CallFork(broadcaster, (5,))
return
assert tap.run(fn) is None
def test_no_arg():
def broadcaster(value):
yield tap.Broadcast('key', value)
def fn():
yield tap.CallFork(broadcaster)
return
with pytest.raises(TypeError):
tap.run(fn)
def test_call():
def random(value):
yield tap.Broadcast('key', value)
return 10
def fn():
x = yield tap.Call(random, (5,))
return x
assert tap.run(fn) == 10
def test_call_trivial():
def random(value):
return 10
def fn():
x = yield tap.Call(random, (5,))
return x
def fork_fn():
strand = yield tap.CallFork(random, (5,))
x = yield tap.Join(strand)
return x
assert tap.run(fn) == 10
assert tap.run(fork_fn) == 10
assert tap.run(random, args=(5,)) == 10
def test_cancel():
a = 0
def add_three(value):
nonlocal a
yield tap.Receive('key')
a += 5
yield tap.Receive('key')
a += 5
yield tap.Receive('key')
a += 5
return 10
def fn():
strand = yield tap.CallFork(add_three, (5,))
yield tap.Broadcast('key')
yield tap.Broadcast('key')
yield tap.Cancel(strand)
tap.run(fn)
assert a == 10
def test_multifirst():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver(wait_value):
value = yield tap.Receive('key', lambda x: x == wait_value)
return value
def fn():
strand_1 = yield tap.CallFork(receiver, (1,))
strand_2 = yield tap.CallFork(receiver, (2,))
strand_3 = yield tap.CallFork(receiver, (3,))
results = yield tap.Fork([
tap.First([strand_1, strand_2, strand_3]),
tap.First([strand_2, strand_1]),
])
yield tap.Call(broadcaster, (5,))
yield tap.Call(broadcaster, (3,))
yield tap.Call(broadcaster, (1,))
value = yield tap.Join(results)
return value
# the first race resolves first, thus cancelling strands 1 and 2, preventing the second from ever finishing
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
assert str(x.value).startswith("Hanging strands")
def test_multifirst_again():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver(wait_value):
value = yield tap.Receive('key', lambda x: x == wait_value)
return value
def fn():
strand_1 = yield tap.CallFork(receiver, (1,))
strand_2 = yield tap.CallFork(receiver, (2,))
strand_3 = yield tap.CallFork(receiver, (3,))
results = yield tap.Fork([
tap.First([strand_1, strand_2], name="1v2"),
tap.First([strand_2, strand_3], name="2v3"),
])
yield tap.Call(broadcaster, (5,))
yield tap.Call(broadcaster, (1,))
yield tap.Call(broadcaster, (3,))
value = yield tap.Join(results, name="joinfork")
# yield tap.Join([strand_1, strand_2, strand_3], name="joinstrands")
return value
assert tap.run(fn) == [
(0, 1),
(1, 3),
]
def test_multifirst_canceled():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver(wait_value):
value = yield tap.Receive('key', lambda x: x == wait_value)
return value
def fn():
strand_1 = yield tap.CallFork(receiver, (1,))
strand_2 = yield tap.CallFork(receiver, (2,))
strand_3 = yield tap.CallFork(receiver, (3,))
results = yield tap.Fork([
tap.First([strand_1, strand_2], name="1v2"),
tap.First([strand_2, strand_3], name="2v3"),
])
yield tap.Call(broadcaster, (5,))
yield tap.Call(broadcaster, (1,))
yield tap.Call(broadcaster, (3,))
value = yield tap.Join(results, name="joinfork")
yield tap.Join(strand_2, name="joincanceled")
return value
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
# TODO: always do the child strand
assert str(x.value).startswith("Hanging strands detected waiting for Race(Join(joincanceled))") or str(x.value).startswith("Hanging strands detected waiting for Call(Join)")
def test_multifirst_no_cancel():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver(wait_value):
value = yield tap.Receive('key', lambda x: x == wait_value)
return value
def fn():
strand_1 = yield tap.CallFork(receiver, (1,))
strand_2 = yield tap.CallFork(receiver, (2,))
strand_3 = yield tap.CallFork(receiver, (3,))
results = yield tap.Fork([
tap.First([strand_1, strand_2], name="1v2", cancel_losers=False),
tap.First([strand_2, strand_3], name="2v3", cancel_losers=False),
])
yield tap.Call(broadcaster, (5,))
yield tap.Call(broadcaster, (1,))
yield tap.Call(broadcaster, (3,))
value = yield tap.Join(results, name="joinfork")
yield tap.Call(broadcaster, (2,))
yield tap.Join(strand_2, name="joincanceled")
return value
assert tap.run(fn) == [
(0, 1),
(1, 3),
]
def test_yield_from():
def fn1(value):
yield tap.Broadcast('key1', value)
yield tap.Broadcast('key2', value)
return 1
def fn2(value):
yield tap.Broadcast('key3', value)
yield tap.Broadcast('key4', value)
return 2
def fn():
v1 = yield from fn1(3)
v2 = yield from fn2(4)
assert v1 == 1
assert v2 == 2
tap.run(fn)
def test_sleep():
def fn():
t = time.time()
yield tap.Sleep(0.01)
assert time.time() - t > 0.01
t = time.time()
yield tap.Sleep(0)
assert time.time() - t < 0.01
tap.run(fn)
def test_intercept_nontest():
def fn():
yield tap.Intercept()
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
assert str(x.value).startswith("Cannot intercept outside of test mode")
def test_intercept():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver():
value = yield tap.Receive('key')
return value
def intercepter():
(effect, inject) = yield tap.Intercept(lambda e: isinstance(e, tap.Receive))
assert effect.key == "key"
yield inject("real")
def fn():
intercept_strand = yield tap.CallFork(intercepter)
recv_strand = yield tap.CallFork(receiver)
# even though this is forked, it doesn't end up hanging
broadcast_strand = yield tap.CallFork(broadcaster, (5,))
value = yield tap.Join(recv_strand)
assert value == "real" # got intercepted
yield tap.Join(intercept_strand)
yield tap.Cancel(broadcast_strand)
return value
assert tap.run(fn, test_mode=True) == "real" # got intercepted
def test_error_stack():
def broadcaster(value):
yield tap.Broadcast('key', value)
def receiver():
value = yield tap.Receive('key')
if value < 10:
broadcast_strand = yield tap.CallFork(broadcaster, (value + 1,))
receive_strand = yield tap.CallFork(receiver)
yield tap.Join([broadcast_strand, receive_strand])
raise Exception("too large")
return value
def fn():
# fork in apparently wrong order!
broadcast_strand = yield tap.CallFork(broadcaster, (5,))
recv_strand = yield tap.CallFork(receiver)
yield tap.Join(broadcast_strand)
value = yield tap.Join(recv_strand)
return value
with pytest.raises(tap.TapystryError) as x:
tap.run(fn)
# print(x.value)
assert str(x.value).startswith("Exception caught at")
assert str(x.value).count(", in receiver\n") == 6
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-04 09:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('problem', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='date',
field=models.CharField(default=b'2017/6/4', max_length=10),
),
]
|
import pandas as pd
def test_roundtrip_substrait(duckdb_cursor):
res = duckdb_cursor.get_substrait("select * from integers limit 5")
proto_bytes = res.fetchone()[0]
query_result = duckdb_cursor.from_substrait(proto_bytes)
expected = pd.Series(range(5), name="i", dtype="int32")
pd.testing.assert_series_equal(query_result.df()["i"], expected)
|
import datetime
from flask import request
import db
import validators
def split_ip(ip):
"""Split a IP address given as string into a 4-tuple of integers."""
return tuple(int(part) for part in ip.split('.'))
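# Illustrative example (added): split_ip('192.168.1.10') returns (192, 168, 1, 10);
# read_all_devices below uses the last octet as its sort key.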
def read_all_devices():
device_list = []
all_devices = db.find_all_devices()
if all_devices is not None:
for host in all_devices:
device_info = translate_device_info(host, False)
device_list.append(device_info)
device_list.sort(key=lambda device: split_ip(device['ip'])[3], reverse=False)
return device_list
def translate_device_info(host, get_ports):
device_id = host['device_id']
now = datetime.datetime.now()
first_seen = host['first_seen']
first_seen_today = first_seen.date() == now.date()
last_seen = host['last_seen']
online = last_seen + datetime.timedelta(minutes=60) > now
port_list = []
open_ports = host.get('open_ports')
    if get_ports:
ports = db.find_ports_of_device(device_id)
open_ports = len(ports)
for port in ports:
port_info_tmp = {'first_seen': port['first_seen'].strftime('%Y-%m-%d %H:%M:%S'),
'last_seen': port['last_seen'].strftime('%Y-%m-%d %H:%M:%S'),
'protocol': port['protocol'],
'name': port['name'],
'product': port['product'],
'version': port['version'],
'port': int(port['port_num'])}
port_list.append(port_info_tmp)
port_list = sorted(port_list, key=lambda k: k['port'])
device_info = {'hostname': host['hostname'],
'nickname': host['nickname'],
'ip': host['ip'],
'mac': host['mac'],
'vendor': host['vendor'],
'first_seen': first_seen.strftime('%Y-%m-%d %H:%M:%S'),
'first_seen_today': first_seen_today,
'last_seen': last_seen.strftime('%Y-%m-%d %H:%M:%S'),
'online': online,
'device_id': host['device_id'],
'open_ports': open_ports,
'port_list': port_list
}
return device_info
def read_device_details_from_request_body():
hostname = ''
ip = ''
mac = ''
vendor = ''
f = request.form
for key in f.keys():
for _ in f.getlist(key):
if key == "hostname":
hostname = request.form['hostname']
if key == "ip":
ip = request.form['ip']
if key == "mac":
mac = request.form['mac']
# Convert mac to uppercase alphanumeric
mac = validators.convert_mac(mac)
if key == "vendor":
vendor = request.form['vendor']
new_device_data = {'hostname': hostname,
'nickname': hostname,
'ip': ip,
'mac': mac,
'vendor': vendor,
'first_seen': datetime.datetime.now(),
'last_seen': datetime.datetime.now()}
return new_device_data
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for physical router
configuration manager
"""
from device_conf import DeviceConf
from ansible_base import AnsibleBase
from dm_utils import PushConfigState
from dm_utils import DMUtils
from dm_utils import DMIndexer
from cfgm_common.vnc_db import DBBase
from cfgm_common.uve.physical_router.ttypes import *
from cfgm_common.exceptions import ResourceExistsError
from vnc_api.vnc_api import *
import struct
import socket
import gevent
import traceback
from gevent import queue
from cfgm_common.vnc_object_db import VncObjectDBClient
from netaddr import IPAddress
from cfgm_common.zkclient import IndexAllocator
from cfgm_common import vnc_greenlets
from sandesh_common.vns.constants import DEVICE_MANAGER_KEYSPACE_NAME
from time import gmtime, strftime
from cfgm_common.uve.physical_router_config.ttypes import *
from cfgm_common.uve.service_status.ttypes import *
import re
import json
import pyhash
class DBBaseDM(DBBase):
obj_type = __name__
# end DBBaseDM
class BgpRouterDM(DBBaseDM):
_dict = {}
obj_type = 'bgp_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.bgp_routers = {}
self.physical_router = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj.get('bgp_router_parameters') or {}
if self.params and self.params.get('autonomous_system') is None:
self.params[
'autonomous_system'] = GlobalSystemConfigDM.get_global_asn()
self.update_single_ref('physical_router', obj)
new_peers = {}
for ref in obj.get('bgp_router_refs', []):
new_peers[ref['uuid']] = ref['attr']
for peer_id in set(self.bgp_routers.keys()) - set(new_peers.keys()):
peer = BgpRouterDM.get(peer_id)
if not peer:
continue
if self.uuid in peer.bgp_routers:
del peer.bgp_routers[self.uuid]
for peer_id, attrs in new_peers.items():
peer = BgpRouterDM.get(peer_id)
if peer:
peer.bgp_routers[self.uuid] = attrs
self.bgp_routers = new_peers
def get_all_bgp_router_ips(self):
bgp_router_ips = {}
if self.params.get('address'):
bgp_router_ips[self.name] = self.params['address']
for peer_uuid in self.bgp_routers:
peer = BgpRouterDM.get(peer_uuid)
if peer is None or not peer.params.get('address'):
continue
bgp_router_ips[peer.name] = peer.params['address']
return bgp_router_ips
# end get_all_bgp_router_ips
# end class BgpRouterDM
class PhysicalRouterDM(DBBaseDM):
_dict = {}
obj_type = 'physical_router'
_sandesh = None
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_networks = set()
self.logical_routers = set()
self.bgp_router = None
self.physical_router_role = None
self.routing_bridging_roles = None
self.config_manager = None
self.service_endpoints = set()
self.router_mode = None
self.e2_service_index = 0
self.e2_service_providers = set()
self.nc_q = queue.Queue(maxsize=1)
self.vn_ip_map = {'irb': {}, 'lo0': {}}
self.dci_ip_map = {}
self.allocated_asn = None
self.config_sent = False
self.ae_index_allocator = DMIndexer(
DMUtils.get_max_ae_device_count(), DMIndexer.ALLOC_DECREMENT)
self.init_cs_state()
self.fabric = None
self.virtual_port_groups = []
self.port_tuples = []
self.node_profile = None
self.nc_handler_gl = None
self.update(obj_dict)
self.set_conf_sent_state(False)
self.config_repush_interval = PushConfigState.get_repush_interval()
self.nc_handler_gl = vnc_greenlets.VncGreenlet("VNC Device Manager",
self.nc_handler)
# end __init__
def use_ansible_plugin(self):
return PushConfigState.is_push_mode_ansible() and not self.is_ec2_role()
# end use_ansible_plugin
def reinit_device_plugin(self):
plugin_params = {
"physical_router": self
}
if self.use_ansible_plugin():
plugin_base = AnsibleBase
else:
plugin_base = DeviceConf
if not self.config_manager:
self.config_manager = plugin_base.plugin(self.vendor,
self.product, plugin_params, self._logger)
elif self.config_manager.verify_plugin(self.vendor,
self.product, self.physical_router_role):
self.config_manager.update()
else:
self.config_manager.clear()
self.config_manager = plugin_base.plugin(self.vendor,
self.product, plugin_params, self._logger)
# end reinit_device_plugin
def is_ec2_role(self):
return self.physical_router_role and\
self.physical_router_role.lower().startswith('e2-')
# end is_ec2_role
def has_rb_role(self, role):
if self.routing_bridging_roles and role in self.routing_bridging_roles:
return True
return False
# end has_rb_role
def is_erb_gateway(self):
if self.routing_bridging_roles:
gateway_roles = [r for r in self.routing_bridging_roles if 'ERB' in r and 'Gateway' in r]
if gateway_roles:
return True
return False
# end is_erb_gateway
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.management_ip = obj.get('physical_router_management_ip')
self.loopback_ip = obj.get('physical_router_loopback_ip')
self.dataplane_ip = obj.get(
'physical_router_dataplane_ip') or self.loopback_ip
self.vendor = obj.get('physical_router_vendor_name') or ''
self.product = obj.get('physical_router_product_name') or ''
self.device_family = obj.get('physical_router_device_family')
self.vnc_managed = obj.get('physical_router_vnc_managed')
self.underlay_managed = obj.get('physical_router_underlay_managed')
self.physical_router_role = obj.get('physical_router_role')
routing_bridging_roles = obj.get('routing_bridging_roles')
if routing_bridging_roles is not None:
self.routing_bridging_roles = routing_bridging_roles.get('rb_roles')
self.user_credentials = obj.get('physical_router_user_credentials')
self.physical_router_snmp = obj.get('physical_router_snmp')
self.physical_router_lldp = obj.get('physical_router_lldp')
self.telemetry_info = obj.get('telemetry_info')
self.junos_service_ports = obj.get(
'physical_router_junos_service_ports')
self.update_single_ref('bgp_router', obj)
self.update_multiple_refs('virtual_network', obj)
self.update_multiple_refs('logical_router', obj)
self.physical_interfaces = set([pi['uuid'] for pi in
obj.get('physical_interfaces', [])])
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
self.update_single_ref('fabric', obj)
self.update_multiple_refs('service_endpoint', obj)
self.update_multiple_refs('e2_service_provider', obj)
self.update_single_ref('node_profile', obj)
self.allocate_asn()
self.reinit_device_plugin()
# end update
def set_associated_lags(self, lag_uuid):
self.virtual_port_groups.append(lag_uuid)
def remove_associated_lags(self, lag_uuid):
self.virtual_port_groups.remove(lag_uuid)
def set_associated_port_tuples(self, pt_uuid):
self.port_tuples.append(pt_uuid)
def remove_associated_port_tuples(self, pt_uuid):
self.port_tuples.remove(pt_uuid)
def get_lr_dci_map(self):
lrs = self.logical_routers
dcis = []
for lr_uuid in lrs or []:
lr = LogicalRouterDM.get(lr_uuid)
if lr and lr.data_center_interconnect:
dci = DataCenterInterconnectDM.get(lr.data_center_interconnect)
if dci:
dcis.append({"from": lr_uuid, "dci": lr.data_center_interconnect})
return dcis
# end get_lr_dci_map
def get_dci_list(self):
lrs = self.logical_routers
dcis = []
for lr_uuid in lrs or []:
lr = LogicalRouterDM.get(lr_uuid)
if lr and lr.data_center_interconnect:
dci = DataCenterInterconnectDM.get(lr.data_center_interconnect)
if dci:
dcis.append(lr.data_center_interconnect)
return dcis
# end get_dci_list
def get_dci_bgp_params(self, dci_uuid):
dci = DataCenterInterconnectDM.get(dci_uuid)
params = {}
if dci:
params["asn"] = dci.allocated_asn
params["hold_time"] = dci.bgp_hold_time
params["families"] = dci.bgp_address_families
params["ip"] = self.loopback_ip
params["name"] = DMUtils.get_pr_dci_bgp_group(self.name, dci_uuid)
params["type"] = "internal"
return params
# end get_dci_bgp_params
def get_dci_bgp_neighbours(self, dci_uuid):
dci = DataCenterInterconnectDM.get(dci_uuid)
neigh = []
if dci and dci.get_connected_physical_routers():
for pr in dci.get_connected_physical_routers():
if self.uuid == pr.uuid:
continue
params = {}
params["asn"] = dci.allocated_asn
params["hold_time"] = dci.bgp_hold_time
params["families"] = dci.bgp_address_families
params["ip"] = pr.loopback_ip
params["name"] = DMUtils.get_pr_dci_bgp_group(pr.name, dci_uuid)
params["type"] = "internal"
neigh.append(params)
return neigh
# end get_dci_bgp_neighbours
def verify_allocated_asn(self, fabric):
self._logger.debug("physical router: verify allocated asn for %s" %
self.uuid)
if self.allocated_asn is not None and fabric is not None:
for namespace_uuid in fabric.fabric_namespaces:
namespace = FabricNamespaceDM.get(namespace_uuid)
if namespace is None:
continue
if namespace.as_numbers is not None:
if self.allocated_asn in namespace.as_numbers:
self._logger.debug("physical router: asn %d is allocated"
% self.allocated_asn)
return True
if namespace.asn_ranges is not None:
for asn_range in namespace.asn_ranges:
if asn_range[0] <= self.allocated_asn <= asn_range[1]:
self._logger.debug(
"physical router: asn %d is allocated" %
self.allocated_asn)
return True
self._logger.debug("physical router: asn not allocated")
return False
# end verify_allocated_asn
def allocate_asn(self):
if not self.fabric or not self.underlay_managed:
return
fabric = FabricDM.get(self.fabric)
if self.verify_allocated_asn(fabric):
return
# get the configured asn ranges for this fabric
asn_ranges = []
for namespace_uuid in fabric.fabric_namespaces:
namespace = FabricNamespaceDM.get(namespace_uuid)
if namespace is None:
continue
if namespace.as_numbers is not None:
asn_ranges.extend([(asn, asn) for asn in namespace.as_numbers])
if namespace.asn_ranges is not None:
asn_ranges.extend(namespace.asn_ranges)
asn_ranges = sorted(asn_ranges)
# find the first available asn
# loop through all asns to account for dangling asn in a range
# due to deleted PRs
for asn_range in asn_ranges:
for asn in range(asn_range[0], asn_range[1] + 1):
if self._object_db.get_pr_for_asn(asn) is None:
self.allocated_asn = asn
self._object_db.add_asn(self.uuid, asn)
self._logger.debug(
"physical router: allocated asn %d for %s" %
(self.allocated_asn, self.uuid))
return
self._logger.error(
"physical router: could not find an unused asn to allocate for %s"
% self.uuid)
# end allocate_asn
def wait_for_config_push(self, timeout=1):
if self.use_ansible_plugin() and self.config_manager:
while self.config_manager.push_in_progress():
try:
self.nc_q.get(True, timeout)
except queue.Empty:
pass
# end wait_for_config_push
def delete_handler(self):
self.wait_for_config_push()
if self.nc_handler_gl:
gevent.kill(self.nc_handler_gl)
self.update_single_ref('bgp_router', {})
self.update_multiple_refs('virtual_network', {})
self.update_multiple_refs('logical_router', {})
self.update_multiple_refs('service_endpoint', {})
self.update_multiple_refs('e2_service_provider', {})
self.update_single_ref('fabric', {})
if self.config_manager:
if self.use_ansible_plugin():
self.config_manager.push_conf(is_delete=True)
max_retries = 3
for _ in range(max_retries):
if self.config_manager.retry():
self.config_manager.push_conf(is_delete=True)
else:
break
self.set_conf_sent_state(False)
elif self.is_vnc_managed():
self.config_manager.push_conf(is_delete=True)
self.config_manager.clear()
self._object_db.delete_pr(self.uuid)
self.uve_send(True)
self.update_single_ref('node_profile', {})
# end delete_handler
def delete_obj(self):
vnc_greenlets.VncGreenlet("VNC Device Manager", self.delete_handler)
# end delete_obj
@classmethod
def reset(cls):
for obj in cls._dict.values():
if obj.config_manager:
obj.config_manager.clear()
cls._dict = {}
# end reset
def is_junos_service_ports_enabled(self):
if (self.junos_service_ports is not None
and self.junos_service_ports.get('service_port') is not None):
return True
return False
# end is_junos_service_ports_enabled
def block_and_set_config_state(self, timeout):
try:
if self.nc_q.get(True, timeout) is not None:
self.set_config_state()
except queue.Empty:
self.set_config_state()
# end block_and_set_config_state
def set_config_state(self):
try:
self.nc_q.put_nowait(1)
except queue.Full:
pass
# end
def nc_handler(self):
while self.nc_q.get() is not None:
try:
self.push_config()
except Exception as e:
tb = traceback.format_exc()
self._logger.error("Exception: " + str(e) + tb)
# end
def is_valid_ip(self, ip_str):
try:
socket.inet_aton(ip_str)
return True
except socket.error:
return False
# end
def init_cs_state(self):
asn = self._object_db.get_asn_for_pr(self.uuid)
if asn:
self.allocated_asn = asn
vn_subnet_set = self._object_db.get_pr_vn_set(self.uuid)
for vn_subnet in vn_subnet_set:
subnet = vn_subnet[0]
ip_used_for = vn_subnet[1]
ip = self._object_db.get_ip(self.uuid + ':' + subnet, ip_used_for)
if ip:
self.vn_ip_map[ip_used_for][subnet] = ip
dci_set = self._object_db.get_pr_dci_set(self.uuid)
for dci_key in dci_set:
ip = self._object_db.get_dci_ip(self.uuid + ':' + dci_key)
if ip:
self.dci_ip_map[dci_key] = ip
# end init_cs_state
def reserve_ip(self, vn_uuid, subnet_uuid):
try:
vn = VirtualNetwork()
vn.set_uuid(vn_uuid)
ip_addr = self._manager._vnc_lib.virtual_network_ip_alloc(
vn,
subnet=subnet_uuid)
if ip_addr:
return ip_addr[0] # ip_alloc default ip count is 1
except Exception as e:
self._logger.error("Exception: %s" % (str(e)))
return None
# end
def free_ip(self, vn_uuid, ip_addr):
try:
vn = VirtualNetwork()
vn.set_uuid(vn_uuid)
ip_addr = ip_addr.split('/')[0]
self._manager._vnc_lib.virtual_network_ip_free(
vn, [ip_addr])
return True
except Exception as e:
self._logger.error("Exception: %s" % (str(e)))
return False
# end
def get_vn_irb_ip_map(self):
ips = {'irb': {}, 'lo0': {}}
for ip_used_for in ['irb', 'lo0']:
for vn_subnet, ip_addr in self.vn_ip_map[ip_used_for].items():
(vn_uuid, subnet_prefix) = vn_subnet.split(':', 1)
vn = VirtualNetworkDM.get(vn_uuid)
if vn_uuid not in ips[ip_used_for]:
ips[ip_used_for][vn_uuid] = set()
ips[ip_used_for][vn_uuid].add(
(ip_addr,
vn.gateways[subnet_prefix].get('default_gateway')))
return ips
# end get_vn_irb_ip_map
def get_dci_lo_ip(self, dci_uuid):
if not self.dci_ip_map:
return None
return self.dci_ip_map.get(self.uuid + ":" + dci_uuid)
# end get_dci_lo_ip
def evaluate_dci_ip_map(self):
if not self.has_rb_role('DCI-Gateway'):
self._logger.debug("NO DCI Gateway role configured for pr: " + self.uuid)
return
if not DataCenterInterconnectDM.dci_network:
self._logger.debug("no dci network: " + self.uuid)
return
vn_uuid = DataCenterInterconnectDM.dci_network.uuid
new_dci_ip_set = set()
for dci_info in self.get_lr_dci_map():
dci_uuid = dci_info.get("dci")
dci = DataCenterInterconnectDM.get(dci_uuid)
if not dci:
continue
key = self.uuid + ":" + dci_uuid
new_dci_ip_set.add(key)
old_set = set(self.dci_ip_map.keys())
delete_set = old_set.difference(new_dci_ip_set)
create_set = new_dci_ip_set.difference(old_set)
for dci_id in delete_set:
ret = self.free_ip(vn_uuid, self.dci_ip_map[dci_id])
if ret == False:
self._logger.error("Unable to free ip for dci/pr "
"(%s)" % (
dci_id))
ret = self._object_db.delete_dci_ip(dci_id)
if ret == False:
self._logger.error("Unable to free ip from db for dci/pr "
"(%s)" % (
dci_id))
continue
self._object_db.delete_from_pr_dci_map(self.uuid, dci_id)
del self.dci_ip_map[dci_id]
vn = DataCenterInterconnectDM.dci_network
if not vn.gateways.keys():
self._logger.error("no subnets configured for dci")
return
for dci_id in create_set:
subnet_prefix = vn.gateways.keys()[0]
subnet_uuid = vn.gateways[subnet_prefix].get('subnet_uuid')
ip_addr = self.reserve_ip(vn_uuid, subnet_uuid)
if ip_addr is None:
self._logger.error("Unable to allocate ip for dci/pr "
"(%s)" % (
dci_id))
continue
ret = self._object_db.add_dci_ip(dci_id,
ip_addr)
if ret == False:
self._logger.error("Unable to store ip for dci/pr "
"(%s)" % (
dci_id))
if self.free_ip(vn_uuid, ip_addr) == False:
self._logger.error("Unable to free ip for dci/pr "
"(%s)" % (
dci_id))
continue
self._object_db.add_to_pr_dci_map(self.uuid, dci_id)
self.dci_ip_map[dci_id] = ip_addr
# end evaluate_dci_ip_map
def evaluate_vn_irb_ip_map(self, vn_set, fwd_mode, ip_used_for, ignore_external=False):
is_erb = self.is_erb_gateway()
new_vn_ip_set = set()
for vn_uuid in vn_set:
vn = VirtualNetworkDM.get(vn_uuid)
if not vn:
continue
# dont need irb ip, gateway ip
if vn.get_forwarding_mode() != fwd_mode:
continue
if vn.router_external and ignore_external:
continue
for subnet_prefix in vn.gateways.keys():
new_vn_ip_set.add(vn_uuid + ':' + subnet_prefix)
old_set = set(self.vn_ip_map[ip_used_for].keys())
delete_set = old_set.difference(new_vn_ip_set)
create_set = new_vn_ip_set.difference(old_set)
for vn_subnet in delete_set:
(vn_uuid, subnet_prefix) = vn_subnet.split(':', 1)
if not is_erb:
ret = self.free_ip(vn_uuid, self.vn_ip_map[ip_used_for][vn_subnet])
if ret == False:
self._logger.error("Unable to free ip for vn/subnet/pr "
"(%s/%s/%s)" % (
vn_uuid,
subnet_prefix,
self.uuid))
ret = self._object_db.delete_ip(
self.uuid + ':' + vn_uuid + ':' + subnet_prefix, ip_used_for)
if ret == False:
self._logger.error("Unable to free ip from db for vn/subnet/pr "
"(%s/%s/%s)" % (
vn_uuid,
subnet_prefix,
self.uuid))
continue
self._object_db.delete_from_pr_map(self.uuid, vn_subnet, ip_used_for)
del self.vn_ip_map[ip_used_for][vn_subnet]
for vn_subnet in create_set:
(vn_uuid, subnet_prefix) = vn_subnet.split(':', 1)
vn = VirtualNetworkDM.get(vn_uuid)
subnet_uuid = vn.gateways[subnet_prefix].get('subnet_uuid')
(sub, length) = subnet_prefix.split('/')
if is_erb:
ip_addr = vn.gateways[subnet_prefix].get('default_gateway')
else:
ip_addr = self.reserve_ip(vn_uuid, subnet_uuid)
if ip_addr is None:
self._logger.error("Unable to allocate ip for vn/subnet/pr "
"(%s/%s/%s)" % (
vn_uuid,
subnet_prefix,
self.uuid))
continue
ret = self._object_db.add_ip(self.uuid + ':' + vn_uuid + ':' + subnet_prefix,
ip_used_for, ip_addr + '/' + length)
if ret == False:
self._logger.error("Unable to store ip for vn/subnet/pr "
"(%s/%s/%s)" % (
self.uuid,
subnet_prefix,
self.uuid))
if self.free_ip(vn_uuid, ip_addr) == False:
self._logger.error("Unable to free ip for vn/subnet/pr "
"(%s/%s/%s)" % (
self.uuid,
subnet_prefix,
self.uuid))
continue
self._object_db.add_to_pr_map(self.uuid, vn_subnet, ip_used_for)
self.vn_ip_map[ip_used_for][vn_subnet] = ip_addr + '/' + length
# end evaluate_vn_irb_ip_map
def is_vnc_managed(self):
if not self.vnc_managed:
self._logger.info("vnc managed property must be set for a physical router to get auto "
"configured, ip: %s, not pushing config" % (self.management_ip))
return False
return True
# end is_vnc_managed
def set_conf_sent_state(self, state):
self.config_sent = state
# end set_conf_sent_state
def is_conf_sent(self):
return self.config_sent
# end is_conf_sent
def delete_config(self):
if self.is_conf_sent() and (not self.is_vnc_managed() or (not self.bgp_router and self.physical_router_role != 'pnf')):
if not self.config_manager:
self.uve_send()
return False
# user must have unset the vnc managed property
self.config_manager.push_conf(is_delete=True)
if self.config_manager.retry():
# failed commit: set repush interval upto max value
self.config_repush_interval = min([2 * self.config_repush_interval,
PushConfigState.get_repush_max_interval()])
self.block_and_set_config_state(self.config_repush_interval)
return True
            # successful commit: reset repush interval
self.config_repush_interval = PushConfigState.get_repush_interval()
self.set_conf_sent_state(False)
self.uve_send()
self.config_manager.clear()
return True
return False
# end delete_config
def get_pnf_vrf_name(self, si_obj, interface_type, first_tag):
if not first_tag:
return '_contrail-' + si_obj.name + '-' + interface_type
else:
return ('_contrail-' + si_obj.name + '-' + interface_type
+ '-sc-entry-point')
# end get_pnf_vrf_name
def allocate_pnf_resources(self, vmi):
resources = self._object_db.get_pnf_resources(
vmi, self.uuid)
network_id = int(resources['network_id'])
if vmi.service_interface_type == "left":
ip = str(IPAddress(network_id*4+1))
if vmi.service_interface_type == "right":
ip = str(IPAddress(network_id*4+2))
ip = ip + "/30"
return {
"ip_address": ip,
"vlan_id": resources['vlan_id'],
"unit_id": resources['unit_id']}
# end allocate_pnf_resources
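    # Illustrative example (added, assumed values): for network_id=5 the left
    # interface would be assigned str(IPAddress(5*4+1)) + "/30" == "0.0.0.21/30"
    # and the right interface "0.0.0.22/30", i.e. one /30 pair per network id.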
def compute_pnf_static_route(self, ri_obj, pnf_dict):
"""
Compute all the static route for the pnfs on the device
Args:
ri_obj: The routing instance need to added the static routes
pnf_dict: The pnf mapping dict
Returns:
static_routes: a static route list
[
"service_chain_address":{
"next-hop":"ip_address",
"preference": int #use for the load balance
}
]
"""
        preference = 0
static_routes = {}
for vmi_uuid in ri_obj.virtual_machine_interfaces:
# found the service chain address
# Check if this vmi is a PNF vmi
vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
preference = 0
if vmi is not None:
if vmi.service_instance is not None:
li_list = []
if vmi.service_interface_type == 'left':
li_list = pnf_dict[vmi.service_instance]['right']
elif vmi.service_interface_type == 'right':
li_list = pnf_dict[vmi.service_instance]['left']
for li in li_list:
static_entry = {
"next-hop": li.ip.split('/')[0]
}
if preference > 0:
static_entry[
"preference"] = preference
preference += 1
srs = static_routes.setdefault(
ri_obj.service_chain_address, [])
srs.append(static_entry)
return static_routes
# end compute_pnf_static_route
def push_config(self):
if not self.config_manager:
self._logger.info("Plugin not found for vendor family(%s:%s), "
"ip: %s, not pushing config" % (str(self.vendor),
str(self.product), self.management_ip))
return
if self.delete_config() or not self.is_vnc_managed():
return
self.config_manager.initialize()
if not self.config_manager.validate_device():
self._logger.error("physical router: %s, device config validation failed. "
"device configuration=%s" % (self.uuid, \
str(self.config_manager.get_device_config())))
return
config_size = self.config_manager.push_conf()
if not config_size:
return
self.set_conf_sent_state(True)
self.uve_send()
if self.config_manager.retry():
# failed commit: set repush interval upto max value
self.config_repush_interval = min([2 * self.config_repush_interval,
PushConfigState.get_repush_max_interval()])
self.block_and_set_config_state(self.config_repush_interval)
else:
# successful commit: reset repush interval to base
self.config_repush_interval = PushConfigState.get_repush_interval()
if PushConfigState.get_push_delay_enable():
# sleep, delay=compute max delay between two successive commits
gevent.sleep(self.get_push_config_interval(config_size))
# end push_config
def get_push_config_interval(self, last_config_size):
config_delay = int(
(last_config_size/1000) * PushConfigState.get_push_delay_per_kb())
delay = min([PushConfigState.get_push_delay_max(), config_delay])
return delay
def is_service_port_id_valid(self, service_port_id):
# mx allowed ifl unit number range is (1, 16385) for service ports
if service_port_id < 1 or service_port_id > 16384:
return False
return True
# end is_service_port_id_valid
def uve_send(self, deleted=False):
pr_trace = UvePhysicalRouterConfig(
name=self.name,
ip_address=self.management_ip,
connected_bgp_router=self.bgp_router,
auto_conf_enabled=self.vnc_managed,
product_info=str(self.vendor) + ':' + str(self.product))
if deleted:
pr_trace.deleted = True
pr_msg = UvePhysicalRouterConfigTrace(
data=pr_trace,
sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
return
commit_stats = {}
if self.config_manager:
commit_stats = self.config_manager.get_commit_stats()
if self.is_vnc_managed():
pr_trace.netconf_enabled_status = True
pr_trace.last_commit_time = \
commit_stats.get('last_commit_time', '')
pr_trace.last_commit_duration = \
commit_stats.get('last_commit_duration', '0')
pr_trace.commit_status_message = \
commit_stats.get('commit_status_message', '')
pr_trace.total_commits_sent_since_up = \
commit_stats.get('total_commits_sent_since_up', 0)
else:
pr_trace.netconf_enabled_status = False
pr_msg = UvePhysicalRouterConfigTrace(
data=pr_trace, sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
# end uve_send
# end PhysicalRouterDM
class GlobalVRouterConfigDM(DBBaseDM):
_dict = {}
obj_type = 'global_vrouter_config'
global_vxlan_id_mode = None
global_forwarding_mode = None
global_encapsulation_priorities = []
global_encapsulation_priority = None
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
new_global_vxlan_id_mode = obj.get('vxlan_network_identifier_mode')
new_global_encapsulation_priority = None
new_global_encapsulation_priorities = []
encapsulation_priorities = obj.get('encapsulation_priorities')
if encapsulation_priorities:
new_global_encapsulation_priorities = encapsulation_priorities.get("encapsulation")
if new_global_encapsulation_priorities:
new_global_encapsulation_priority = new_global_encapsulation_priorities[0]
new_global_forwarding_mode = obj.get('forwarding_mode')
if (GlobalVRouterConfigDM.global_vxlan_id_mode !=
new_global_vxlan_id_mode or
GlobalVRouterConfigDM.global_forwarding_mode !=
new_global_forwarding_mode or
GlobalVRouterConfigDM.global_encapsulation_priorities !=
new_global_encapsulation_priorities or
GlobalVRouterConfigDM.global_encapsulation_priority !=
new_global_encapsulation_priority):
GlobalVRouterConfigDM.global_vxlan_id_mode = \
new_global_vxlan_id_mode
GlobalVRouterConfigDM.global_forwarding_mode = \
new_global_forwarding_mode
GlobalVRouterConfigDM.global_encapsulation_priorities = \
new_global_encapsulation_priorities
GlobalVRouterConfigDM.global_encapsulation_priority = \
new_global_encapsulation_priority
self.update_physical_routers()
# end update
def update_physical_routers(self):
for pr in PhysicalRouterDM.values():
pr.set_config_state()
# end update_physical_routers
@classmethod
def is_global_vxlan_id_mode_auto(cls):
if (cls.global_vxlan_id_mode is not None and
cls.global_vxlan_id_mode == 'automatic'):
return True
return False
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
# end delete
# end GlobalVRouterConfigDM
class GlobalSystemConfigDM(DBBaseDM):
_dict = {}
obj_type = 'global_system_config'
global_asn = None
ip_fabric_subnets = None
dci_asn_namespace = None
dci_loopback_namespace = None
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.physical_routers = set()
self.data_center_interconnects = set()
self.node_profiles = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
GlobalSystemConfigDM.global_asn = obj.get('autonomous_system')
GlobalSystemConfigDM.ip_fabric_subnets = obj.get('ip_fabric_subnets')
GlobalSystemConfigDM.dci_asn_namespace = obj.get('data_center_interconnect_asn_namespace')
GlobalSystemConfigDM.dci_loopback_namespace = obj.get('data_center_interconnect_loopback_namespace')
self.set_children('physical_router', obj)
self.set_children('data_center_interconnect', obj)
self.set_children('node_profile', obj)
# end update
@classmethod
def get_global_asn(cls):
return cls.global_asn
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
        del cls._dict[uuid]
# end delete
# end GlobalSystemConfigDM
class PhysicalInterfaceDM(DBBaseDM):
_dict = {}
obj_type = 'physical_interface'
_esi_map = {}
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.physical_router = None
self.logical_interfaces = set()
self.virtual_machine_interfaces = set()
self.physical_interfaces = set()
self.mtu = 0
self.esi = None
self.interface_type = None
self.port = None
obj = self.update(obj_dict)
self.add_to_parent(obj)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.physical_router = self.get_parent_uuid(obj)
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
self.name = obj.get('display_name')
if self.name and re.search(r"[0-9]+_[0-9]+$", self.name):
# For channelized ports
self.name = self.name.replace("_", ":")
self.esi = obj.get('ethernet_segment_identifier')
self.interface_type = obj.get('physical_interface_type')
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('physical_interface', obj)
self.update_single_ref('port', obj)
return obj
# end update
def get_pr_uuid(self):
return self.physical_router
def delete_obj(self):
self.update_multiple_refs('virtual_machine_interface', {})
self.update_multiple_refs('physical_interface', {})
self.update_single_ref('port', None)
self.remove_from_parent()
# end delete_obj
# end PhysicalInterfaceDM
class LogicalInterfaceDM(DBBaseDM):
_dict = {}
obj_type = 'logical_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interface = None
self.vlan_tag = 0
self.li_type = None
self.instance_ip = None
obj = self.update(obj_dict)
self.add_to_parent(obj)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
if obj['parent_type'] == 'physical-router':
self.physical_router = self.get_parent_uuid(obj)
self.physical_interface = None
else:
self.physical_interface = self.get_parent_uuid(obj)
self.physical_router = None
self.name = obj.get('display_name')
if self.name and re.search(r"[0-9]+(_[0-9]+){2}$", self.name):
# For channelized ports
self.name = self.name.replace("_", ":")
self.vlan_tag = obj.get('logical_interface_vlan_tag', 0)
self.li_type = obj.get('logical_interface_type', 0)
self.update_single_ref('virtual_machine_interface', obj)
self.update_single_ref('instance_ip', obj)
return obj
# end update
@classmethod
def get_sg_list(cls):
sg_list = []
li_dict = cls._dict
for li_obj in li_dict.values() or []:
sg_list += li_obj.get_attached_sgs()
return sg_list
# end get_sg_list
def get_attached_sgs(self):
sg_list = []
if self.virtual_machine_interface:
vmi = VirtualMachineInterfaceDM.get(self.virtual_machine_interface)
if not vmi:
return sg_list
for sg in vmi.security_groups or []:
sg = SecurityGroupDM.get(sg)
if sg:
sg_list.append(sg)
return sg_list
# end get_attached_sgs
def get_attached_acls(self):
acl_list = []
        sg_list = self.get_attached_sgs()
for sg in sg_list or []:
for acl in sg.access_control_lists or []:
acl = AccessControlListDM.get(acl)
if acl:
acl_list.append(acl)
return acl_list
# end get_attached_acls
def delete_obj(self):
if self.physical_interface:
parent = PhysicalInterfaceDM.get(self.physical_interface)
elif self.physical_router:
parent = PhysicalRouterDM.get(self.physical_router)
if parent:
parent.logical_interfaces.discard(self.uuid)
self.update_single_ref('virtual_machine_interface', {})
self.update_single_ref('instance_ip', {})
self.remove_from_parent()
# end delete_obj
# end LogicalInterfaceDM
class FloatingIpDM(DBBaseDM):
_dict = {}
obj_type = 'floating_ip'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interface = None
self.floating_ip_address = None
self.floating_ip_pool = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.floating_ip_address = obj.get("floating_ip_address")
self.update_single_ref('virtual_machine_interface', obj)
self.add_to_parent(obj)
# end update
def get_public_network(self):
if self.floating_ip_pool is None:
return None
pool_obj = FloatingIpPoolDM.get(self.floating_ip_pool)
return pool_obj.virtual_network if pool_obj else None
# end get_public_network
def get_private_network(self):
if self.virtual_machine_interface:
vmi_obj = VirtualMachineInterfaceDM.get(self.virtual_machine_interface)
return vmi_obj.virtual_network if vmi_obj else None
return None
# end get_private_network
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('virtual_machine_interface', {})
obj.remove_from_parent()
del cls._dict[uuid]
# end delete
# end FloatingIpDM
class FloatingIpPoolDM(DBBaseDM):
_dict = {}
obj_type = 'floating_ip_pool'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_network = None
self.floating_ips = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.add_to_parent(obj)
self.update_multiple_refs('floating_ip', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('floating_ip', {})
obj.remove_from_parent()
del cls._dict[uuid]
# end delete
# end FloatingIpPoolDM
class InstanceIpDM(DBBaseDM):
_dict = {}
obj_type = 'instance_ip'
def __init__(self, uuid, obj_dict=None):
self.name = None
self.fq_name = None
self.uuid = uuid
self.instance_ip_address = None
self.virtual_machine_interface = None
self.logical_interface = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.instance_ip_address = obj.get("instance_ip_address")
self.update_single_ref('virtual_machine_interface', obj)
self.update_single_ref('logical_interface', obj)
# end update
def delete_obj(self):
self.update_single_ref('virtual_machine_interface', {})
self.update_single_ref('logical_interface', {})
# end delete_obj
# end InstanceIpDM
class AccessControlListDM(DBBaseDM):
_dict = {}
obj_type = 'access_control_list'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.vnc_obj = None
self.security_group = None
self.update(obj_dict)
self.is_ingress = self.name.startswith('ingress-')
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj.get('fq_name')[-1]
self.vnc_obj = self.vnc_obj_from_dict(self.obj_type, obj)
if obj.get('parent_type') == "security-group":
self.add_to_parent(obj)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.security_group:
obj.remove_from_parent()
del cls._dict[uuid]
# end delete
# end AccessControlListDM
class SecurityGroupDM(DBBaseDM):
_dict = {}
obj_type = 'security_group'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.virtual_machine_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.update_multiple_refs('virtual_machine_interface', obj)
self.set_children('access_control_list', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end SecurityGroupDM
class VirtualMachineInterfaceDM(DBBaseDM):
_dict = {}
obj_type = 'virtual_machine_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.virtual_network = None
self.floating_ip = None
self.instance_ip = None
self.logical_interfaces = set()
self.physical_interface = None
self.vlan_tag = None
self.service_interface_type = None
self.port_tuple = None
self.routing_instances = set()
self.security_groups = set()
self.service_instance = None
self.service_endpoint = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
if obj.get('virtual_machine_interface_properties'):
self.params = obj['virtual_machine_interface_properties']
self.vlan_tag = self.params.get('sub_interface_vlan_tag', None)
self.service_interface_type = self.params.get(
'service_interface_type', None)
else:
self.vlan_tag = 0
self.bindings = obj.get('virtual_machine_interface_bindings') or {}
self.device_owner = obj.get("virtual_machine_interface_device_owner") or ''
self.update_multiple_refs('logical_interface', obj)
self.update_single_ref('virtual_network', obj)
self.update_single_ref('floating_ip', obj)
self.update_single_ref('instance_ip', obj)
self.update_single_ref('physical_interface', obj)
self.update_multiple_refs('routing_instance', obj)
self.update_multiple_refs('security_group', obj)
self.update_single_ref('port_tuple', obj)
self.service_instance = None
if self.port_tuple:
pt = PortTupleDM.get(self.port_tuple)
if pt:
self.service_instance = pt.svc_instance
self.update_single_ref('service_endpoint', obj)
# end update
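    # Treat the VMI as a bare-metal server port when it is bound to a
    # logical interface with a physical-router device owner, or when its
    # binding carries vnic_type 'baremetal'.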
def is_device_owner_bms(self):
        if self.logical_interfaces and \
                self.device_owner.lower() in ['physicalrouter', 'physical-router']:
return True
kvps = self.bindings.get('key_value_pair') or []
kvp_dict = dict((kvp['key'], kvp['value']) for kvp in kvps)
vnic_type = kvp_dict.get('vnic_type') or ''
if vnic_type == 'baremetal':
return True
return False
# end
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('logical_interface', {})
obj.update_single_ref('virtual_network', {})
obj.update_single_ref('floating_ip', {})
obj.update_single_ref('instance_ip', {})
obj.update_single_ref('physical_interface', {})
obj.update_multiple_refs('routing_instance', {})
obj.update_multiple_refs('security_group', {})
obj.update_single_ref('port_tuple', {})
obj.update_single_ref('service_endpoint', {})
del cls._dict[uuid]
# end delete
# end VirtualMachineInterfaceDM
class LogicalRouterDM(DBBaseDM):
_dict = {}
obj_type = 'logical_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.physical_routers = set()
self.data_center_interconnect = None
self.virtual_machine_interfaces = set()
# internal virtual-network
self.virtual_network = None
self.port_tuples = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if not self.virtual_network:
vn_name = DMUtils.get_lr_internal_vn_name(self.uuid)
vn_obj = VirtualNetworkDM.find_by_name_or_uuid(vn_name)
if vn_obj:
self.virtual_network = vn_obj.uuid
vn_obj.logical_router = self.uuid
self.logical_router_gateway_external = obj.get("logical_router_gateway_external")
self.update_multiple_refs('physical_router', obj)
self.update_single_ref('data_center_interconnect', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('port_tuple', obj)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
# end update
def get_internal_vn_name(self):
return '__contrail_' + self.uuid + '_lr_internal_vn__'
# end get_internal_vn_name
def get_connected_networks(self, include_internal=True):
vn_list = []
if include_internal and self.virtual_network:
vn_list.append(self.virtual_network)
for vmi_uuid in self.virtual_machine_interfaces or []:
vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
if vmi:
vn_list.append(vmi.virtual_network)
return vn_list
# end get_connected_networks
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('physical_router', {})
obj.update_multiple_refs('virtual_machine_interface', {})
obj.update_multiple_refs('port_tuple', {})
obj.update_single_ref('virtual_network', None)
obj.update_single_ref('data_center_interconnect', None)
del cls._dict[uuid]
# end delete
# end LogicalRouterDM
class NetworkIpamDM(DBBaseDM):
_dict = {}
obj_type = 'network_ipam'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.ipam_subnets = set()
self.ipam_method = None
self.server_discovery_params = None
self.virtual_networks = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.ipam_method = obj.get('ipam_subnet_method')
self.ipam_subnets = obj.get('ipam_subnets')
if self.ipam_subnets:
self.server_discovery_params = \
DMUtils.get_server_discovery_parameters(self.ipam_subnets.get('subnets', []))
self.update_multiple_refs('virtual_network', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_network', {})
del cls._dict[uuid]
# end delete
# end NetworkIpamDM
class VirtualNetworkDM(DBBaseDM):
_dict = {}
obj_type = 'virtual_network'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.physical_routers = set()
self.tags = set()
self.network_ipams = set()
self.logical_router = None
self.data_center_interconnect = None
self.router_external = False
self.forwarding_mode = None
self.gateways = None
self.floating_ip_pools = set()
self.instance_ip_map = {}
self.route_targets = None
self.update(obj_dict)
# end __init__
def set_logical_router(self, name):
if DMUtils.get_lr_internal_vn_prefix() in name:
lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(name)
lr_obj = LogicalRouterDM.get(lr_uuid)
if lr_obj:
self.logical_router = lr_obj.uuid
lr_obj.virtual_network = self.uuid
# end set_logical_router
def set_data_center_interconnect(self, name):
if DMUtils.get_dci_internal_vn_prefix() in name:
dci_uuid = DMUtils.extract_dci_uuid_from_internal_vn_name(name)
dci_obj = DataCenterInterconnectDM.get(dci_uuid)
if dci_obj:
self.data_center_interconnect = dci_obj.uuid
dci_obj.virtual_network = self.uuid
# end set_data_center_interconnect
def get_dci_connected_lr_network(self, pr_uuid):
if not self.data_center_interconnect:
return None
dci_obj = DataCenterInterconnectDM.get(self.data_center_interconnect)
if dci_obj:
return dci_obj.get_lr_vn(pr_uuid)
return None
# end get_dci_connected_lr_network
def get_dci_connected_lr(self, pr_uuid):
if not self.data_center_interconnect:
return None
dci_obj = DataCenterInterconnectDM.get(self.data_center_interconnect)
if dci_obj:
return dci_obj.get_lr(pr_uuid)
return None
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.set_logical_router(obj.get("fq_name")[-1])
self.set_data_center_interconnect(obj.get("fq_name")[-1])
if obj["fq_name"] == ['default-domain', 'default-project', 'dci-network']:
DataCenterInterconnectDM.set_dci_network(self)
self.update_multiple_refs('physical_router', obj)
self.update_multiple_refs('tag', obj)
self.update_multiple_refs('network_ipam', obj)
self.set_children('floating_ip_pool', obj)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
try:
self.router_external = obj['router_external']
except KeyError:
self.router_external = False
self.vn_network_id = obj.get('virtual_network_network_id')
self.virtual_network_properties = obj.get('virtual_network_properties')
self.set_forwarding_mode(obj)
self.routing_instances = set([ri['uuid'] for ri in
obj.get('routing_instances', [])])
self.virtual_machine_interfaces = set(
[vmi['uuid'] for vmi in
obj.get('virtual_machine_interface_back_refs', [])])
self.gateways = DMUtils.get_network_gateways(obj.get('network_ipam_refs', []))
self.route_targets = None
route_target_list = obj.get('route_target_list')
if route_target_list:
route_targets = route_target_list.get('route_target')
if route_targets:
self.route_targets = set(route_targets)
# end update
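    # Return the gateway prefixes for this network. When the network hangs
    # off an external logical router (or is a DCI internal network), the
    # prefixes of every network connected to that router are included too.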
def get_prefixes(self, pr_uuid=None):
lr = None
if self.logical_router:
lr = LogicalRouterDM.get(self.logical_router)
elif self.data_center_interconnect and pr_uuid:
lr = self.get_dci_connected_lr(pr_uuid)
if not lr or (not lr.logical_router_gateway_external and not self.data_center_interconnect):
return set(self.gateways.keys())
vn_list = lr.get_connected_networks(include_internal=False)
prefix_set = set()
if self.gateways and self.gateways.keys():
prefix_set = set(self.gateways.keys())
for vn in vn_list:
vn_obj = VirtualNetworkDM.get(vn)
if vn_obj:
prefixes = vn_obj.get_prefixes()
if prefixes:
prefix_set = prefix_set.union(prefixes)
return prefix_set
# end get_prefixes
    def get_vxlan_vni(self, is_internal_vn=False, is_dci_vn=False):
if is_internal_vn or is_dci_vn:
props = self.virtual_network_properties or {}
return props.get("vxlan_network_identifier") or self.vn_network_id
if GlobalVRouterConfigDM.is_global_vxlan_id_mode_auto():
return self.vn_network_id
props = self.virtual_network_properties or {}
return props.get("vxlan_network_identifier") or self.vn_network_id
# end get_vxlan_vni
def set_forwarding_mode(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.forwarding_mode = None
try:
prop = obj['virtual_network_properties']
if prop['forwarding_mode'] is not None:
self.forwarding_mode = prop['forwarding_mode']
except KeyError:
pass
# end set_forwarding_mode
def get_forwarding_mode(self):
if not self.forwarding_mode:
return GlobalVRouterConfigDM.global_forwarding_mode or 'l2_l3'
return self.forwarding_mode
# end get_forwarding_mode
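    # Build a map of instance IP -> {floating IP, public VRF name} for every
    # bare-metal VMI on this network that has both an instance IP and a
    # floating IP drawn from a public network.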
def update_instance_ip_map(self):
self.instance_ip_map = {}
for vmi_uuid in self.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
            if vmi is None or not vmi.is_device_owner_bms():
continue
if vmi.floating_ip is not None and vmi.instance_ip is not None:
fip = FloatingIpDM.get(vmi.floating_ip)
inst_ip = InstanceIpDM.get(vmi.instance_ip)
if fip is None or inst_ip is None or fip.get_public_network() is None:
continue
instance_ip = inst_ip.instance_ip_address
floating_ip = fip.floating_ip_address
public_vn = VirtualNetworkDM.get(fip.get_public_network())
if public_vn is None or public_vn.vn_network_id is None:
continue
public_vrf_name = DMUtils.make_vrf_name(public_vn.fq_name[-1],
public_vn.vn_network_id, 'l3')
self.instance_ip_map[instance_ip] = {
'floating_ip': floating_ip,
'vrf_name': public_vrf_name
}
# end update_instance_ip_map
def get_connected_private_networks(self):
vn_list = set()
for pool_uuid in self.floating_ip_pools or []:
pool = FloatingIpPoolDM.get(pool_uuid)
if not pool or not pool.floating_ips:
continue
floating_ips = pool.floating_ips
for fip in floating_ips:
fip_obj = FloatingIpDM.get(fip)
if not fip_obj or not fip_obj.virtual_machine_interface:
continue
vmi = VirtualMachineInterfaceDM.get(fip_obj.virtual_machine_interface)
                if vmi is None or not vmi.is_device_owner_bms():
continue
if vmi.floating_ip is not None and vmi.instance_ip is not None:
fip = FloatingIpDM.get(vmi.floating_ip)
inst_ip = InstanceIpDM.get(vmi.instance_ip)
if fip is None or inst_ip is None or fip.get_private_network() is None:
continue
instance_ip = inst_ip.instance_ip_address
floating_ip = fip.floating_ip_address
private_vn = VirtualNetworkDM.get(fip.get_private_network())
if private_vn is None or private_vn.vn_network_id is None:
continue
vn_list.add(private_vn.uuid)
return list(vn_list)
    # end get_connected_private_networks
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('physical_router', {})
obj.update_multiple_refs('tag', {})
obj.update_multiple_refs('network_ipam', {})
del cls._dict[uuid]
# end delete
# end VirtualNetworkDM
class RoutingInstanceDM(DBBaseDM):
_dict = {}
obj_type = 'routing_instance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.virtual_network = None
self.import_targets = set()
self.export_targets = set()
self.routing_instances = set()
self.service_chain_address = None
self.virtual_machine_interfaces = set()
self.update(obj_dict)
vn = VirtualNetworkDM.get(self.virtual_network)
if vn:
vn.routing_instances.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = obj['fq_name'][-1]
self.virtual_network = self.get_parent_uuid(obj)
self.import_targets = set()
self.export_targets = set()
for rt_ref in obj.get('route_target_refs', []):
rt_name = rt_ref['to'][0]
exim = rt_ref.get('attr').get('import_export')
if exim == 'export':
self.export_targets.add(rt_name)
elif exim == 'import':
self.import_targets.add(rt_name)
else:
self.import_targets.add(rt_name)
self.export_targets.add(rt_name)
self.update_multiple_refs('routing_instance', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
service_chain_information = obj.get('service_chain_information')
if service_chain_information is not None:
self.service_chain_address = service_chain_information.get(
'service_chain_address')
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
vn = VirtualNetworkDM.get(obj.virtual_network)
if vn:
vn.routing_instances.discard(obj.uuid)
del cls._dict[uuid]
# end delete
# end RoutingInstanceDM
class ServiceTemplateDM(DBBaseDM):
_dict = {}
obj_type = 'service_template'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_instances = set()
self.service_appliance_set = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.port_tuple_order = []
self.params = obj.get('service_template_properties')
if self.params:
self.virtualization_type = self.params.get(
'service_virtualization_type')
intf_type = self.params.get('interface_type')
if intf_type is not None:
for svc_intf_type in intf_type:
if svc_intf_type.get('service_interface_type'):
self.port_tuple_order.append(svc_intf_type.get('service_interface_type'))
self.update_multiple_refs('service_instance', obj)
self.update_single_ref('service_appliance_set', obj)
# end update
def delete_obj(self):
self.update_multiple_refs('service_instance', {})
self.update_single_ref('service_appliance_set', {})
# end delete_obj
# end class ServiceTemplateDM
class ServiceApplianceDM(DBBaseDM):
_dict = {}
obj_type = 'service_appliance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_appliance_set = None
self.physical_interfaces = {}
self.kvpairs = []
obj = self.update(obj_dict)
self.add_to_parent(obj)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
kvpairs = obj.get('service_appliance_properties', None)
if kvpairs:
self.kvpairs = kvpairs.get('key_value_pair', [])
self.service_appliance_set = self.get_parent_uuid(obj)
self.update_multiple_refs_with_attr('physical_interface', obj)
return obj
# end update
def delete_obj(self):
self.update_multiple_refs_with_attr('physical_interface', {})
self.remove_from_parent()
# end delete_obj
# end ServiceApplianceDM
class ServiceApplianceSetDM(DBBaseDM):
_dict = {}
obj_type = 'service_appliance_set'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_appliances = set()
self.service_template = None
self.kvpairs = []
self.ha_mode = "active-active"
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_single_ref("service_template", obj)
kvpairs = obj.get('service_appliance_set_properties', None)
if kvpairs:
self.kvpairs = kvpairs.get('key_value_pair', [])
self.service_appliances = set(
[sa['uuid'] for sa in obj.get('service_appliances', [])])
self.ha_mode = obj.get('service_appliance_ha_mode')
# end update
def delete_obj(self):
self.update_single_ref("service_template",{})
# end delete_obj
# end ServiceApplianceSetDM
class ServiceInstanceDM(DBBaseDM):
_dict = {}
obj_type = 'service_instance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.fq_name = None
self.name = None
self.params = None
self.service_template = None
self.port_tuples = set()
self.update(obj_dict)
# end
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = obj['fq_name'][-1]
self.params = obj.get('service_instance_properties', None)
bindings = obj.get('service_instance_bindings', None)
annotations = obj.get('annotations')
if annotations:
kvps = annotations.get('key_value_pair') or []
kvp_dict = dict((kvp['key'], kvp['value']) for kvp in kvps)
            left_svc_asns = kvp_dict.get('left-svc-asns')
            right_svc_asns = kvp_dict.get('right-svc-asns')
            self.left_svc_asns = left_svc_asns.split(',') if left_svc_asns else []
            self.right_svc_asns = right_svc_asns.split(',') if right_svc_asns else []
            self.left_svc_vlan = kvp_dict.get('left-svc-vlan')
            self.right_svc_vlan = kvp_dict.get('right-svc-vlan')
            self.rp_ip_addr = kvp_dict.get('rp-ip-addr')
if bindings:
kvps = bindings.get('key_value_pair') or []
kvp_dict = dict((kvp['key'], kvp['value']) for kvp in kvps)
self.left_svc_unit = kvp_dict.get('left-svc-unit') or None
self.right_svc_unit = kvp_dict.get('right-svc-unit') or None
self.port_tuples = set(
[pt['uuid'] for pt in obj.get('port_tuples', [])])
self.update_single_ref('service_template', obj)
self.bgp_enabled = obj.get('service_instance_bgp_enabled')
# end
def delete_obj(self):
self.update_single_ref('service_template', {})
        self._object_db.delete_pnf_resources(self.uuid)
# end
class PortTupleDM(DBBaseDM):
_dict = {}
obj_type = 'port_tuple'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interfaces = set()
self.logical_routers = set()
self.virtual_networks = set()
obj = self.update(obj_dict)
self.add_to_parent(obj)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
self.svc_instance = self.get_parent_uuid(obj)
self.build_pt_pr_map()
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('logical_router', obj)
self.update_multiple_refs('virtual_network', obj)
for vmi in self.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi)
if vmi_obj and not vmi_obj.service_instance:
vmi_obj.service_instance = self.svc_instance
return obj
# end update
def get_sa_obj(self):
svc_appliance_obj = None
si_obj = ServiceInstanceDM.get(self.svc_instance)
if si_obj is not None:
if si_obj.service_template is not None:
svc_tmpl_obj = ServiceTemplateDM.get(si_obj.service_template)
if svc_tmpl_obj.service_appliance_set is not None:
svc_appliance_set_obj = ServiceApplianceSetDM.get(svc_tmpl_obj.service_appliance_set)
for sa in svc_appliance_set_obj.service_appliances or []:
svc_appliance_obj = ServiceApplianceDM.get(sa)
return svc_appliance_obj
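    # Register this port tuple on every physical router that hosts one of
    # the service appliance's physical interfaces, or a peer of one of them.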
def build_pt_pr_map(self):
sa_obj = self.get_sa_obj()
if sa_obj is not None:
for pi in sa_obj.physical_interfaces or {}:
pi_obj = PhysicalInterfaceDM.get(pi)
pr_obj = PhysicalRouterDM.get(pi_obj.get_pr_uuid())
if self.uuid not in pr_obj.port_tuples:
pr_obj.set_associated_port_tuples(self.uuid)
for pi_ref in pi_obj.physical_interfaces or []:
pi_ref_obj = PhysicalInterfaceDM.get(pi_ref)
pr_ref_obj = PhysicalRouterDM.get(pi_ref_obj.get_pr_uuid())
if self.uuid not in pr_ref_obj.port_tuples:
pr_ref_obj.set_associated_port_tuples(self.uuid)
# end build_pr_pt_map
def delete_obj(self):
sa_obj = self.get_sa_obj()
if sa_obj is not None:
for pi in sa_obj.physical_interfaces or {}:
pi_obj = PhysicalInterfaceDM.get(pi)
pr_obj = PhysicalRouterDM.get(pi_obj.get_pr_uuid())
if self.uuid in pr_obj.port_tuples:
pr_obj.remove_associated_port_tuples(self.uuid)
for pi_ref in pi_obj.physical_interfaces or []:
pi_ref_obj = PhysicalInterfaceDM.get(pi_ref)
pr_ref_obj = PhysicalRouterDM.get(pi_ref_obj.get_pr_uuid())
if self.uuid in pr_ref_obj.port_tuples:
pr_ref_obj.remove_associated_port_tuples(self.uuid)
self.update_multiple_refs('virtual_machine_interface', {})
self.update_multiple_refs('logical_router', {})
self.update_multiple_refs('virtual_network', {})
self.remove_from_parent()
# end delete_obj
# end PortTupleDM
class ServiceEndpointDM(DBBaseDM):
_dict = {}
obj_type = 'service_endpoint'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.physical_router = None
self.service_connection_modules = set()
self.virtual_machine_interface = None
self.site_id = 0
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.service_name = obj.get('service_name')
self.update_single_ref('physical_router', obj)
self.update_multiple_refs('service_connection_module', obj)
self.update_single_ref('virtual_machine_interface', obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('physical_router', {})
obj.update_multiple_refs('service_connection_module', {})
obj.update_single_ref('virtual_machine_interface', {})
del cls._dict[uuid]
# end class ServiceEndpointDM
class ServiceConnectionModuleDM(DBBaseDM):
_dict = {}
obj_type = 'service_connection_module'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_endpoints = set()
self.service_object = None
self.circuit_id = 0
self.mtu = 0
self.no_control_word = False
self.management_ip = None
self.user_creds = None
self.sap_info = None
self.sdp_info = None
self.id_perms = None
self.service_type = None
self.commit_stats = {
'last_commit_time': '',
'last_commit_duration': '',
'commit_status_message': '',
'total_commits_sent_since_up': 0,
}
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.e2service = obj.get('e2service')
self.id_perms = obj.get('id_perms')
self.annotations = obj.get('annotations')
self.service_type = obj.get('service_type')
self.update_multiple_refs('service_endpoint', obj)
self.update_single_ref('service_object', obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('service_endpoint', {})
obj.update_single_ref('service_object', {})
del cls._dict[uuid]
# end class ServiceConnectionModuleDM
class ServiceObjectDM(DBBaseDM):
_dict = {}
obj_type = 'service_object'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_connection_module = None
self.sep_list = None
self.physical_router = None
self.service_status = {}
self.management_ip = None
self.user_creds = None
self.service_type = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.service_object_name = obj.get('service_object_name')
self.update_single_ref('service_connection_module', obj)
circuit_id = 0
if self.service_connection_module is not None:
scm = ServiceConnectionModuleDM.get(self.service_connection_module)
if scm is not None:
circuit_id = scm.circuit_id
if circuit_id == 0 and \
scm.service_type != 'fabric-interface':
return
found = False
neigbor_id = None
for sindex, sep_uuid in enumerate(scm.service_endpoints):
sep = ServiceEndpointDM.get(sep_uuid)
if sep is None:
continue
pr_uuid = sep.physical_router
pr = PhysicalRouterDM.get(pr_uuid)
                    if pr is not None and not found and \
                            (pr.vendor or '').lower() == "juniper":
self.management_ip = pr.management_ip
self.user_creds = pr.user_credentials
self.service_type = scm.service_type
found = True
elif pr is not None:
bgp_uuid = pr.bgp_router
bgp_entry = BgpRouterDM.get(bgp_uuid)
neigbor_id = bgp_entry.params.get('address')
                if found:
service_params = {
"service_type": self.service_type,
"circuit_id": circuit_id,
"neigbor_id": neigbor_id,
}
self.service_status = \
pr.config_manager.get_service_status(service_params)
self.uve_send()
def uve_send(self):
mydata=self.service_status
if self.service_status is not None:
last_timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
pr_trace = UveServiceStatus(
name=self.name,
ip_address=self.management_ip,
service_name=self.name,
status_data=str(mydata),
operational_status="None",
last_get_time=last_timestamp)
pr_msg = UveServiceStatusTrace(
data=pr_trace, sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
# end uve_send
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('service_connection_module', {})
del cls._dict[uuid]
# end class ServiceObjectDM
class NetworkDeviceConfigDM(DBBaseDM):
_dict = {}
obj_type = 'network_device_config'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.physical_router = None
self.management_ip = None
self.config_manager = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.config_object_name = obj.get('config_object_name')
self.update_single_ref('physical_router', obj)
if self.physical_router is not None:
pr = PhysicalRouterDM.get(self.physical_router)
if pr is not None:
self.management_ip = pr.management_ip
self.config_manager = pr.config_manager
self.uve_send()
# end update
def uve_send(self):
mydata = self.config_manager.device_get_config()
if mydata:
last_timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
pr_trace = UvePhysicalRouterConfiguration(
name=self.name,
ip_address=self.management_ip,
config_data=mydata,
last_get_time=last_timestamp)
pr_msg = UvePhysicalRouterConfigurationTrace(
data=pr_trace, sandesh=DBBaseDM._sandesh)
pr_msg.send(sandesh=DBBaseDM._sandesh)
# end uve_send
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('physical_router', {})
del cls._dict[uuid]
# end class NetworkDeviceConfigDM
class DataCenterInterconnectDM(DBBaseDM):
_dict = {}
obj_type = 'data_center_interconnect'
dci_network = None
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.logical_routers = set()
self.bgp_hold_time = None
self.allocated_asn = None
self.virtual_network = None
self.bgp_address_families = None
self.init_cs_state()
obj = self.update(obj_dict)
self.add_to_parent(obj)
# end __init__
def init_cs_state(self):
asn = self._object_db.get_asn_for_dci(self.uuid)
if asn:
self.allocated_asn = asn
# end init_cs_state
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if not self.virtual_network:
vn_name = DMUtils.get_dci_internal_vn_name(self.uuid)
vn_obj = VirtualNetworkDM.find_by_name_or_uuid(vn_name)
if vn_obj:
self.virtual_network = vn_obj.uuid
vn_obj.data_center_interconnect = self.uuid
self.name = obj['fq_name'][-1]
self.bgp_hold_time = obj.get('data_center_interconnect_bgp_hold_time', 90)
self.bgp_address_families = obj.get('data_center_interconnect_bgp_address_families')
if self.bgp_address_families:
self.bgp_address_families = self.bgp_address_families.get("family")
else:
self.bgp_address_families = ['inet-vpn', 'e-vpn']
self.update_multiple_refs('logical_router', obj)
self.allocate_asn()
return obj
# end update
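    # Return the physical routers attached to this DCI's logical routers
    # that carry the DCI-Gateway routing-bridging role.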
def get_connected_physical_routers(self):
if not self.logical_routers:
return []
pr_list = []
for lr_uuid in self.logical_routers:
lr = LogicalRouterDM.get(lr_uuid)
if lr and lr.physical_routers:
prs = lr.physical_routers
for pr_uuid in prs:
pr = PhysicalRouterDM.get(pr_uuid)
                    if pr and pr.has_rb_role("DCI-Gateway"):
pr_list.append(pr)
return pr_list
# end get_connected_physical_routers
def get_lr(self, pr):
if not self.logical_routers:
return None
for lr_uuid in self.logical_routers:
lr = LogicalRouterDM.get(lr_uuid)
if lr and lr.physical_routers:
prs = lr.physical_routers
for pr_uuid in prs:
if pr == pr_uuid:
return lr
return None
# end get_lr
def get_lr_vn(self, pr):
if not self.logical_routers:
return None
for lr_uuid in self.logical_routers:
lr = LogicalRouterDM.get(lr_uuid)
if lr and lr.physical_routers:
prs = lr.physical_routers
for pr_uuid in prs:
if pr == pr_uuid:
return lr.virtual_network
return None
# end get_lr_vn
@classmethod
def set_dci_network(cls, vn_obj):
DataCenterInterconnectDM.dci_network = vn_obj
# end set_dci_network
def verify_allocated_asn(self):
self._logger.debug("DCI: verify allocated asn for %s" %
self.uuid)
if self.allocated_asn is not None and GlobalSystemConfigDM.dci_asn_namespace is not None:
ns = GlobalSystemConfigDM.dci_asn_namespace
min_asn = ns.get("asn_min")
max_asn = ns.get("asn_max")
if min_asn <= self.allocated_asn <= max_asn:
self._logger.debug(
"DCI %s: asn %d is allocated" %
(self.uuid, self.allocated_asn))
return True
self._logger.debug("DCI %s: asn not allocated"%(self.uuid))
return False
# end verify_allocated_asn
def allocate_asn(self):
if self.verify_allocated_asn() or not GlobalSystemConfigDM.dci_asn_namespace:
return
# find the first available asn
# loop through all asns to account for dangling asn in a range
ns = GlobalSystemConfigDM.dci_asn_namespace
min_asn = ns.get("asn_min")
max_asn = ns.get("asn_max")
        for asn in range(min_asn, max_asn + 1):
if not self._object_db.get_dci_for_asn(asn):
self.allocated_asn = asn
self._object_db.add_dci_asn(self.uuid, asn)
self._logger.debug(
"DCI: allocated asn %d for %s" %
(self.allocated_asn, self.uuid))
return
self._logger.error(
"DCI: could not find an unused asn to allocate for %s"
% self.uuid)
# end allocate_asn
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj._object_db.delete_dci(obj.uuid)
obj.update_multiple_refs('logical_router', {})
obj.update_single_ref('virtual_network', None)
del cls._dict[uuid]
# end delete
# end class DataCenterInterconnectDM
class FabricDM(DBBaseDM):
_dict = {}
obj_type = 'fabric'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.fabric_namespaces = set()
self.lo0_ipam_subnet = None
self.ip_fabric_ipam_subnet = None
self.update(obj_dict)
# end __init__
@classmethod
def _get_ipam_subnets_for_virtual_network(cls, obj, vn_type):
vn_uuid = None
virtual_network_refs = obj.get('virtual_network_refs') or []
for ref in virtual_network_refs:
if vn_type in ref['attr']['network_type']:
vn_uuid = ref['uuid']
break
# Get the IPAM attached to the virtual network
ipam_subnets = None
if vn_uuid is not None:
vn = VirtualNetworkDM.get(vn_uuid)
if vn is not None:
ipam_refs = vn.get('network_ipam_refs')
if ipam_refs:
ipam_ref = ipam_refs[0]
ipam_subnets = ipam_ref['attr'].get('ipam_subnets')
return ipam_subnets
# end _get_ipam_for_virtual_network
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
# Get the 'loopback' type virtual network
self.lo0_ipam_subnet =\
self._get_ipam_subnets_for_virtual_network(obj, 'loopback')
# Get the 'ip_fabric' type virtual network
self.ip_fabric_ipam_subnet = \
self._get_ipam_subnets_for_virtual_network(obj, 'ip_fabric')
# end update
# end class FabricDM
class FabricNamespaceDM(DBBaseDM):
_dict = {}
obj_type = 'fabric_namespace'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.as_numbers = None
self.asn_ranges = None
self.update(obj_dict)
# end __init__
def _read_as_numbers(self, obj):
fabric_namespace_type = obj.get('fabric_namespace_type')
if fabric_namespace_type != "ASN":
return
tag_ids = list(set([tag['uuid'] for tag in obj.get('tag_refs') or []]))
if len(tag_ids) == 0:
return
tag = self.read_obj(tag_ids[0], "tag")
if tag.get('tag_type_name') != 'label' or\
tag.get('tag_value') != 'fabric-ebgp-as-number':
return
value = obj.get('fabric_namespace_value')
if value is not None and value['asn'] is not None and\
value['asn']['asn'] is not None:
self.as_numbers = list(map(int, value['asn']['asn']))
# end _read_as_numbers
def _read_asn_ranges(self, obj):
fabric_namespace_type = obj.get('fabric_namespace_type')
if fabric_namespace_type != "ASN_RANGE":
return
tag_ids = list(set([tag['uuid'] for tag in obj.get('tag_refs') or []]))
if len(tag_ids) == 0:
return
tag = self.read_obj(tag_ids[0], "tag")
if tag.get('tag_type_name') != 'label' or\
tag.get('tag_value') != 'fabric-ebgp-as-number':
return
value = obj.get('fabric_namespace_value')
if value is not None and value['asn_ranges'] is not None:
self.asn_ranges = list(map(lambda asn_range:
(int(asn_range['asn_min']),
int(asn_range['asn_max'])),
value['asn_ranges']))
# end _read_asn_ranges
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self._read_as_numbers(obj)
self._read_asn_ranges(obj)
self.add_to_parent(obj)
# end update
def delete_obj(self):
self.remove_from_parent()
# end delete_obj
# end class FabricNamespaceDM
class NodeProfileDM(DBBaseDM):
_dict = {}
obj_type = 'node_profile'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.role_configs = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
# end update
# end class NodeProfileDM
class PortDM(DBBaseDM):
_dict = {}
obj_type = 'port'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.tags = set()
self.physical_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.update_multiple_refs('tag', obj)
self.update_multiple_refs('physical_interface', obj)
# end update
def delete_obj(self):
        self.update_multiple_refs('tag', {})
self.update_multiple_refs('physical_interface', {})
# end delete_obj
# end class PortDM
class TagDM(DBBaseDM):
_dict = {}
obj_type = 'tag'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_networks = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1].split('=')[-1]
self.update_multiple_refs('virtual_network', obj)
# end update
def delete_obj(self):
self.update_multiple_refs('virtual_network', {})
# end delete_obj
# end class TagDM
class LinkAggregationGroupDM(DBBaseDM):
_dict = {}
obj_type = 'link_aggregation_group'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.physical_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.add_to_parent(obj)
self.lacp_enabled = obj.get('link_aggregation_group_lacp_enabled')
self.update_multiple_refs('physical_interface', obj)
# end update
def delete_obj(self):
self.remove_from_parent()
self.update_multiple_refs('physical_interface', {})
# end delete_obj
# end class LinkAggregationGroupDM
class VirtualPortGroupDM(DBBaseDM):
_dict = {}
obj_type = 'virtual_port_group'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.physical_interfaces = set()
self.virtual_machine_interfaces = set()
self.esi = None
self.pi_ae_map = {}
self.update(obj_dict)
self.get_esi()
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.add_to_parent(obj)
self.update_multiple_refs('physical_interface', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
self.get_ae_for_pi(obj.get('physical_interface_refs'))
self.build_lag_pr_map()
# end update
def get_ae_for_pi(self, pi_refs):
self.pi_ae_map = {}
if not pi_refs:
return
for pi in pi_refs:
if pi.get('attr') is not None:
self.pi_ae_map.update({pi.get('uuid'): pi.get('attr').get('ae_num')})
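    # Derive a deterministic ESI for this VPG by hashing its uuid with a
    # 64-bit CityHash and rendering the eight bytes as colon-separated hex,
    # padded to the ten-byte ESI format with leading and trailing 00 octets.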
def get_esi(self):
hash_obj = pyhash.city_64()
unpacked = struct.unpack('>8B', struct.pack('>Q', hash_obj(self.uuid)))
self.esi = '00:%s:00'%(':'.join('%02x' % i for i in unpacked))
def build_lag_pr_map(self):
for pi in self.physical_interfaces or []:
pi_obj = PhysicalInterfaceDM.get(pi)
pr_obj = PhysicalRouterDM.get(pi_obj.get_pr_uuid())
if self.uuid not in pr_obj.virtual_port_groups:
pr_obj.set_associated_lags(self.uuid)
def get_attached_sgs(self, vlan_tag):
sg_list = []
for vmi_uuid in self.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi_uuid)
if not vmi_obj:
return sg_list
if vmi_obj.vlan_tag == int(vlan_tag):
for sg in vmi_obj.security_groups or []:
sg = SecurityGroupDM.get(sg)
if sg and sg not in sg_list:
sg_list.append(sg)
return sg_list
# end get_attached_sgs
def delete_obj(self):
for pi in self.physical_interfaces or []:
pi_obj = PhysicalInterfaceDM.get(pi)
pr_obj = PhysicalRouterDM.get(pi_obj.get_pr_uuid())
if self.uuid in pr_obj.virtual_port_groups:
pr_obj.remove_associated_lags(self.uuid)
self.update_multiple_refs('physical_interface', {})
self.update_multiple_refs('virtual_machine_interface', {})
self.remove_from_parent()
# end delete_obj
# end class VirtualPortGroupDM
class RoleConfigDM(DBBaseDM):
_dict = {}
obj_type = 'role_config'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.name = None
self.node_profile = None
self.config = None
self.job_template = None
self.job_template_fq_name = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.node_profile = self.get_parent_uuid(obj)
self.add_to_parent(obj)
self.config = obj.get('role_config_config')
if self.config and isinstance(self.config, basestring):
self.config = json.loads(self.config)
self.update_single_ref('job_template', obj)
if self.job_template is not None:
self.job_template_fq_name =\
self._object_db.uuid_to_fq_name(self.job_template)
else:
self.job_template_fq_name = None
# end update
def delete_obj(self):
self.remove_from_parent()
self.update_single_ref('job_template', {})
# end delete_obj
# end class RoleConfigDM
class E2ServiceProviderDM(DBBaseDM):
_dict = {}
obj_type = 'e2_service_provider'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.promiscuous = None
self.physical_routers = set()
self.peering_policys = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.promiscuous = obj.get('e2_service_provider_promiscuous')
self.update_multiple_refs('physical_router', obj)
self.update_multiple_refs('peering_policy', obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('peering_policy', {})
obj.update_multiple_refs('physical_router', {})
del cls._dict[uuid]
# end class E2ServiceProviderDM
class PeeringPolicyDM(DBBaseDM):
_dict = {}
obj_type = 'peering_policy'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.e2_service_providers = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.policy_name = obj.get('name')
self.update_multiple_refs('e2_service_provider', obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('e2_service_provider', {})
del cls._dict[uuid]
# end class PeeringPolicyDM
class DMCassandraDB(VncObjectDBClient):
_KEYSPACE = DEVICE_MANAGER_KEYSPACE_NAME
_PR_VN_IP_CF = 'dm_pr_vn_ip_table'
_PR_ASN_CF = 'dm_pr_asn_table'
_DCI_ASN_CF = 'dm_dci_asn_table'
_PR_DCI_IP_CF = 'dm_pr_dci_ip_table'
# PNF table
_PNF_RESOURCE_CF = 'dm_pnf_resource_table'
_zk_path_pfx = ''
_PNF_MAX_NETWORK_ID = 4294967292
_PNF_NETWORK_ALLOC_PATH = "/id/pnf/network_id"
_PNF_MAX_VLAN = 4093
_PNF_VLAN_ALLOC_PATH = "/id/pnf/vlan_id"
_PNF_MAX_UNIT = 16385
_PNF_UNIT_ALLOC_PATH = "/id/pnf/unit_id"
dm_object_db_instance = None
@classmethod
def get_instance(cls, zkclient=None, args=None, logger=None):
if cls.dm_object_db_instance is None:
cls.dm_object_db_instance = DMCassandraDB(zkclient, args, logger)
return cls.dm_object_db_instance
# end
@classmethod
def clear_instance(cls):
cls.dm_object_db_instance = None
# end
def __init__(self, zkclient, args, logger):
self._zkclient = zkclient
self._args = args
keyspaces = {
self._KEYSPACE: {self._PR_VN_IP_CF: {},
self._PR_ASN_CF: {},
self._DCI_ASN_CF: {},
self._PR_DCI_IP_CF: {},
self._PNF_RESOURCE_CF: {}}}
cass_server_list = self._args.cassandra_server_list
cred = None
if (self._args.cassandra_user is not None and
self._args.cassandra_password is not None):
cred = {'username': self._args.cassandra_user,
'password': self._args.cassandra_password}
super(DMCassandraDB, self).__init__(
cass_server_list, self._args.cluster_id, keyspaces, None,
logger.log, credential=cred,
ssl_enabled=self._args.cassandra_use_ssl,
ca_certs=self._args.cassandra_ca_certs)
self.pr_vn_ip_map = {}
self.pr_dci_ip_map = {}
self.pr_asn_map = {}
self.asn_pr_map = {}
self.dci_asn_map = {}
self.asn_dci_map = {}
self.init_pr_map()
self.init_pr_dci_map()
self.init_pr_asn_map()
self.init_dci_asn_map()
self.pnf_vlan_allocator_map = {}
self.pnf_unit_allocator_map = {}
self.pnf_network_allocator = IndexAllocator(
zkclient, self._zk_path_pfx+self._PNF_NETWORK_ALLOC_PATH,
self._PNF_MAX_NETWORK_ID)
self.pnf_cf = self.get_cf(self._PNF_RESOURCE_CF)
self.pnf_resources_map = dict(
self.pnf_cf.get_range(column_count=0, filter_empty=True))
# end
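    # Return the set of physical-router uuids hosting the physical
    # interfaces bound to the service instance's port tuples.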
def get_si_pr_set(self, si_id):
si_obj = ServiceInstanceDM.get(si_id)
pr_set = set()
for pt_uuid in si_obj.port_tuples:
pt_obj = PortTupleDM.get(pt_uuid)
for vmi_uuid in pt_obj.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi_uuid)
pi_obj = PhysicalInterfaceDM.get(vmi_obj.physical_interface)
pr_set.add(pi_obj.physical_router)
return pr_set
def get_pnf_vlan_allocator(self, pr_id):
return self.pnf_vlan_allocator_map.setdefault(
pr_id,
IndexAllocator(
self._zkclient,
self._zk_path_pfx+self._PNF_VLAN_ALLOC_PATH+pr_id+'/',
self._PNF_MAX_VLAN)
)
def get_pnf_unit_allocator(self, pi_id):
return self.pnf_unit_allocator_map.setdefault(
pi_id,
IndexAllocator(
self._zkclient,
self._zk_path_pfx+self._PNF_UNIT_ALLOC_PATH+pi_id+'/',
self._PNF_MAX_UNIT)
)
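    # Allocate (or return the cached) PNF resources for the service instance
    # behind this VMI: a fabric-wide network id, a vlan id scoped to the
    # physical router, and a unit id scoped to the physical interface.
    # Index 0 is reserved on the vlan and unit allocators so ids start at 1,
    # and the vlan id is also reserved on every other PR serving the SI.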
def get_pnf_resources(self, vmi_obj, pr_id):
si_id = vmi_obj.service_instance
pi_id = vmi_obj.physical_interface
if not si_id or not pi_id:
return None
if si_id in self.pnf_resources_map:
return self.pnf_resources_map[si_id]
network_id = self.pnf_network_allocator.alloc(si_id)
vlan_alloc = self.get_pnf_vlan_allocator(pr_id)
try:
vlan_alloc.reserve(0)
except ResourceExistsError:
# must have been reserved already, restart case
pass
vlan_id = vlan_alloc.alloc(si_id)
pr_set = self.get_si_pr_set(si_id)
for other_pr_uuid in pr_set:
if other_pr_uuid != pr_id:
try:
self.get_pnf_vlan_allocator(other_pr_uuid).reserve(vlan_id)
except ResourceExistsError:
pass
unit_alloc = self.get_pnf_unit_allocator(pi_id)
try:
unit_alloc.reserve(0)
except ResourceExistsError:
# must have been reserved already, restart case
pass
unit_id = unit_alloc.alloc(si_id)
pnf_resources = {
"network_id": str(network_id),
"vlan_id": str(vlan_id),
"unit_id": str(unit_id)
}
self.pnf_resources_map[si_id] = pnf_resources
self.pnf_cf.insert(si_id, pnf_resources)
return pnf_resources
# end
def delete_pnf_resources(self, si_id):
pnf_resources = self.pnf_resources_map.get(si_id, None)
if not pnf_resources:
return
self.pnf_network_allocator.delete(int(pnf_resources['network_id']))
pr_set = self.get_si_pr_set(si_id)
for pr_uuid in pr_set:
if pr_uuid in self.pnf_vlan_allocator_map:
self.get_pnf_vlan_allocator(pr_uuid).delete(
int(pnf_resources['vlan_id']))
si_obj = ServiceInstanceDM.get(si_id)
for pt_uuid in si_obj.port_tuples:
pt_obj = PortTupleDM.get(pt_uuid)
for vmi_uuid in pt_obj.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceDM.get(vmi_uuid)
if vmi_obj.physical_interface:
self.get_pnf_unit_allocator(vmi_obj.physical_interface).delete(
int(pnf_resources['unit_id']))
del self.pnf_resources_map[si_id]
self.pnf_cf.remove(si_id)
# end
def handle_pnf_resource_deletes(self, si_id_list):
        for si_id in list(self.pnf_resources_map):
if si_id not in si_id_list:
self.delete_pnf_resources(si_id)
# end
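    # Rebuild the in-memory physical-router -> (vn_subnet, ip_used_for)
    # map from the Cassandra dm_pr_vn_ip_table on startup.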
def init_pr_map(self):
cf = self.get_cf(self._PR_VN_IP_CF)
pr_entries = dict(cf.get_range(column_count=1000000))
for key in pr_entries.keys():
key_data = key.split(':', 1)
cols = pr_entries[key] or {}
for col in cols.keys():
ip_used_for = DMUtils.get_ip_used_for_str(col)
(pr_uuid, vn_subnet_uuid) = (key_data[0], key_data[1])
self.add_to_pr_map(pr_uuid, vn_subnet_uuid, ip_used_for)
# end
def init_pr_dci_map(self):
cf = self.get_cf(self._PR_DCI_IP_CF)
pr_entries = dict(cf.get_range(column_count=1000000))
for key in pr_entries.keys():
key_data = key.split(':', 1)
pr_uuid, dci_key = (key_data[0], key_data[1])
self.add_to_pr_dci_map(pr_uuid, dci_key)
# end
def init_pr_asn_map(self):
cf = self.get_cf(self._PR_ASN_CF)
pr_entries = dict(cf.get_range())
for pr_uuid in pr_entries.keys():
pr_entry = pr_entries[pr_uuid] or {}
asn = pr_entry.get('asn')
if asn:
if pr_uuid not in self.pr_asn_map:
self.pr_asn_map[pr_uuid] = asn
if asn not in self.asn_pr_map:
self.asn_pr_map[asn] = pr_uuid
# end init_pr_asn_map
def init_dci_asn_map(self):
cf = self.get_cf(self._DCI_ASN_CF)
dci_entries = dict(cf.get_range())
for dci_uuid in dci_entries.keys():
dci_entry = dci_entries[dci_uuid] or {}
asn = dci_entry.get('asn')
if asn:
if dci_uuid not in self.dci_asn_map:
self.dci_asn_map[dci_uuid] = asn
if asn not in self.asn_dci_map:
self.asn_dci_map[asn] = dci_uuid
# end init_dci_asn_map
def get_ip(self, key, ip_used_for):
return self.get_one_col(self._PR_VN_IP_CF, key,
DMUtils.get_ip_cs_column_name(ip_used_for))
# end
def get_asn_for_pr(self, pr_uuid):
return self.pr_asn_map.get(pr_uuid)
# end get_asn_for_pr
def get_pr_for_asn(self, asn):
return self.asn_pr_map.get(asn)
# end get_pr_for_asn
def get_asn_for_dci(self, dci_uuid):
return self.dci_asn_map.get(dci_uuid)
# end get_asn_for_dci
def get_dci_for_asn(self, asn):
return self.asn_dci_map.get(asn)
# end get_dci_for_asn
def get_dci_ip(self, key):
return self.get_one_col(self._PR_DCI_IP_CF, key, "ip")
# end
def add_ip(self, key, ip_used_for, ip):
self.add(self._PR_VN_IP_CF, key, {DMUtils.get_ip_cs_column_name(ip_used_for): ip})
# end
def add_dci_ip(self, key, ip):
self.add(self._PR_DCI_IP_CF, key, {"ip": ip})
# end
def add_asn(self, pr_uuid, asn):
self.add(self._PR_ASN_CF, pr_uuid, {'asn': asn})
self.pr_asn_map[pr_uuid] = asn
self.asn_pr_map[asn] = pr_uuid
# end add_asn
def add_dci_asn(self, dci_uuid, asn):
self.add(self._DCI_ASN_CF, dci_uuid, {'asn': asn})
self.dci_asn_map[dci_uuid] = asn
self.asn_dci_map[asn] = dci_uuid
# end add_dci_asn
def delete_ip(self, key, ip_used_for):
self.delete(self._PR_VN_IP_CF, key, [DMUtils.get_ip_cs_column_name(ip_used_for)])
# end
def delete_dci_ip(self, key):
self.delete(self._PR_DCI_IP_CF, key)
# end
def add_to_pr_map(self, pr_uuid, vn_subnet, ip_used_for):
if pr_uuid in self.pr_vn_ip_map:
self.pr_vn_ip_map[pr_uuid].add((vn_subnet, ip_used_for))
else:
self.pr_vn_ip_map[pr_uuid] = set()
self.pr_vn_ip_map[pr_uuid].add((vn_subnet, ip_used_for))
# end
def delete_from_pr_map(self, pr_uuid, vn_subnet, ip_used_for):
if pr_uuid in self.pr_vn_ip_map:
self.pr_vn_ip_map[pr_uuid].remove((vn_subnet, ip_used_for))
if not self.pr_vn_ip_map[pr_uuid]:
del self.pr_vn_ip_map[pr_uuid]
# end
def add_to_pr_dci_map(self, pr_uuid, dci_key):
if pr_uuid in self.pr_dci_ip_map:
self.pr_dci_ip_map[pr_uuid].add(dci_key)
else:
self.pr_dci_ip_map[pr_uuid] = set()
self.pr_dci_ip_map[pr_uuid].add(dci_key)
# end
def delete_from_pr_dci_map(self, pr_uuid, dci_key):
if pr_uuid in self.pr_dci_ip_map:
self.pr_dci_ip_map[pr_uuid].remove((dci_key))
if not self.pr_dci_ip_map[pr_uuid]:
del self.pr_dci_ip_map[pr_uuid]
# end
def delete_pr(self, pr_uuid):
vn_subnet_set = self.pr_vn_ip_map.get(pr_uuid, set())
for vn_subnet_ip_used_for in vn_subnet_set:
vn_subnet = vn_subnet_ip_used_for[0]
ip_used_for = vn_subnet_ip_used_for[1]
ret = self.delete(self._PR_VN_IP_CF, pr_uuid + ':' + vn_subnet,
[DMUtils.get_ip_cs_column_name(ip_used_for)])
            if not ret:
self._logger.error("Unable to free ip from db for vn/pr/subnet/ip_used_for "
"(%s/%s/%s)" % (pr_uuid, vn_subnet, ip_used_for))
asn = self.pr_asn_map.pop(pr_uuid, None)
if asn is not None:
self.asn_pr_map.pop(asn, None)
ret = self.delete(self._PR_ASN_CF, pr_uuid)
if not ret:
self._logger.error("Unable to free asn from db for pr %s" %
pr_uuid)
# end
def delete_dci(self, dci_uuid):
asn = self.dci_asn_map.pop(dci_uuid, None)
if asn is not None:
self.asn_dci_map.pop(asn, None)
ret = self.delete(self._DCI_ASN_CF, dci_uuid)
if not ret:
self._logger.error("Unable to free dci asn from db for dci %s" %
dci_uuid)
# end
def handle_dci_deletes(self, current_dci_set):
cs_dci_set = set(self.dci_asn_map.keys())
delete_set = cs_dci_set.difference(current_dci_set)
for dci_uuid in delete_set:
self.delete_dci(dci_uuid)
# end
def handle_pr_deletes(self, current_pr_set):
cs_pr_set = set(self.pr_vn_ip_map.keys())
delete_set = cs_pr_set.difference(current_pr_set)
for pr_uuid in delete_set:
self.delete_pr(pr_uuid)
# end
def get_pr_vn_set(self, pr_uuid):
return self.pr_vn_ip_map.get(pr_uuid, set())
# end
def get_pr_dci_set(self, pr_uuid):
return self.pr_dci_ip_map.get(pr_uuid, set())
# end
@classmethod
def get_db_info(cls):
db_info = [(cls._KEYSPACE, [cls._PR_VN_IP_CF])]
return db_info
# end get_db_info
# end
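# Illustrative sketch (not part of the class above): the *_asn_map methods
# maintain a pair of plain dictionaries as a bidirectional PR-UUID <-> ASN
# lookup, mirroring every write and cleaning up both sides on delete. The
# names below are hypothetical stand-ins for the cached maps, with the
# Cassandra persistence done by add()/delete() left out.
pr_asn_map = {}
asn_pr_map = {}

def add_asn_sketch(pr_uuid, asn):
    # mirror the assignment in both directions
    pr_asn_map[pr_uuid] = asn
    asn_pr_map[asn] = pr_uuid

def delete_pr_sketch(pr_uuid):
    # pop one side, then use the popped value to clean up the other side
    asn = pr_asn_map.pop(pr_uuid, None)
    if asn is not None:
        asn_pr_map.pop(asn, None)

add_asn_sketch("pr-1", 64512)
assert asn_pr_map[64512] == "pr-1"
delete_pr_sketch("pr-1")
assert 64512 not in asn_pr_map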
|
'''
repositories
============
The following methods allow for interaction with the Tenable.sc
:sc-api:`Repository <Repository.html>` API. These items are typically seen
under the **Repositories** section of Tenable.sc.
Methods available on ``sc.repositories``:
.. rst-class:: hide-signature
.. autoclass:: RepositoryAPI
.. automethod:: accept_risk_rules
.. automethod:: asset_intersections
.. automethod:: create
.. automethod:: delete
.. automethod:: details
.. automethod:: device_info
.. automethod:: edit
.. automethod:: export_repository
.. automethod:: import_repository
.. automethod:: recast_risk_rules
.. automethod:: remote_authorize
.. automethod:: remote_fetch
.. automethod:: remote_sync
'''
from .base import SCEndpoint
from tenable.utils import dict_merge, policy_settings
from io import BytesIO
import json, semver
class RepositoryAPI(SCEndpoint):
def _constructor(self, **kw):
'''
Repository document constructor
'''
if 'nessus_sched' in kw:
kw['nessusSchedule'] = self._schedule_constructor(kw['nessus_sched'])
del(kw['nessus_sched'])
if 'mobile_sched' in kw:
kw['mobileSchedule'] = self._schedule_constructor(kw['mobile_sched'])
del(kw['mobile_sched'])
if 'remote_sched' in kw:
kw['remoteSchedule'] = self._schedule_constructor(kw['remote_sched'])
del(kw['remote_sched'])
if 'name' in kw:
# Validate the name is a string
self._check('name', kw['name'], str)
if 'description' in kw:
# Verify that the description is a string
self._check('description', kw['description'], str)
if 'format' in kw:
# The data format for the repository.
kw['dataFormat'] = self._check('format', kw['format'], str,
choices=['agent', 'IPv4', 'IPv6', 'mobile'])
del(kw['format'])
if 'repo_type' in kw:
# The type of repository
kw['type'] = self._check('repo_type', kw['repo_type'], str,
choices=['Local', 'Remote', 'Offline'])
del(kw['repo_type'])
if 'orgs' in kw:
# Validate all of the organizational sub-documents.
kw['organizations'] = [{'id': self._check('org_id', o, int)}
for o in self._check('orgs', kw['orgs'], list)]
del(kw['orgs'])
if 'trending' in kw:
# Trending should be between 0 and 365.
kw['trendingDays'] = self._check('trending', kw['trending'], int,
choices=list(range(366)))
del(kw['trending'])
if 'fulltext_search' in kw:
# trendWithRaw is the backend parameter name for "Full Text Search"
# within the UI. We will be calling it fulltext_search to more
# closely align with what the frontend calls this feature.
kw['trendWithRaw'] = str(self._check('fulltext_search',
kw['fulltext_search'], bool)).lower()
del(kw['fulltext_search'])
if 'lce_correlation' in kw:
# The correlation parameter isn't well named here, we will call it
# out as LCE correlation to specifically note what it is for.
kw['correlation'] = [{'id': self._check('lce_id', l, int)}
for l in self._check('lce_correlation', kw['lce_correlation'], list)]
del(kw['lce_correlation'])
if 'allowed_ips' in kw:
# Using allowed_ips here instead of ipRange to again more closely
# align with the frontend and to more explicitly call out the
# function of this parameter.
kw['ipRange'] = ','.join([self._check('ip', i, str)
for i in self._check('allowed_ips', kw['allowed_ips'], list)])
del(kw['allowed_ips'])
if 'remote_ip' in kw:
kw['remoteIP'] = self._check('remote_ip', kw['remote_ip'], str)
del(kw['remote_ip'])
if 'remote_repo' in kw:
kw['remoteID'] = self._check('remote_repo', kw['remote_repo'], int)
del(kw['remote_repo'])
if 'preferences' in kw:
# Validate that all of the preferences are K:V pairs of strings.
for key in self._check('preferences', kw['preferences'], dict):
self._check('preference:{}'.format(key), key, str)
self._check('preference:{}:value'.format(key),
kw['preferences'][key], str)
if 'mdm_id' in kw:
kw['mdm'] = {'id': self._check('mdm_id', kw['mdm_id'], int)}
del(kw['mdm_id'])
if 'scanner_id' in kw:
kw['scanner'] = {'id': self._check(
'scanner_id', kw['scanner_id'], int)}
del(kw['scanner_id'])
return kw
def _rules_constructor(self, **kw):
'''
Accept/Recast Rule Query Creator
'''
if 'plugin_id' in kw:
# Convert the snake_cased variant to the camelCased variant.
kw['pluginID'] = self._check('plugin_id', kw['plugin_id'], int)
del(kw['plugin_id'])
if 'port' in kw:
# validate that port is an integer
self._check('port', kw['port'], int)
if 'orgs' in kw:
# convert the list of organization IDs into the comma-separated
# string that the API expects.
kw['organizationIDs'] = ','.join([str(self._check('org:id', o, int))
for o in self._check('orgs', kw['orgs'], list)])
del(kw['orgs'])
if 'fields' in kw:
# convert the list of field names into the comma-separated string
# that the API expects.
kw['fields'] = ','.join([self._check('field', f, str)
for f in kw['fields']])
return kw
def list(self, fields=None, repo_type=None):
'''
Retrieves a list of repositories.
:sc-api:`repository: list <Repository.html#repository_GET>`
Args:
fields (list, optional):
The list of fields that are desired to be returned. For details
on what fields are available, please refer to the details on the
request within the repository list API doc.
repo_type (str, optional):
Restrict the response to a specific type of repository. If not
set, then all repository types will be returned. Allowed types
are ``All``, ``Local``, ``Remote``, and ``Offline``.
Returns:
:obj:`list`:
List of repository definitions.
Examples:
Retrieve all of the repositories:
>>> repos = sc.repositories.list()
Retrieve all of the remote repositories:
>>> repos = sc.repositories.list(repo_type='Remote')
'''
params = dict()
if repo_type:
params['type'] = self._check('repo_type', repo_type, str, choices=[
'All', 'Local', 'Remote', 'Offline'])
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('repository', params=params).json()['response']
def create(self, **kw):
'''
Creates a new repository
:sc-api:`repository: create <Repository.html#repository_POST>`
Args:
name (str): The name for the repository.
allowed_ips (list, optional):
Allowed IPs will restrict incoming data being inserted into the
repository to only the IPs that exist within the configured
CIDR ranges. Accepts a list of CIDR strings based on the
repository format (IPv4 or IPv6). If left unspecified, then it
will default to the CIDR equivalent of "allow all" for that IP
version. IPv4=0.0.0.0/0, IPv6=::/0.
description (str, optional):
A description for the repository.
format (str, optional):
The format of the repository. Valid choices are ``agent``,
``IPv4``, ``IPv6``, and ``mobile``. The default if unspecified
is ``IPv4``.
fulltext_search (bool, optional):
Should full-text searching be enabled? This option is used for
IPv4, IPv6, and agent repository formats and determines whether
the plugin output is trended along with the normalized data. If
left unspecified, the default is set to ``False``.
lce_correlation (list, optional):
What Log Correlation Engines (if any) should correlate against
this repository. A list of configured LCE numeric IDs is
supplied. This option is used on IPv4, IPv6, and agent formats
and is defaulted to nothing if left unspecified.
nessus_sched (dict, optional):
This is the .Nessus file generation schedule for IPv4 and IPv6
repository formats. This option should only be used if there
is a need to consume the Repository in a raw Nessus XML format.
If left unspecified, it will default to ``{'type': 'never'}``.
For more information refer to `Schedule Dictionaries`_
mobile_sched (dict, optional):
When using the mobile repository format, this option will inform
Tenable.sc how often to perform the MDM synchronization into the
repository. If left unspecified, it will default to
``{'type': 'never'}``. For more information refer to
`Schedule Dictionaries`_
orgs (list, optional):
A list of Organization IDs used to assign the repository to 1 or
many organizations.
preferences (dict, optional):
When using a mobile repository type, this dictionary details
the required preferences to inject into the backend scan needed
to communicate to the MDM solution.
remote_ip (str, optional):
When the Remote repository type is used, this is the IP
address of the Tenable.sc instance that the repository will be
pulled from.
remote_repo (int, optional):
When the Remote repository type is used, this is the numeric ID
of the repository on the remote host that will be pulled.
remote_sched (dict, optional):
When the Remote repository type is used, this is the schedule
dictionary that will inform Tenable.sc how often to synchronize
with the downstream Tenable.sc instance. If left unspecified
then we will default to ``{'type': 'never'}``. For more
information refer to `Schedule Dictionaries`_
repo_type (str, optional):
What type of repository is this? Valid choices are ``Local``,
``Remote``, and ``Offline``. The default if unspecified is
``Local``.
scanner_id (int, optional):
When using the mobile repository format, we must specify the
scanner from which to query the MDM source.
trending (int, optional):
How many days of trending snapshots should be created for this
repository. This value is only used for IPv4, IPv6, and agent
repositories. If not supplied, the default will be 0.
Returns:
:obj:`dict`:
The repository resource record for the newly created Repo.
Examples:
Creating a new IPv4 Repository leveraging the defaults:
>>> repo = sc.repositories.create(name='Example IPv4')
Creating a new IPv4 Repository with 90 days of trending and linked
to the first Organization:
>>> repo = sc.repositories.create(
... name='Example Trending', trending=90, orgs=[1])
Creating an IPv6 repository:
>>> repo = sc.repositories.create(
... name='Example IPv6', format='IPv6')
Creating an agent repository:
>>> repo = sc.repositories.create(
... name='Example Agent', format='agent')
Creating an MDM repository for ActiveSync that will sync every day
at 6am eastern:
>>> repo = sc.repositories.create(
... name='Example ActiveSync', mdm_id=1, scanner_id=1,
... format='mobile', orgs=[1],
... mobile_sched={
... 'repeatRule': 'FREQ=DAILY;INTERVAL=1',
... 'start': 'TZID=America/New_York:20190212T060000',
... 'type': 'ical',
... },
... preferences={
... 'domain': 'AD_DOMAIN',
... 'domain_admin': 'DA_ACCOUNT_NAME',
... 'domain_controller': 'dc1.company.tld',
... 'password': 'DA_ACCOUNT_PASSWORD'
... })
Creating a new repository to remotely sync the downstream Tenable.sc
instance's repository 1 to this host and institute trending for 90
days:
>>> repo = sc.repositories.create(
... name='Example Remote Repo',
... repo_type='Remote',
... remote_ip='192.168.0.101',
... remote_repo=1,
... trending=90,
... orgs=[1],
... remote_sched={
... 'type': 'ical',
... 'start': 'TZID=America/New_York:20190212T060000',
... 'repeatRule': 'FREQ=DAILY;INTERVAL=1'
... })
'''
kw = self._constructor(**kw)
kw['dataFormat'] = kw.get('dataFormat', 'IPv4')
kw['type'] = kw.get('type', 'Local')
if kw['dataFormat'] in ['IPv4', 'IPv6', 'agent']:
kw['trendingDays'] = kw.get('trendingDays', 0)
kw['trendWithRaw'] = kw.get('trendWithRaw', 'false')
if kw['dataFormat'] in ['IPv4', 'IPv6']:
kw['nessusSchedule'] = kw.get('nessusSchedule', {'type': 'never'})
if kw['dataFormat'] == 'IPv4':
kw['ipRange'] = kw.get('ipRange', '0.0.0.0/0')
if kw['dataFormat'] == 'IPv6':
kw['ipRange'] = kw.get('ipRange', '::/0')
if kw['dataFormat'] == 'mobile':
kw['mobileSchedule'] = kw.get('mobileSchedule', {'type': 'never'})
if kw['type'] == 'Remote':
kw['remoteSchedule'] = kw.get('remoteSchedule', {'type': 'never'})
return self._api.post('repository', json=kw).json()['response']
def details(self, id, fields=None):
'''
Retrieves the details for the specified repository.
:sc-api:`repository: details <Repository.html#repository_id_GET>`
Args:
id (int): The numeric id of the repository.
fields (list, optional):
The list of fields that are desired to be returned. For details
on what fields are available, please refer to the details on the
request within the repository details API doc.
Returns:
:obj:`dict`:
The repository resource record.
Examples:
>>> repo = sc.repositories.details(1)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str) for f in fields])
return self._api.get('repository/{}'.format(
self._check('id', id, int)), params=params).json()['response']
def delete(self, id):
'''
Remove the specified repository from Tenable.sc
:sc-api:`repository: delete <Repository.html#repository_id_DELETE>`
Args:
id (int): The numeric id of the repository to delete.
Returns:
:obj:`str`:
Empty response string
Examples:
>>> sc.repositories.delete(1)
'''
return self._api.delete('repository/{}'.format(
self._check('id', id, int))).json()['response']
def edit(self, id, **kw):
'''
Updates an existing repository
:sc-api:`repository: edit <Repository.html#repository_id_PATCH>`
Args:
id (int): The numeric id of the repository to edit.
allowed_ips (list, optional):
Allowed IPs will restrict incoming data being inserted into the
repository to only the IPs that exist within the configured
CIDR ranges. Accepts a list of CIDR strings based on the
repository format (IPv4 or IPv6).
description (str, optional):
A description for the repository.
lce_correlation (list, optional):
What Log Correlation Engines (if any) should correlate against
this repository. A list of configured LCE numeric IDs is
supplied. This option is used on IPv4, IPv6, and agent formats.
name (str, optional): The name for the repository.
nessus_sched (dict, optional):
This is the .Nessus file generation schedule for IPv4 and IPv6
repository formats. This option should only be used if there
is a need to consume the Repository in a raw Nessus XML format.
For more information refer to `Schedule Dictionaries`_
mobile_sched (dict, optional):
When using the mobile repository format, this option will inform
Tenable.sc how often to perform the MDM synchronization into the
repository. For more information refer to
`Schedule Dictionaries`_
orgs (list, optional):
A list of Organization IDs used to assign the repository to 1 or
many organizations.
preferences (dict, optional):
When using a mobile repository type, this dictionary details
the required preferences to inject into the backend scan needed
to communicate to the MDM solution.
remote_ip (str, optional):
When the Remote repository type is used, this is the IP
address of the Tenable.sc instance that the repository will be
pulled from.
remote_repo (int, optional):
When the Remote repository type is used, this is the numeric ID
of the repository on the remote host that will be pulled.
remote_sched (dict, optional):
When the Remote repository type is used, this is the schedule
dictionary that will inform Tenable.sc how often to synchronize
with the downstream Tenable.sc instance. For more
information refer to `Schedule Dictionaries`_
scanner_id (int, optional):
When using the mobile repository format, we must specify the
scanner from which to query the MDM source.
trending (int, optional):
How many days of trending snapshots should be created for this
repository. This value is only used for IPv4, IPv6, and agent
repositories.
Returns:
:obj:`dict`:
The repository resource record for the newly created Repo.
Examples:
>>> repo = sc.repositories.edit(1, name='Example IPv4')
'''
kw = self._constructor(**kw)
return self._api.patch('repository/{}'.format(
self._check('id', id, int)), json=kw).json()['response']
def accept_risk_rules(self, id, **kw):
'''
Retrieves the accepted risk rules associated with the specified
repository.
:sc-api:`repository: accept rules <Repository.html#RepositoryRESTReference-/repository/{id}/acceptRiskRule>`
Args:
id (int): The numeric id of the repository.
fields (list, optional):
The list of fields that are desired to be returned. For details
on what fields are available, please refer to the details on the
request within the repository accept risk rules API doc.
Returns:
:obj:`list`:
List of the accepted risk rules that apply to the repo.
Examples:
>>> rules = sc.repositories.accept_risk_rules(1)
'''
params = self._rules_constructor(**kw)
return self._api.get('repository/{}/acceptRiskRule'.format(
self._check('id', id, int)), params=params).json()['response']
def recast_risk_rules(self, id, **kw):
'''
Retrieves the recast risk rules associated with the specified
repository.
:sc-api:`repository: recast rules <Repository.html#RepositoryRESTReference-/repository/{id}/recastRiskRule>`
Args:
id (int): The numeric id of the repository.
fields (list, optional):
The list of fields that are desired to be returned. For details
on what fields are available, please refer to the details on the
request within the repository recast risk rules API doc.
Returns:
:obj:`list`:
List of the recast risk rules that apply to the repo.
Examples:
>>> rules = sc.repositories.recast_risk_rules(1)
'''
params = self._rules_constructor(**kw)
return self._api.get('repository/{}/recastRiskRule'.format(
self._check('id', id, int)), params=params).json()['response']
def asset_intersections(self, id, uuid=None, ip=None, dns=None):
'''
Retrieves the asset lists that a UUID, DNS address, or IP exists in.
:sc-api:`repository: asset intersections <Repository.html#RepositoryRESTReference-/repository/{id}/assetIntersections>`
Args:
id (int): The numeric identifier of the repository to query.
dns (str): The DNS name to query
ip (str): The IP address to query
uuid (str): The UUID to query.
Returns:
:obj:`list`:
The list of assets matching the criteria.
Examples:
>>> assetlists = sc.repositories.asset_intersections(1,
... ip='192.168.0.1')
'''
params = dict()
if dns:
params['dnsName'] = self._check('dns', dns, str)
if ip:
params['ip'] = self._check('ip', ip, str)
if uuid:
params['uuid'] = self._check('uuid', uuid, 'uuid')
return self._api.get('repository/{}/assetIntersections'.format(
self._check('id', id, int)),
params=params).json()['response'].get('assets')
def import_repository(self, id, fobj):
'''
Imports the repository archive for an offline repository.
:sc-api:`repository: import <Repository.html#RepositoryRESTReference-/repository/{id}/import>`
Args:
id (int): The numeric id associated to the offline repository.
fobj (FileObject):
The file-like object containing the repository archive.
Returns:
:obj:`dict`:
The import response record.
Example:
>>> with open('repo.tar.gz', 'rb') as archive:
... sc.repositories.import_repository(1, archive)
'''
return self._api.post('repository/{}/import'.format(
self._check('id', id, int)), json={
'file': self._api.files.upload(fobj)
}).json()['response']
def export_repository(self, id, fobj):
'''
Exports the repository and writes the archive tarball into the file
object passed.
:sc-api:`repository: export <Repository.html#RepositoryRESTReference-/repository/{id}/export>`
Args:
id (int): The numeric id associated to the repository.
fobj (FileObject):
The file-like object for the repository archive.
Returns:
:obj:`dict`:
The export response record.
Example:
>>> with open('repo.tar.gz', 'wb') as archive:
... sc.repositories.export_repository(1, archive)
'''
resp = self._api.get('repository/{}/export'.format(
self._check('id', id, int)), stream=True)
# Lets stream the file into the file-like object...
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
return fobj
def remote_sync(self, id):
'''
Initiates a remote synchronization with a downstream Tenable.sc
instance. This action can only be performed on an offline repository.
:sc-api:`repository: sync <Repository.html#RepositoryRESTReference-/repository/{id}/sync>`
Args:
id (int): The numeric id for the remote repository.
Returns:
:obj:`dict`:
The sync response record.
Examples:
>>> sc.repositories.remote_sync(1)
'''
return self._api.post('repository/{}/sync'.format(
self._check('id', id, int)), json={}).json()['response']
def mobile_sync(self, id):
'''
Initiates a MDM synchronization with the configured MDM source on the
mobile repository specified.
:sc-api:`repository: update mobile data <Repository.html#RepositoryRESTReference-/repository/{id}/updateMobileData>`
Args:
id (int): The numeric id for the mobile repository to run the sync.
Returns:
:obj:`dict`:
The sync response record.
Examples:
>>> sc.repositories.mobile_sync(1)
'''
return self._api.post('repository/{}/updateMobileData'.format(
self._check('id', id, int)), json={}).json()['response']
def device_info(self, id, dns=None, ip=None, uuid=None, fields=None):
'''
Retrieves the device information for the requested device on the
associated repository.
:sc-api:`repository: device info <Repository.html#RepositoryRESTReference-/repository/{id}/deviceInfo>`
`repository: ip info <Repository.html#RepositoryRESTReference-/repository/{id}/ipInfo>`
Args:
id (int): The numeric id for the repository to query.
dns (str): The DNS name to query
fields (list, optional):
The list of fields that are desired to be returned. For details
on what fields are available, please refer to the details on the
request within the repository device info API doc.
ip (str): The IP address to query
uuid (str): The UUID to query.
Returns:
:obj:`dict`:
The device resource.
Examples:
>>> host = sc.repositories.device_info(1, ip='192.168.0.1')
'''
# We will generally want to query the deviceInfo action; however, if we
# happen to be on a Tenable.sc instance whose version is less than 5.7,
# we have to query ipInfo instead.
method = 'deviceInfo'
if semver.match(self._api.version, '<5.7.0'):
method = 'ipInfo'
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str) for f in fields])
if dns:
params['dnsName'] = self._check('dns', dns, str)
if ip:
params['ip'] = self._check('ip', ip, str)
if uuid:
params['uuid'] = self._check('uuid', uuid, 'uuid')
return self._api.get('repository/{}/{}'.format(
self._check('id', id, int), method), params=params).json()['response']
def remote_authorize(self, host, username, password):
'''
Authorizes communication to a downstream Tenable.sc instance with the
provided username and password.
:sc-api:`repository: authorize <Repository.html#RepositoryRESTReference-/repository/authorize>`
Args:
host (str): The downstream Tenable.sc instance ip address.
username (str): The username to authenticate with.
password (str): The password to authenticate with.
Returns:
:obj:`str`:
Empty response object
Examples:
>>> sc.repositories.remote_authorize(
... '192.168.0.101', 'admin', 'password')
'''
return self._api.post('repository/authorize', json={
'host': self._check('host', host, str),
'username': self._check('username', username, str),
'password': self._check('password', password, str)
}).json()['response']
def remote_fetch(self, host):
'''
Retrieves the list of repositories from the specified downstream
Tenable.sc instance.
:sc-api:`repository: fetch remote <Repository.html#RepositoryRESTReference-/repository/fetchRemote>`
Args:
host (str): The downstream Tenable.sc instance ip address.
Returns:
:obj:`list`:
The list of repositories on the downstream Tenable.sc instance.
'''
return self._api.get('repository/fetchRemote', params={
'host': self._check('host', host, str)}).json()['response']
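# Usage sketch for the RepositoryAPI methods above (comment-only, since it
# needs a live Tenable.sc host). The hostname and credentials are
# placeholders; ``sc`` is the pyTenable session object referenced in the
# module docstring.
#
# >>> from tenable.sc import TenableSC
# >>> sc = TenableSC('sc.example.com')
# >>> sc.login('username', 'password')
# >>> repo = sc.repositories.create(name='Sketch Repo', trending=30, orgs=[1])
# >>> local = sc.repositories.list(repo_type='Local')
# >>> details = sc.repositories.details(repo['id'])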
|
n = 0
with open("G.txt", "wt") as out_file:
    while n != 10:
        out_file.write("test\n")
        n += 1
|
# -*- coding: utf-8 -*-
import uuid
from io import StringIO
from PySide2 import QtGui
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QMetaObject
class UiLoader(QUiLoader):
def __init__(self, base_instance):
QUiLoader.__init__(self, base_instance)
self.base_instance = base_instance
def createWidget(self, class_name, parent=None, name=''):
if parent is None and self.base_instance:
return self.base_instance
else:
# create a new widget for child widgets
widget = QUiLoader.createWidget(self, class_name, parent, name)
if self.base_instance:
setattr(self.base_instance, name, widget)
return widget
def load_ui_file(ui_file, base_instance=None):
loader = UiLoader(base_instance)
widget = loader.load(ui_file)
QMetaObject.connectSlotsByName(widget)
return widget
class StdRedirector(StringIO):
# From http://stackoverflow.com/questions/17132994/pyside-and-python-logging/17145093#17145093
def __init__(self, widget, out=None, color=None):
    """(edit, out=None, color=None) -> can write stdout, stderr to a
    QTextEdit.
    edit = QTextEdit
    out = alternate stream ( can be the original sys.stdout )
    color = alternate color (i.e. color stderr a different color)
    """
    StringIO.__init__(self)
    self.edit_widget = widget
    self.out = out
    self.color = color
def write(self, text):
# TODO: Doesn't seem to have any effect
if self.color:
original = self.edit_widget.textColor()
self.edit_widget.setTextColor(QtGui.QColor(self.color))
self.edit_widget.moveCursor(QtGui.QTextCursor.End)
self.edit_widget.insertPlainText(text)
if self.color:
self.edit_widget.setTextColor(original)
if self.out:
self.out.write(text)
def flush(self, *args, **kwargs):
pass
def new_id():
return uuid.uuid4().hex
abt1 = '16z7c78fbfdzb9fbe893c2'
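# Minimal usage sketch for the helpers above, assuming a desktop session
# with PySide2 available: mirror stdout into a read-only QTextEdit while
# still forwarding output to the real console stream.
if __name__ == "__main__":
    import sys
    from PySide2.QtWidgets import QApplication, QTextEdit

    app = QApplication(sys.argv)
    log_view = QTextEdit()
    log_view.setReadOnly(True)
    # keep the original stream so output still reaches the terminal
    sys.stdout = StdRedirector(log_view, out=sys.__stdout__)
    print("hello from the redirected stdout")
    log_view.show()
    app.exec_()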
|
import os
from relevanceai.constants.config import Config
from relevanceai.constants.links import *
CONFIG_PATH = os.path.dirname(os.path.abspath(__file__)) + "/config.ini"
CONFIG = Config(CONFIG_PATH)
MAX_CACHESIZE = (
int(CONFIG["cache.max_size"]) if CONFIG["cache.max_size"] != "None" else None
)
TRANSIT_ENV_VAR = "_IS_ANALYTICS_IN_TRANSIT"
GLOBAL_DATASETS = ["_mock_dataset_"]
DATASETS = [
"coco",
"games",
"ecommerce_1",
"ecommerce_2",
"ecommerce_3",
"online_retail",
"news",
"flipkart",
"realestate2",
"toy_image_caption_coco_image_encoded",
]
MB_TO_BYTE = 1024 * 1024
LIST_SIZE_MULTIPLIER = 3
SUCCESS_CODES = [200]
RETRY_CODES = [400, 404]
HALF_CHUNK_CODES = [413, 524]
US_EAST_1 = "us-east-1"
AP_SOUTEAST_1 = "ap-southeast-1"
OLD_AUSTRALIA_EAST = "old-australia-east"
IMG_EXTS = [
".jpg",
".jpeg",
".tif",
".tiff",
".png",
".gif",
".bmp",
".eps",
]
|
from genetics.fitnessTest import FitnessTest
from genetics.generator import getNewScheduleFromList
from genetics.genepool import GenePool
from product import Product
from schedule import Task
from vessels import Vessels
genepool = GenePool()
running = True
iterationCount = 0
def addSchedule(schedule):
genepool.addSchedule(schedule)
def tick():
global iterationCount
genepool.refreshSchedules()
FitnessTest.testPool(genepool)
genepool.removeSchedules()
iterationCount += 1
# print(genepool.getBestSchedule().fitness)
if __name__ == "__main__":
vessels = Vessels()
vessels.addVessel(20)
vessels.addVessel(30)
vessels.addVessel(50)
genepool.addSchedule(getNewScheduleFromList([
[Product("1", "2", "10"), 10],
[Product("2", "2", "10"), 10],
[Product("3", "2", "10"), 10],
[Product("4", "2", "10"), 10],
[Product("5", "2", "10"), 10],
[Product("6", "2", "10"), 10],
]))
for _count in range(100):
genepool.refreshSchedules()
FitnessTest.testPool(genepool)
genepool.removeSchedules()
print(genepool.getBestSchedule().fitness)
genepool.getBestSchedule().print()
|
import numpy as np
import torch
from torch import nn, optim # , distributed
from torch.optim import lr_scheduler
from torch.backends import cudnn
from torch.utils import data
from torch.nn.utils import clip_grad_norm_
from torch.utils.tensorboard import SummaryWriter
# from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from . import model, dataset, ranger, benchmark
# run: python -m torch.distributed.launch main.py
def main():
# seed = 1283
# np.random.seed(seed)
# torch.manual_seed(seed)
cudnn.benchmark = True
cudnn.deterministic = True
writer = SummaryWriter(log_dir="/home/mist/output", flush_secs=30)
# distributed
# distributed.init_process_group(backend="nccl")
# local_rank = torch.distributed.get_rank()
# torch.cuda.set_device(local_rank)
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_folder = dataset.RadarDataset(train=True)
valid_folder = dataset.RadarDataset(train=False)
# distributed
# train = data.DataLoader(train_folder, batch_size=3, shuffle=False,
# pin_memory=True, num_workers=2, sampler=DistributedSampler(train_folder))
# valid = data.DataLoader(valid_folder, batch_size=1, shuffle=False,
# pin_memory=True, num_workers=2, sampler=DistributedSampler(valid_folder))
train = data.DataLoader(train_folder, batch_size=1, shuffle=False, pin_memory=True, num_workers=20)
valid = data.DataLoader(valid_folder, batch_size=1, shuffle=False, pin_memory=True, num_workers=5)
generator = model.ConvLSTMNetwork(10).cuda()
generator_loss_func = model.weightedLoss().cuda()
generator_optimizer = ranger.Ranger(generator.parameters())
generator, generator_optimizer = \
benchmark.load(generator, generator_optimizer, "./checkpoint_e20.pth.tar")
# generator.eval()
# writer.add_graph(generator, torch.rand([1, 10, 1, 512, 512]).cuda())
# w
# distributed
# generator = torch.nn.parallel.DistributedDataParallel(
# generator, device_ids=[local_rank], output_device=local_rank)
discriminator = model.Discriminator().cuda()
discriminator_loss_func = nn.BCEWithLogitsLoss().cuda()
discriminator_optimizer = optim.SGD(discriminator.parameters(), lr=1e-2)
discriminator, discriminator_optimizer = \
benchmark.load(discriminator, discriminator_optimizer, "./checkpoint_discriminator.pth.tar")
discriminator.train()
# discriminator.eval()
# writer.add_graph(discriminator, torch.rand([1, 10, 1, 512, 512]).cuda())
generator_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(generator_optimizer, 10)
generator.train()
# discriminator.train()
# sample, _ = train_folder.correcter()
# writer.add_images('inputs_hmr', sample[:, 1, ...].unsqueeze(1), 0)
# writer.add_images('inputs_radar', sample[:, 0, ...].unsqueeze(1), 0)
print(generator)
print(discriminator)
for epoch in range(51):
t = tqdm(train, leave=False, total=len(train))
train_loss = []
train_acc = []
verify_loss = []
for i, (targetVar, inputVar) in enumerate(t):
# print(inputVar.size(), targetVar.size())
inputs = inputVar.cuda() # B,S,C,H,W
generator_optimizer.zero_grad()
generator_pred = generator(inputs) # B,S,C,H,W -> fake
discriminator_optimizer.zero_grad()
discriminator_pred_fake = discriminator(generator_pred)
discriminator_loss = discriminator_loss_func(discriminator_pred_fake, torch.zeros([1]).cuda())
discriminator_loss_aver = discriminator_loss.item()
discriminator_loss.backward(retain_graph=True)
label = targetVar.cuda() # B,S,C,H,W
generator_loss = generator_loss_func(generator_pred, label) # + discriminator_loss
generator_loss_aver = generator_loss.item()
generator_loss.backward()
generator_optimizer.step()
_, p, f, c, _ = benchmark.acc(label, generator_pred)
t.set_postfix({
'tL': '{:.6f}'.format(generator_loss_aver),
'dL': '{:.6f}'.format(discriminator_loss_aver),
'ep': '{:02d}'.format(epoch),
'last': '{:.2f}'.format(torch.mean(label.reshape(10, -1).sum(1))),
})
train_loss.append(generator_loss_aver)
verify_loss.append(discriminator_loss_aver)
discriminator_pred_truth = discriminator(label)
discriminator_loss_2 = discriminator_loss_func(discriminator_pred_truth, torch.ones([1]).cuda())
if i % 5 == 0:
discriminator_loss_2.backward()
discriminator_optimizer.step()
total_l = (discriminator_loss + discriminator_loss_2) / 2
writer.add_scalar('Loss/Discriminator', total_l, epoch * len(train) + i + 1)
writer.add_scalar('Loss/Train', generator_loss_aver, epoch * len(train) + i + 1)
writer.add_scalar('POD/Train', p, epoch * len(train) + i + 1)
writer.add_scalar('FAR/Train', f, epoch * len(train) + i + 1)
writer.add_scalar('ETS/Train', c, epoch * len(train) + i + 1)
# tl = aa + bb + cc + dd
# writer.add_scalar('Factor/A, TP', aa / tl, epoch * len(train) + i + 1)
# writer.add_scalar('Factor/B, FP', bb / tl, epoch * len(train) + i + 1)
# writer.add_scalar('Factor/C, FN', cc / tl, epoch * len(train) + i + 1)
# writer.add_scalar('Factor/D, TN', dd / tl, epoch * len(train) + i + 1)
torch.cuda.empty_cache()
with torch.no_grad():
generator.eval()
valid_loss = []
t = tqdm(valid, leave=False, total=len(valid))
for i, (targetVar, inputVar) in enumerate(t):
inputs = inputVar.cuda()
label = targetVar.cuda()
pred = generator(inputs)
loss = generator_loss_func(pred, label)
_, p, f, c, _ = benchmark.acc(label, pred)
loss_aver = loss.item()
t.set_postfix({
'vL': '{:.6f}'.format(loss_aver),
'ep': '{:02d}'.format(epoch),
'last': '{:.2f}'.format(torch.mean(label.reshape(10, -1).sum(1))),
})
valid_loss.append(loss_aver)
if i == 5:
# draw images
# pred[pred < 0.01] = 0
# writer.add_images('inputs_hmr', inputs[0, :, 1, ...], epoch)
# writer.add_images('inputs', inputs[0, :, 1, ...], epoch)
writer.add_images('inputs', inputs[0, ...], epoch)
writer.add_images('labels', label[0, ...], epoch)
writer.add_images('outputs', pred[0, ...], epoch)
writer.add_scalar('Loss/Valid', np.mean(loss_aver), epoch * len(valid) + i + 1)
writer.add_scalar('POD/Valid', p, epoch * len(valid) + i + 1)
writer.add_scalar('FAR/Valid', f, epoch * len(valid) + i + 1)
writer.add_scalar('ETS/Valid', c, epoch * len(valid) + i + 1)
generator_scheduler.step(epoch)
print("epoch: {}, loss: {:.6f}".
format(epoch, np.mean(valid_loss)))
if epoch % 4 == 0 and epoch != 0:
print("Saving checkpoint...")
state = {
"epoch": epoch,
"generator_model": generator.state_dict(),
"generator_optimizer": generator_optimizer.state_dict(),
"discriminator_model": discriminator.state_dict(),
"discriminator_optimizer": discriminator_optimizer.state_dict()
}
torch.save(state, "./checkpoint_e{}.pth.tar".format(epoch))
torch.cuda.empty_cache()
if __name__ == "__main__":
main()
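# Standalone CPU sketch (toy tensors and plain SGD, not the project's
# ConvLSTM/Discriminator modules) of the adversarial update pattern used in
# main(): the generator minimizes a reconstruction-style loss on its output,
# while the discriminator is trained to score fakes as 0 and real targets as 1.
def _toy_gan_step():
    import torch
    from torch import nn, optim

    gen = nn.Linear(10, 10)
    disc = nn.Linear(10, 1)
    bce = nn.BCEWithLogitsLoss()
    mse = nn.MSELoss()
    g_opt = optim.SGD(gen.parameters(), lr=1e-2)
    d_opt = optim.SGD(disc.parameters(), lr=1e-2)

    inputs = torch.randn(4, 10)
    target = torch.randn(4, 10)

    # generator step: pull the generated sequence toward the target
    g_opt.zero_grad()
    fake = gen(inputs)
    g_loss = mse(fake, target)
    g_loss.backward()
    g_opt.step()

    # discriminator step: fakes labelled 0, real targets labelled 1
    d_opt.zero_grad()
    d_loss = (bce(disc(fake.detach()), torch.zeros(4, 1)) +
              bce(disc(target), torch.ones(4, 1))) / 2
    d_loss.backward()
    d_opt.step()
    return g_loss.item(), d_loss.item()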
|
###############################################################################
''''''
###############################################################################
import pandas as pd
import aliases
import analysis
from thesiscode.utilities import hard_cache, hard_cache_df, hard_cache_df_multi
reader = analysis.utilities.AnalysisReader('Viscoplastic', aliases.datadir)
def make_inputs():
return reader['*/inputs']
def make_inputs_frame():
_inputs = hard_cache('viscoplastic_inputs', make_inputs)
return analysis.common.make_inputs_frame(_inputs)
def get_inputs_frame():
out = hard_cache_df('viscoplastic_inputs', make_inputs_frame)
if 'hashID' in out.columns:
out = out.set_index('hashID')
return out
def make_averages_frame():
inputs = get_inputs_frame()
return analysis.common.make_averages_frame(reader, inputs)
def get_averages_frame():
out = hard_cache_df('viscoplastic_averages', make_averages_frame)
if 'hashID' in out.columns:
out = out.set_index('hashID')
return out
def make_endpoints_frames():
inputs = get_inputs_frame()
yield from analysis.common.make_endpoints_frames(reader, inputs)
def get_endpoints_frames():
outs = hard_cache_df_multi(('viscoplastic_initials', 'viscoplastic_finals'), make_endpoints_frames)
for out in outs:
if 'hashID' in out.columns:
out = out.set_index('hashID')
yield out
def get_rasters():
inputs = get_inputs_frame()
return analysis.common.get_rasters(reader, inputs, 'viscoplastic')
def get_summary_frames():
frames = (get_inputs_frame(), *get_endpoints_frames(), get_averages_frame())
commonkeys = set.intersection(*list(set(frame.index) for frame in frames))
frames = tuple(frame.loc[commonkeys] for frame in frames)
return frames
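# Toy sketch of the alignment step in get_summary_frames() above, without
# the hard_cache/reader machinery: keep only the index keys common to every
# frame, then slice each frame down to that set.
def _align_on_common_index_sketch():
    import pandas as pd
    a = pd.DataFrame({'x': [1, 2, 3]}, index=['h1', 'h2', 'h3'])
    b = pd.DataFrame({'y': [10, 20]}, index=['h2', 'h3'])
    frames = (a, b)
    commonkeys = set.intersection(*[set(frame.index) for frame in frames])
    return tuple(frame.loc[sorted(commonkeys)] for frame in frames)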
# def make_hashids(self):
# return reader['*/hashID']
# hashIDs = hard_cache('isovisc_hashids', make_hashids)
###############################################################################
###############################################################################
|
import random
num = random.randint(1, 100)
timesGuessed = 0
correct = False
while not correct:
    requestInt = int(input("What is your number guess? "))
    timesGuessed += 1
    if requestInt == num:
        print("Your number is correct! You guessed the number in", timesGuessed, "attempts!")
        correct = True
    elif requestInt > num:
        print("Your number is Too High.")
    else:
        print("Your number is Too Low.")
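# Side note (illustrative, separate from the game above): the Too High /
# Too Low feedback makes binary search the optimal strategy, needing at
# most 7 guesses for any secret between 1 and 100.
def bisect_guess(secret, low=1, high=100):
    guesses = 0
    while True:
        guess = (low + high) // 2
        guesses += 1
        if guess == secret:
            return guesses
        elif guess > secret:
            high = guess - 1
        else:
            low = guess + 1

assert max(bisect_guess(s) for s in range(1, 101)) == 7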
|
from .verbosemanager import *
from .decorator import *
from .simple import *
from .counter import *
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 12:48:26 2020
@author: jacob
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def hm(plat, data_path):
plate_name = plat.get_plate_name()
params = plat.get_all_params()
# List for making heatmaps
row_letters = ["A", "B", "C", "D", "E", "F", "G", "H"]
cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
w, h = 3, 96
hm_data_gr = [[0 for x in range(w)] for y in range(h)]
hm_data_ymax = [[0 for x in range(w)] for y in range(h)]
index = 0
for i in range(8):
for j in range(12):
no_growth_flag=False
well = row_letters[i] + str(cols[j])
hm_data_gr[index][0] = row_letters[i]
hm_data_gr[index][1] = cols[j]
hm_data_ymax[index][0] = row_letters[i]
hm_data_ymax[index][1] = cols[j]
try:
hm_data_ymax[index][2] = params[well][1]
if params[well][1] < 0.2:
no_growth_flag=True
except KeyError:
hm_data_ymax[index][2] = None
if no_growth_flag:
hm_data_gr[index][2] = 0
else:
try:
hm_data_gr[index][2] = params[well][0]
except KeyError:
hm_data_gr[index][2] = None
index += 1
# Pivot data into the 8x12 plate layout
hm_df_gr = pd.DataFrame(hm_data_gr)
hm_df_gr.columns = ["Rows", "Columns", "GR"]
hm_df_gr = hm_df_gr.pivot(index="Rows", columns="Columns", values="GR")
hm_df_ymax = pd.DataFrame(hm_data_ymax)
hm_df_ymax.columns = ["Rows", "Columns", "Ymax"]
hm_df_ymax = hm_df_ymax.pivot(index="Rows", columns="Columns", values="Ymax")
# Formatting heatmap to align with 96 well plate
sns.set(font_scale=3)
f, ax = plt.subplots(figsize=(42,28))
with sns.axes_style("white"):
sns.heatmap(hm_df_gr, ax=ax, mask=hm_df_gr.isnull(), linewidth=0.5, cmap="magma", annot=True, vmin=0.05)
ax.set_title(plate_name + ": Raw Growth Rate Values\n\n")
plt.yticks(rotation=0)
ax.xaxis.tick_top()
ax.set_xlabel('')
ax.set_ylabel('')
plt.savefig(data_path + "Heatmaps/GR/" + plate_name)
plt.close()
# Formatting heatmap to align with 96 well plate
sns.set(font_scale=3)
f, ax = plt.subplots(figsize=(42,28))
with sns.axes_style("white"):
sns.heatmap(hm_df_ymax, ax=ax, mask=hm_df_ymax.isnull(), linewidth=0.5, cmap="magma", annot=True, vmin=0.2)
ax.set_title(plate_name + ": Raw Ymax Values\n\n")
plt.yticks(rotation=0)
ax.xaxis.tick_top()
ax.set_xlabel('')
ax.set_ylabel('')
plt.savefig(data_path + "Heatmaps/Ymax/" + plate_name)
plt.close()
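# Minimal sketch of the inputs hm() expects; the fake plate class and its
# parameter values below are assumptions inferred from how hm() uses them,
# not a real plate-reader object. get_all_params() maps well IDs such as
# "A1" to [growth_rate, ymax]; wells left out are rendered as masked cells.
if __name__ == "__main__":
    import os
    os.makedirs("Heatmaps/GR", exist_ok=True)
    os.makedirs("Heatmaps/Ymax", exist_ok=True)

    class _FakePlate:
        def get_plate_name(self):
            return "demo_plate"

        def get_all_params(self):
            # B2 has ymax < 0.2, so its growth rate is reported as 0
            return {"A1": [0.30, 1.1], "B2": [0.05, 0.15]}

    hm(_FakePlate(), data_path="./")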
|
# -*- coding: utf-8 -*-
from common.helpers.loadConfig import LoadJsonFiles
from common.helpers.operationResults import OperationResults
from server.icity_server import CONFIGURE_FILE, CONFIGURE_HASH
SUPPORTED_DRIVERS = ['mysql', 'mongodb', 'sqlite', 'postgresql']
DRIVER_MAP = {
"mysql" : "MYSQL_DATABASE_CONNECTION",
"postgresql": "PGSQL_DATABASE_CONNECTION",
"sqlite" : "SQLITE_DATABASE_CONNECTION",
"infuxdb" : "INFLUX_DATABASE_CONNECTION",
"mongodb" : "MONGO_DATABASE_CONNECTION"
}
"""
Class that inform a error code of http list
* class httpErrorCodes
* requires python 3.+, PyQt5
* version 1.0.0
* package pyCommom
* subpackage pyCommom
* author Alcindo Schleder <alcindoschleder@gmail.com>
* copyright Vocatio Telecom <https://www.vocatiotelecom.com.br>
"""
class ConfigureConnection(OperationResults):
def __init__(self):
super(ConfigureConnection, self).__init__()
self.resultStatusCode = 200
self._globalConfig = None
self._dbDriver = 'sqlite'
self._configDriver = DRIVER_MAP[self._dbDriver]
try:
ljf = LoadJsonFiles(CONFIGURE_FILE)
self.result = ljf.checkFileHash(CONFIGURE_HASH)
if (self.resultStatusCode != 200):
raise Exception(self.resultStatusMessage)
self._globalConfig = ljf.dictData
except Exception as e:
msg = f'Cannot load config file {CONFIGURE_FILE}!\nReason: {e.args}'
self.resultStatusCode = 500
self.resultStatusMessage = msg
raise Exception(msg)
def connectionUri(self):
if (self.resultStatusCode != 200):
return False
driver = self._globalConfig[self._configDriver]["database"]["driver"]
host = self._globalConfig[self._configDriver]["database"]["host"]
user = self._globalConfig[self._configDriver]["database"]["user"]
pwd = self._globalConfig[self._configDriver]["database"]["password"]
dbName = self._globalConfig[self._configDriver]["database"]["db_name"]
if (driver == 'sqlite'):
from data import DATABASE_PATH
return f'{driver}:///{DATABASE_PATH}/{dbName}'
else:
return f'{driver}://{user}:{pwd}@{host}/{dbName}'
@property
def globalConfig(self):
return self._globalConfig
@property
def databaseDriver(self):
return self._globalConfig[self._configDriver]["database"]["driver"]
@property
def databaseName(self):
return self._globalConfig[self._configDriver]["database"]["db_name"]
@property
def databaseUser(self):
return self._globalConfig[self._configDriver]["database"]["user"]
@property
def databasePassword(self):
return self._globalConfig[self._configDriver]["database"]["password"]
@property
def dbDriver(self):
return self._dbDriver
@dbDriver.setter
def dbDriver(self, driver: str):
if (driver in SUPPORTED_DRIVERS):
self._dbDriver = driver
self._configDriver = DRIVER_MAP[self._dbDriver]
else:
self.resultStatusMessage = "Driver '%s' not implemented yet!" %(self._dbDriver)
|
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth import logout as auth_logout
from django.contrib.messages import info
from django.db.models import get_model
from django import http
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.forms import LoginForm, SignupForm, get_edit_form
from mezzanine.core.models import Displayable
from mezzanine.utils.views import is_editable, paginate, render
from mezzanine.utils.views import set_cookie
def account(request, template="account.html"):
"""
Display and handle both the login and signup forms.
"""
login_form = LoginForm()
signup_form = SignupForm()
if request.method == "POST":
posted_form = None
message = ""
if request.POST.get("login") is not None:
login_form = LoginForm(request.POST)
if login_form.is_valid():
posted_form = login_form
message = _("Successfully logged in")
else:
signup_form = SignupForm(request.POST)
if signup_form.is_valid():
signup_form.save()
posted_form = signup_form
message = _("Successfully signed up")
if posted_form is not None:
posted_form.login(request)
info(request, message)
return redirect(request.GET.get("next", "/"))
context = {"login_form": login_form, "signup_form": signup_form}
return render(request, template, context)
def logout(request):
"""
Log the user out.
"""
auth_logout(request)
info(request, _("Successfully logged out"))
return redirect(request.GET.get("next", "/"))
def set_device(request, device=""):
"""
Sets a device name in a cookie when a user explicitly wants to go
to the site for a particular device (eg mobile).
"""
response = redirect(request.GET.get("next", "/"))
set_cookie(response, "mezzanine-device", device, 60 * 60 * 24 * 365)
return response
def direct_to_template(request, template, extra_context=None, **kwargs):
"""
Replacement for Django's ``direct_to_template`` that uses
``TemplateResponse`` via ``mezzanine.utils.views.render``.
"""
context = extra_context or {}
context["params"] = kwargs
for (key, value) in context.items():
if callable(value):
context[key] = value()
return render(request, template, context)
def edit(request):
"""
Process the inline editing form.
"""
model = get_model(request.POST["app"], request.POST["model"])
obj = model.objects.get(id=request.POST["id"])
form = get_edit_form(obj, request.POST["fields"], data=request.POST,
files=request.FILES)
if not is_editable(obj, request):
response = _("Permission denied")
elif form.is_valid():
form.save()
model_admin = ModelAdmin(model, admin.site)
message = model_admin.construct_change_message(request, form, None)
model_admin.log_change(request, obj, message)
response = ""
else:
response = form.errors.values()[0][0]
return http.HttpResponse(unicode(response))
def search(request, template="search_results.html"):
"""
Display search results.
"""
settings.use_editable()
query = request.GET.get("q", "")
results = Displayable.objects.search(query)
results = paginate(results, request.GET.get("page", 1),
settings.SEARCH_PER_PAGE,
settings.MAX_PAGING_LINKS)
context = {"query": query, "results": results}
return render(request, template, context)
def server_error(request, template_name='500.html'):
"""
Mimics Django's error handler but adds ``STATIC_URL`` to the
context.
"""
context = RequestContext(request, {"STATIC_URL": settings.STATIC_URL})
t = get_template(template_name)
return http.HttpResponseServerError(t.render(context))
|
from flask import Flask
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from config import db_path
import os
app = Flask(__name__)
app.config.from_object("config")
db = SQLAlchemy(app)
Session(app)
# set up the database if it doesn't exist
if not os.path.exists(db_path):
from app.models import User, Feed
os.mkdir(db_path)
db.create_all()
# module-level import needs to be here to avoid a pesky circular import.
from app import views
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ctypes import *
import time
import cv2
import numpy as np
import Queue
import platform
import os
try:
if platform.system() == 'Darwin':
libuvc = cdll.LoadLibrary("libuvc.dylib")
elif platform.system() == 'Linux':
libuvc = cdll.LoadLibrary("libuvc.so")
else:
libuvc = cdll.LoadLibrary("libuvc")
except OSError:
print "Error: could not find libuvc!"
exit(1)
class uvc_context(Structure):
_fields_ = [("usb_ctx", c_void_p),
("own_usb_ctx", c_uint8),
("open_devices", c_void_p),
("handler_thread", c_ulong),
("kill_handler_thread", c_int)]
class uvc_device(Structure):
_fields_ = [("ctx", POINTER(uvc_context)),
("ref", c_int),
("usb_dev", c_void_p)]
class uvc_stream_ctrl(Structure):
_fields_ = [("bmHint", c_uint16),
("bFormatIndex", c_uint8),
("bFrameIndex", c_uint8),
("dwFrameInterval", c_uint32),
("wKeyFrameRate", c_uint16),
("wPFrameRate", c_uint16),
("wCompQuality", c_uint16),
("wCompWindowSize", c_uint16),
("wDelay", c_uint16),
("dwMaxVideoFrameSize", c_uint32),
("dwMaxPayloadTransferSize", c_uint32),
("dwClockFrequency", c_uint32),
("bmFramingInfo", c_uint8),
("bPreferredVersion", c_uint8),
("bMinVersion", c_uint8),
("bMaxVersion", c_uint8),
("bInterfaceNumber", c_uint8)]
class uvc_format_desc(Structure):
pass
class timeval(Structure):
_fields_ = [("tv_sec", c_long), ("tv_usec", c_long)]
class uvc_frame(Structure):
_fields_ = [ # /** Image data for this frame */
("data", POINTER(c_uint8)),
# /** Size of image data buffer */
("data_bytes", c_size_t),
# /** Width of image in pixels */
("width", c_uint32),
# /** Height of image in pixels */
("height", c_uint32),
# /** Pixel data format */
("frame_format", c_uint), # enum uvc_frame_format frame_format
# /** Number of bytes per horizontal line (undefined for compressed format) */
("step", c_size_t),
# /** Frame number (may skip, but is strictly monotonically increasing) */
("sequence", c_uint32),
# /** Estimate of system time when the device started capturing the image */
("capture_time", timeval),
# /** Handle on the device that produced the image.
# * @warning You must not call any uvc_* functions during a callback. */
("source", POINTER(uvc_device)),
# /** Is the data buffer owned by the library?
# * If 1, the data buffer can be arbitrarily reallocated by frame conversion
# * functions.
# * If 0, the data buffer will not be reallocated or freed by the library.
# * Set this field to zero if you are supplying the buffer.
# */
("library_owns_data", c_uint8)]
class uvc_device_handle(Structure):
_fields_ = [("dev", POINTER(uvc_device)),
("prev", c_void_p),
("next", c_void_p),
("usb_devh", c_void_p),
("info", c_void_p),
("status_xfer", c_void_p),
("status_buf", c_ubyte * 32),
("status_cb", c_void_p),
("status_user_ptr", c_void_p),
("button_cb", c_void_p),
("button_user_ptr", c_void_p),
("streams", c_void_p),
("is_isight", c_ubyte)]
class lep_oem_sw_version(Structure):
_fields_ = [("gpp_major", c_ubyte),
("gpp_minor", c_ubyte),
("gpp_build", c_ubyte),
("dsp_major", c_ubyte),
("dsp_minor", c_ubyte),
("dsp_build", c_ubyte),
("reserved", c_ushort)]
def call_extension_unit(devh, unit, control, data, size):
return libuvc.uvc_get_ctrl(devh, unit, control, data, size, 0x81)
AGC_UNIT_ID = 3
OEM_UNIT_ID = 4
RAD_UNIT_ID = 5
SYS_UNIT_ID = 6
VID_UNIT_ID = 7
UVC_FRAME_FORMAT_UYVY = 4
UVC_FRAME_FORMAT_I420 = 5
UVC_FRAME_FORMAT_RGB = 7
UVC_FRAME_FORMAT_BGR = 8
UVC_FRAME_FORMAT_Y16 = 13
def print_device_info(devh):
vers = lep_oem_sw_version()
call_extension_unit(devh, OEM_UNIT_ID, 9, byref(vers), 8)
print "Version gpp: {0}.{1}.{2} dsp: {3}.{4}.{5}".format(
vers.gpp_major, vers.gpp_minor, vers.gpp_build,
vers.dsp_major, vers.dsp_minor, vers.dsp_build,
)
flir_pn = create_string_buffer(32)
call_extension_unit(devh, OEM_UNIT_ID, 8, flir_pn, 32)
print "FLIR part #: {0}".format(flir_pn.raw)
flir_sn = create_string_buffer(8)
call_extension_unit(devh, SYS_UNIT_ID, 3, flir_sn, 8)
print "FLIR serial #: {0}".format(repr(flir_sn.raw))
BUF_SIZE = 2
q = Queue.Queue(BUF_SIZE)
def py_frame_callback(frame, userptr):
array_pointer = cast(frame.contents.data, POINTER(
c_uint16 * (frame.contents.width * frame.contents.height)))
data = np.frombuffer(
array_pointer.contents, dtype=np.dtype(np.uint16)
).reshape(
frame.contents.height, frame.contents.width
) # no copy
# data = np.fromiter(
# frame.contents.data, dtype=np.dtype(np.uint8), count=frame.contents.data_bytes
# ).reshape(
# frame.contents.height, frame.contents.width, 2
# ) # copy
if frame.contents.data_bytes != (
2 * frame.contents.width * frame.contents.height):
return
if not q.full():
q.put(data)
PTR_PY_FRAME_CALLBACK = CFUNCTYPE(
None, POINTER(uvc_frame),
c_void_p)(py_frame_callback)
def generate_colour_map():
"""
Conversion of the colour map from GetThermal to a numpy LUT:
https://github.com/groupgets/GetThermal/blob/bb467924750a686cc3930f7e3a253818b755a2c0/src/dataformatter.cpp#L6
"""
lut = np.zeros((256, 1, 3), dtype=np.uint8)
colourmap_ironblack = [
255, 255, 255, 253, 253, 253, 251, 251, 251, 249, 249, 249, 247, 247,
247, 245, 245, 245, 243, 243, 243, 241, 241, 241, 239, 239, 239, 237,
237, 237, 235, 235, 235, 233, 233, 233, 231, 231, 231, 229, 229, 229,
227, 227, 227, 225, 225, 225, 223, 223, 223, 221, 221, 221, 219, 219,
219, 217, 217, 217, 215, 215, 215, 213, 213, 213, 211, 211, 211, 209,
209, 209, 207, 207, 207, 205, 205, 205, 203, 203, 203, 201, 201, 201,
199, 199, 199, 197, 197, 197, 195, 195, 195, 193, 193, 193, 191, 191,
191, 189, 189, 189, 187, 187, 187, 185, 185, 185, 183, 183, 183, 181,
181, 181, 179, 179, 179, 177, 177, 177, 175, 175, 175, 173, 173, 173,
171, 171, 171, 169, 169, 169, 167, 167, 167, 165, 165, 165, 163, 163,
163, 161, 161, 161, 159, 159, 159, 157, 157, 157, 155, 155, 155, 153,
153, 153, 151, 151, 151, 149, 149, 149, 147, 147, 147, 145, 145, 145,
143, 143, 143, 141, 141, 141, 139, 139, 139, 137, 137, 137, 135, 135,
135, 133, 133, 133, 131, 131, 131, 129, 129, 129, 126, 126, 126, 124,
124, 124, 122, 122, 122, 120, 120, 120, 118, 118, 118, 116, 116, 116,
114, 114, 114, 112, 112, 112, 110, 110, 110, 108, 108, 108, 106, 106,
106, 104, 104, 104, 102, 102, 102, 100, 100, 100, 98, 98, 98, 96, 96,
96, 94, 94, 94, 92, 92, 92, 90, 90, 90, 88, 88, 88, 86, 86, 86, 84, 84,
84, 82, 82, 82, 80, 80, 80, 78, 78, 78, 76, 76, 76, 74, 74, 74, 72, 72,
72, 70, 70, 70, 68, 68, 68, 66, 66, 66, 64, 64, 64, 62, 62, 62, 60, 60,
60, 58, 58, 58, 56, 56, 56, 54, 54, 54, 52, 52, 52, 50, 50, 50, 48, 48,
48, 46, 46, 46, 44, 44, 44, 42, 42, 42, 40, 40, 40, 38, 38, 38, 36, 36,
36, 34, 34, 34, 32, 32, 32, 30, 30, 30, 28, 28, 28, 26, 26, 26, 24, 24,
24, 22, 22, 22, 20, 20, 20, 18, 18, 18, 16, 16, 16, 14, 14, 14, 12, 12,
12, 10, 10, 10, 8, 8, 8, 6, 6, 6, 4, 4, 4, 2, 2, 2, 0, 0, 0, 0, 0, 9,
2, 0, 16, 4, 0, 24, 6, 0, 31, 8, 0, 38, 10, 0, 45, 12, 0, 53, 14, 0,
60, 17, 0, 67, 19, 0, 74, 21, 0, 82, 23, 0, 89, 25, 0, 96, 27, 0, 103,
29, 0, 111, 31, 0, 118, 36, 0, 120, 41, 0, 121, 46, 0, 122, 51, 0, 123,
56, 0, 124, 61, 0, 125, 66, 0, 126, 71, 0, 127, 76, 1, 128, 81, 1, 129,
86, 1, 130, 91, 1, 131, 96, 1, 132, 101, 1, 133, 106, 1, 134, 111, 1,
135, 116, 1, 136, 121, 1, 136, 125, 2, 137, 130, 2, 137, 135, 3, 137,
139, 3, 138, 144, 3, 138, 149, 4, 138, 153, 4, 139, 158, 5, 139, 163,
5, 139, 167, 5, 140, 172, 6, 140, 177, 6, 140, 181, 7, 141, 186, 7,
141, 189, 10, 137, 191, 13, 132, 194, 16, 127, 196, 19, 121, 198, 22,
116, 200, 25, 111, 203, 28, 106, 205, 31, 101, 207, 34, 95, 209, 37,
90, 212, 40, 85, 214, 43, 80, 216, 46, 75, 218, 49, 69, 221, 52, 64,
223, 55, 59, 224, 57, 49, 225, 60, 47, 226, 64, 44, 227, 67, 42, 228,
71, 39, 229, 74, 37, 230, 78, 34, 231, 81, 32, 231, 85, 29, 232, 88,
27, 233, 92, 24, 234, 95, 22, 235, 99, 19, 236, 102, 17, 237, 106, 14,
238, 109, 12, 239, 112, 12, 240, 116, 12, 240, 119, 12, 241, 123, 12,
241, 127, 12, 242, 130, 12, 242, 134, 12, 243, 138, 12, 243, 141, 13,
244, 145, 13, 244, 149, 13, 245, 152, 13, 245, 156, 13, 246, 160, 13,
246, 163, 13, 247, 167, 13, 247, 171, 13, 248, 175, 14, 248, 178, 15,
249, 182, 16, 249, 185, 18, 250, 189, 19, 250, 192, 20, 251, 196, 21,
251, 199, 22, 252, 203, 23, 252, 206, 24, 253, 210, 25, 253, 213, 27,
254, 217, 28, 254, 220, 29, 255, 224, 30, 255, 227, 39, 255, 229, 53,
255, 231, 67, 255, 233, 81, 255, 234, 95, 255, 236, 109, 255, 238, 123,
255, 240, 137, 255, 242, 151, 255, 244, 165, 255, 246, 179, 255, 248,
193, 255, 249, 207, 255, 251, 221, 255, 253, 235, 255, 255, 24]
def chunk(
ulist, step): return map(
lambda i: ulist[i: i + step],
xrange(0, len(ulist),
step))
chunks = chunk(colourmap_ironblack, 3)
red = []
green = []
blue = []
for chunk in chunks:
red.append(chunk[0])
green.append(chunk[1])
blue.append(chunk[2])
lut[:, 0, 0] = blue
lut[:, 0, 1] = green
lut[:, 0, 2] = red
return lut
def ktof(val):
return (1.8 * ktoc(val) + 32.0)
def ktoc(val):
return (val - 27315) / 100.0
def ctok(val):
return (val * 100.0) + 27315
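# Example: the Lepton reports temperatures in centikelvin, so a raw value
# of 30315 corresponds to ktoc(30315) == 30.0 degC and ktof(30315) == 86.0 degF,
# and ctok(30.0) round-trips back to 30315.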
def raw_to_8bit(data):
cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
np.right_shift(data, 8, data)
return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
def display_temperature(img, val_k, loc, color):
val = ktoc(val_k)
cv2.putText(img, "{0:.1f} degC".format(val), loc,
cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
x, y = loc
cv2.line(img, (x - 2, y), (x + 2, y), color, 1)
cv2.line(img, (x, y - 2), (x, y + 2), color, 1)
def main():
ctx = POINTER(uvc_context)()
dev = POINTER(uvc_device)()
devh = POINTER(uvc_device_handle)()
ctrl = uvc_stream_ctrl()
res = libuvc.uvc_init(byref(ctx), 0)
if res < 0:
print "uvc_init error"
exit(1)
try:
res = libuvc.uvc_find_device(ctx, byref(dev), 0, 0, 0)
if res < 0:
print "uvc_find_device error"
exit(1)
try:
res = libuvc.uvc_open(dev, byref(devh))
if res < 0:
print "uvc_open error"
exit(1)
print "device opened!"
print_device_info(devh)
libuvc.uvc_get_stream_ctrl_format_size(
devh, byref(ctrl),
UVC_FRAME_FORMAT_Y16, 160, 120, 9)
res = libuvc.uvc_start_streaming(
devh, byref(ctrl),
PTR_PY_FRAME_CALLBACK, None, 0)
if res < 0:
print "uvc_start_streaming failed: {0}".format(res)
exit(1)
output_dir = "captures"
colour_map = generate_colour_map()
#
# Min/max value to pin across the LUT
#
min_c = ctok(7)
max_c = ctok(20)
try:
while True:
data = q.get(True, 500)
if data is None:
break
data = cv2.resize(data[:, :], (640, 480))
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(data)
#
# Dirty-hack to ensure that the LUT is always scaled
# against the colours we care about
#
data[0][0] = min_c
data[-1][-1] = max_c
print minVal, maxVal
img = raw_to_8bit(data)
img = cv2.LUT(img, colour_map)
timestr = time.strftime("%Y%m%d-%H%M%S")
#
# Max/min values in the top-left
#
font = cv2.FONT_HERSHEY_SIMPLEX
time_str = "{:.2f}, {:.2f}".format(
ktoc(minVal), ktoc(maxVal))
cv2.putText(img, time_str, (10, 32),
font, 1.0, (155, 165, 237), 2, cv2.CV_AA)
cv2.imwrite(os.path.join(
output_dir, "{:s}.png".format(timestr)), img)
time.sleep(20)
finally:
libuvc.uvc_stop_streaming(devh)
print "done"
finally:
libuvc.uvc_unref_device(dev)
finally:
libuvc.uvc_exit(ctx)
if __name__ == '__main__':
main()
# EOF
|
__title__ = 'Simple-Reverse-Proxy'
__description__ = 'A simple reverse proxy implementation using python simplicity.'
__url__ = 'https://github.com/MarcosVs98/sreverse-proxy'
__version__ = '1.0.0'
__build__ = 0x022300
__author__ = 'Marcos Silveira'
__author_email__ = 'marcosvs@protonmail.com'
__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2021 MarcosVs98'
|
__author__ = 'satish'
import pickle
import csv
def save_obj(obj, name):
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, protocol=2)
def load_obj(name):
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)
DM = load_obj("DesignMatrix")
ftest = open('comedy_comparisons.test')
#unique = defaultdict(int)
csv_fte = csv.reader(ftest)
iRow = []
for row in csv_fte:
    try:
        # keep only pairs where both video IDs appear in the design matrix
        if DM[row[0]] and DM[row[1]]:
            iRow.append(row)
    except KeyError:
        continue
save_obj(iRow,"icomedy_comparisons.test")
print(len(iRow))
# NewDM = load_obj("../ProcessedDesignMatrix")
# IDs = list(set(DM.keys()) - set(NewDM.keys()))
# print(len(IDs))
|
import logging
from wellspring.rest.wellspring_rest_base import *
from wellspring.services import device_service
LOGGER = logging.getLogger(__name__)
def register_device(request):
return handle_rest_request(request, device_post_handler, ["POST"])
def device_post_handler(request, response, device_uuid, pathParams):
responseBody = build_base_wellspring_message()
if device_service.device_exists(device_uuid):
responseBody["message"] = "Device is already registered"
response.content = jsonify(responseBody)
return response
device_service.add_device(device_uuid)
responseBody["message"] = "Device newly registered"
response.content = jsonify(responseBody)
return response
|
from operator import itemgetter
n, m = map(int, input().split())
city_num = [0 for _ in range(n)]
l = []
for i in range(m):
p, y = map(int, input().split())
l.append([i, p, y])
l.sort(key=itemgetter(2))
for i in range(m):
j, p, y = l[i]
city_num[p-1] += 1
    idz = str(p).zfill(6)
    idk = str(city_num[p-1]).zfill(6)
l[i].append(idz+idk)
l.sort(key=itemgetter(0))
for i in range(m):
print(l[i][-1])
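# Worked example (added for illustration): with the input
#   2 3
#   1 32
#   2 63
#   1 12
# the cities sorted by founding year are (1,12), (1,32), (2,63), so the IDs
# printed in input order are 000001000002, 000002000001, 000001000001
# (zero-padded prefecture number followed by the city's rank within it).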
|
'''Example to illustrate Quantile Regression
Author: Josef Perktold
'''
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.regression.quantile_regression import quantilereg
sige = 5
nobs, k_vars = 500, 5
x = np.random.randn(nobs, k_vars)
#x[:,0] = 1
y = x.sum(1) + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
res_qr = quantilereg(y,x,p)
res_qr2 = quantilereg(y,x,0.25)
res_qr3 = quantilereg(y,x,0.75)
res_ols = sm.OLS(y, np.column_stack((np.ones(nobs), x))).fit()
##print 'ols ', res_ols.params
##print '0.25', res_qr2
##print '0.5 ', res_qr
##print '0.75', res_qr3
params = [res_ols.params, res_qr2, res_qr, res_qr3]
labels = ['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
#sortidx = np.argsort(y)
fitted_ols = np.dot(res_ols.model.exog, params[0])
sortidx = np.argsort(fitted_ols)
x_sorted = res_ols.model.exog[sortidx]
fitted_ols = np.dot(x_sorted, params[0])
plt.figure()
plt.plot(y[sortidx], 'o', alpha=0.75)
for lab, beta in zip(['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75'], params):
print '%-8s'%lab, np.round(beta, 4)
fitted = np.dot(x_sorted, beta)
lw = 2 if lab == 'ols' else 1
plt.plot(fitted, lw=lw, label=lab)
plt.legend()
plt.show()
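# Note (added for context): `quantilereg` lives in the statsmodels sandbox and
# may be absent from newer releases; the maintained equivalent is roughly
#     sm.QuantReg(y, np.column_stack((np.ones(nobs), x))).fit(q=0.5).params
# which should give a comparable 0.5-quantile fit to the one above.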
|
import numpy as np
from scipy.integrate import trapz
import matplotlib.pyplot as plt
from genessa.timeseries.gaussian import GaussianModel
from matplotlib.collections import LineCollection
# internal python imports
from ..figures.settings import *
class ComparisonMethods:
""" Methods for comparison objects. """
@staticmethod
def integrate(t, y, indices=None):
"""
Integrate 1D array.
Args:
t (np.ndarray[float]) - sample times
y (np.ndarray[float]) - sample values
indices (list) - list of indices of contiguous subsegments
Returns:
area (float)
"""
if indices is None:
return trapz(y, t)
else:
return sum([trapz(y[ind], t[ind]) for ind in indices])
@staticmethod
def extract_region_below(rbounds, cbounds):
"""
Extract region that falls below reference confidence band.
Args:
rbounds (tuple) - lower and upper bounds for reference band
cbounds (tuple) - lower and upper bounds for compared band
Returns:
indices (list) - list of indices of segments below reference
lbound (np.ndarray[float]) - lower bound for region
ubound (np.ndarray[float]) - upper bound for region
"""
below = cbounds[0] < rbounds[0]
indices = np.split(np.arange(below.size),1+np.diff(below).nonzero()[0])
indices = [ind for ind in indices if np.all(below[ind])]
lbound = cbounds[0]
ubound = np.vstack((rbounds[0], cbounds[1])).min(axis=0)
return indices, lbound, ubound
@staticmethod
def extract_region_above(rbounds, cbounds):
"""
Extract region that falls above reference confidence band.
Args:
rbounds (tuple) - lower and upper bounds for reference band
cbounds (tuple) - lower and upper bounds for compared band
Returns:
indices (list) - list of indices of segments above reference
lbound (np.ndarray[float]) - lower bound for region
ubound (np.ndarray[float]) - upper bound for region
"""
above = cbounds[1] > rbounds[1]
indices = np.split(np.arange(above.size),1+np.diff(above).nonzero()[0])
indices = [ind for ind in indices if np.all(above[ind])]
lbound = np.vstack((rbounds[1], cbounds[0])).max(axis=0)
ubound = cbounds[1]
return indices, lbound, ubound
class ComparisonProperties:
"""
Properties for comparison methods.
Properties:
t (np.ndarray[float]) - reference timepoints
_peak_index (int) - time index of peak expression
_peak_time (float) - time of peak expression
_comparison_index (int) - time index of comparison
_comparison_time (float) - time of comparison
lower (np.ndarray[float]) - lower bound for reference trajectories
upper (np.ndarray[float]) - upper bound for reference trajectories
fractions_below (np.ndarray[float]) - fractions below lower bound
fractions_above (np.ndarray[float]) - fractions above upper bound
"""
@property
def t(self):
""" Reference timepoints. """
return self.reference.t
# @property
# def threshold(self):
# """ Commitment threshold. """
# return self.reference.peaks[self.dim] * self.fraction_of_max
@property
def threshold(self):
""" Commitment threshold. """
return self.upper[self._comparison_index]
@property
def _peak_index(self):
""" Index of peak expression. """
return self.reference.peak_indices[self.dim]
@property
def _peak_time(self):
""" Time of peak expression. """
return self.t[self._peak_index]
@property
def _comparison_index(self):
""" Index of time at which reference reaches threshold. """
# evaluate population mean at comparison time
final_mean = self.reference.peaks[self.dim] * self.fraction_of_max
peak_index = self.reference.peak_indices[self.dim]
# determine first time at which mean reaches final level
indices = self.reference.index(final_mean, self.dim, mode='mean')
if indices.size == 0 or indices[-1] <= peak_index:
return None
else:
return indices[-1]
# @property
# def _comparison_index(self):
# """ Index of time at which reference reaches threshold. """
# indices = self.reference.index(self.threshold, self.dim, mode='upper')
# if indices.size == 0 or indices[-1] == 0:
# return None
# else:
# return indices[-1]
@property
def _comparison_time(self):
""" Time at which reference reaches threshold. """
return self.t[self.comparison_index]
@property
def lower(self):
""" Lower bound of reference. """
q = (100-self.bandwidth)/2
return self.reference.evaluate_quantile(q)[self.dim]
@property
def upper(self):
""" Upper bound of reference. """
q = (100+self.bandwidth)/2
return self.reference.evaluate_quantile(q)[self.dim]
@property
def fractions_below(self):
""" Fractions of trajectories below the lowest reference. """
return (self.compared.states[:, self.dim, :] < self.lower).mean(axis=0)
@property
def fractions_above(self):
""" Fractions of trajectories above the highest reference. """
return (self.compared.states[:, self.dim, :] > self.upper).mean(axis=0)
class ComparisonVis:
"""
Visualization methods for comparison objects.
"""
def shade_outlying_areas(self,
alpha=0.2,
reference_color='k',
compared_color='k',
above_color='r',
above_alpha=0.5,
below_color='b',
below_alpha=0.5,
ax=None,
show_threshold=False):
"""
Visualize comparison by shading the region encompassing trajectories that lie below or above all reference trajectories.
Args:
alpha (float) - opacity for shaded regions of confidence band
reference_color (str) - color for reference confidence band
compared_color (str) - color for compared confidence band
above_color, below_color (str) - colors for above/below reference
ax (matplotlib.axes.AxesSubplot) - if None, create figure
show_threshold (bool) - if True, show threshold definition
"""
# create figure if axes weren't provided
if ax is None:
fig, ax = plt.subplots(figsize=(3, 2))
# extract bounds for confidence bands
tf = self.comparison_index + 1
t = self.t[:tf]
rbounds = (self.lower[:tf], self.upper[:tf])
cbounds = (self.compared.lower[self.dim][:tf],
self.compared.upper[self.dim][:tf])
# plot confidence band for reference
ax.fill_between(t, *rbounds, color=reference_color, alpha=alpha)
ax.plot(t, rbounds[0], '-k')
ax.plot(t, rbounds[1], '-k')
# plot confidence band for compared
ax.fill_between(t, *cbounds, color=compared_color, alpha=alpha)
ax.plot(t, cbounds[0], '--k')
ax.plot(t, cbounds[1], '--k')
# shade regions below reference
ind_b, lbound_b, ubound_b = self.extract_region_below(rbounds, cbounds)
for ind in ind_b:
ax.fill_between(t[ind],
lbound_b[ind],
ubound_b[ind],
color=below_color,
alpha=below_alpha)
# shade regions above reference
ind_a, lbound_a, ubound_a = self.extract_region_above(rbounds, cbounds)
for ind in ind_a:
ax.fill_between(t[ind],
lbound_a[ind],
ubound_a[ind],
color=above_color,
alpha=above_alpha)
# display threshold definition
if show_threshold:
self.display_threshold_definition(ax)
# format axis
self.format_axis(ax)
def plot_outlying_trajectories(self, ax=None, show_threshold=False):
"""
Visualize comparison by plotting the trajectories that lie below or above the reference trajectories.
Args:
ax (matplotlib.axes.AxesSubplot) - if None, create figure
show_threshold (bool) - if True, show threshold definition
"""
# create figure if axes weren't provided
if ax is None:
fig, ax = plt.subplots(figsize=(3, 2))
# extract bounds for confidence bands
tf = self.comparison_index + 1
lower, upper = self.lower[:tf], self.upper[:tf]
t = self.t[:tf]
# plot confidence band for reference
ax.fill_between(t, lower, upper, color='k', alpha=0.2)
ax.plot(t, lower, '-k')
ax.plot(t, upper, '-k')
# assemble segments of trajectories below/above reference extrema
segments_below, segments_within, segments_above = [], [], []
for x in self.compared.states[:, self.dim, :tf]:
below, above = x<lower, x>upper
# select outlying line segments
ind_b = np.split(np.arange(x.size), np.diff(below).nonzero()[0]+1)
ib = list(filter(lambda i: np.all(below[i]), ind_b))
ind_a = np.split(np.arange(x.size), np.diff(above).nonzero()[0]+1)
ia = list(filter(lambda i: np.all(above[i]), ind_a))
iw = list(filter(lambda i: not np.all(above[i]), ind_a))
# append line segments to lists
segments_below.extend([list(zip(t[i], x[i])) for i in ib])
segments_above.extend([list(zip(t[i], x[i])) for i in ia])
segments_within.extend([list(zip(t[i], x[i])) for i in iw])
# compile line objects
lines_below = LineCollection(segments_below, colors='b')
lines_above = LineCollection(segments_above, colors='r')
lines_within = LineCollection(segments_within, colors='k', alpha=0.1)
# add lines to plot
for lines in (lines_below, lines_within, lines_above):
ax.add_collection(lines)
# display threshold definition
if show_threshold:
self.display_threshold_definition(ax)
# format axis
self.format_axis(ax)
def display_threshold_definition(self, ax):
"""
Display arrows defining threshold and commitment time.
Note: will likely crash if axes limits aren't set
Args:
ax (matplotlib.axes.AxesSubplot)
"""
# plot threshold geometry
peak_time = self._peak_time
comparison_time = self.comparison_time
peak_value = self.reference.peaks[self.dim]
max_error = self.compared.upper[self.dim][self.comparison_index]
# add vertical arrow defining threshold value
ax.annotate(s='',
xy=(peak_time, self.threshold),
xytext=(peak_time, peak_value),
arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
# add horizontal arrow defining commitment time
ax.annotate(s='',
xy=(peak_time, self.threshold),
xytext=(comparison_time, self.threshold),
arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
# add vertical arrow defining error
ax.annotate(s='',
xy=(1+comparison_time, self.threshold),
xytext=(1+comparison_time, max_error),
arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0, color='k'))
# annotate error
ax.text(comparison_time+1.5,
(self.threshold+max_error)/2,
'error',
ha='left',
va='center')
def format_axis(self, ax):
"""
Format axis.
Args:
ax (matplotlib.axes.AxesSubplot)
"""
ax.set_xlabel('Time (h)')
# display comparison metrics
#self.display_metrics(ax)
def display_metrics(self, ax, **kwargs):
"""
Display comparison metrics on axes.
Args:
ax (matplotlib.axes.AxesSubplot)
"""
x = ax.get_xlim()[1] - 0.05*ax.get_xticks().ptp()
y = ax.get_ylim()[1] - 0.05*ax.get_yticks().ptp()
kw = dict(ha='right', va='top', fontsize=8)
ax.text(x, y, '{:0.1%} error'.format(self.error), **kw)
ax.text(x, y, '\n{:0.1%} above'.format(self.above), color='r', **kw)
ax.text(x, y, '\n\n{:0.1%} below'.format(self.below), color='b', **kw)
class Comparison(ComparisonProperties, ComparisonMethods, ComparisonVis):
"""
Base class for comparing a timeseries against a reference.
Comparison is based on evaluating the fraction of trajectories that lie above or below the reference trajectory confidence band.
Attributes:
reference (TimeSeries) - reference timeseries
compared (TimeSeries) - timeseries to be compared
bandwidth (float) - width of confidence band
fraction_of_max (float) - fraction of peak mean reference value used to define commitment time
dim (int) - state space dimension to be compared
below (float) - fraction of confidence band below the reference
above (float) - fraction of confidence band above the reference
error (float) - total non-overlapping fraction of confidence band
below_threshold (float) - fraction below lower threshold
above_threshold (float) - fraction above upper threshold
threshold_error (float) - fraction outside thresholds
reached_comparison (bool) - if True, simulation reached comparison time
tstype (type) - python class for timeseries objects
Properties:
t (np.ndarray[float]) - reference timepoints
_peak_index (int) - time index of peak expression
_peak_time (float) - time of peak expression
_comparison_index (int) - time index of comparison
_comparison_time (float) - time of comparison
lower (np.ndarray[float]) - lower bound for reference trajectories
upper (np.ndarray[float]) - upper bound for reference trajectories
fractions_below (np.ndarray[float]) - fractions below lower bound
fractions_above (np.ndarray[float]) - fractions above upper bound
"""
def __init__(self, reference, compared,
bandwidth=98,
fraction_of_max=0.3,
dim=-1):
"""
Instantiate timeseries comparison object.
Args:
reference (TimeSeries) - reference timeseries
compared (TimeSeries) - timeseries to be compared
bandwidth (float) - width of confidence band, 0 to 100
fraction_of_max (float) - fraction of peak mean reference value used to define commitment time
dim (int) - state space dimension to be compared
"""
# store simulation trajectories
self.reference = reference
self.compared = compared
# store attributes
self.bandwidth = bandwidth
self.fraction_of_max = fraction_of_max
self.dim = dim
self.tstype = self.reference.__class__
# evaluate comparison index and time
self.compare()
def __getstate__(self):
""" Returns all attributes except TimeSeries instances. """
excluded = ('reference', 'compared')
return {k: v for k, v in self.__dict__.items() if k not in excluded}
def compare(self):
""" Run comparison procedure. """
# determine whether commitment threshold is reached
self.comparison_index = self._comparison_index
if self.comparison_index is None:
self.reached_comparison = False
else:
self.reached_comparison = True
# evaluate comparison metric
if self.reached_comparison:
self.comparison_time = self.t[self.comparison_index]
# evaluate integrated error
below, above = self.evaluate()
self.below = below
self.above = above
self.error = below + above
# evaluate threshold error
below_threshold, above_threshold = self.evaluate_threshold()
self.below_threshold = below_threshold
self.above_threshold = above_threshold
self.threshold_error = below_threshold + above_threshold
def evaluate(self):
"""
Evaluate comparison.
Returns:
below (float) - mean fraction of trajectories below the reference
above (float) - mean fraction of trajectories above the reference
"""
# determine start index (pulse onset)
#ind = self.reference.mean[self.dim].nonzero()[0][0] + 1
t0 = 0
tf = self.comparison_index
        # evaluate fractions below/above confidence band
t = self.t[t0: tf] - self.t[t0]
t_normalized = t / t.max()
# determine correction factor
correction = (100-self.bandwidth)/2/100
# fraction below
fractions_below = self.fractions_below[t0: tf]
fractions_below -= correction
fractions_below[fractions_below<0] = 0
# fraction above
fractions_above = self.fractions_above[t0: tf]
fractions_above -= correction
fractions_above[fractions_above<0] = 0
below = self.integrate(t_normalized, fractions_below)
above = self.integrate(t_normalized, fractions_above)
return below, above
def evaluate_threshold(self):
"""
Evaluate comparison.
Returns:
below (float) - mean fraction of trajectories below the reference
above (float) - mean fraction of trajectories above the reference
"""
below = self.fractions_below[self.comparison_index]
above = self.fractions_above[self.comparison_index]
# apply correction
correction = (100-self.bandwidth)/2/100
below -= correction
above -= correction
return max(below, 0), max(above, 0)
class GaussianComparison(Comparison):
"""
Class for comparing a timeseries against a reference. Comparison is based on evaluating the fraction of the compared timeseries that lies above or below the reference timeseries.
Attributes:
reference (GaussianModel) - reference timeseries
compared (GaussianModel) - timeseries to be compared
tskwargs (dict) - keyword arguments for timeseries instantiation
Inherited Attributes:
dim (int) - state space dimension to be compared
below (float) - fraction of confidence band below the reference
above (float) - fraction of confidence band above the reference
        error (float) - total non-overlapping fraction of confidence band
tstype (type) - python class for timeseries objects
Properties:
t (np.ndarray[float]) - reference timepoints
lower (np.ndarray[float]) - lower bound for reference trajectories
upper (np.ndarray[float]) - upper bound for reference trajectories
fractions_below (np.ndarray[float]) - fractions below lower bound
fractions_above (np.ndarray[float]) - fractions above upper bound
"""
def __init__(self,
reference,
compared,
bandwidth=98,
dim=-1):
"""
Instantiate timeseries comparison object.
Args:
reference (TimeSeries) - reference timeseries
compared (TimeSeries) - timeseries to be compared
bandwidth (float) - width of confidence band, 0 to 100
dim (int) - state space dimension to be compared
"""
# fit gaussian models to timeseries
reference = GaussianModel.from_timeseries(reference, bandwidth/100)
compared = GaussianModel.from_timeseries(compared, bandwidth/100)
# call parent instantiation (runs evaluation)
super().__init__(reference, compared, bandwidth=bandwidth, dim=dim)
# store timeseries kwargs
self.tskwargs = dict(bandwidth=bandwidth)
# @property
# def lower(self):
# """ Lower bound of reference. """
# return self.reference.lower[self.dim]
# @property
# def upper(self):
# """ Upper bound of reference. """
# return self.reference.upper[self.dim]
@property
def lower(self):
""" Lower bound of reference. """
q = (100-self.bandwidth)/2/100
return self.reference.norm.ppf(q)[self.dim]
@property
def upper(self):
""" Upper bound of reference. """
q = (100+self.bandwidth)/2/100
return self.reference.norm.ppf(q)[self.dim]
@property
def fractions_below(self):
""" Fractions of trajectories below the lowest reference. """
return self.compared.norm.cdf(self.lower)[self.dim]
@property
def fractions_above(self):
""" Fractions of trajectories above the highest reference. """
return 1 - self.compared.norm.cdf(self.upper)[self.dim]
class PromoterComparison(Comparison):
"""
Comparison method for promoter perturbations.
Uses commitment time based on lower bound, and evaluates threshold at time of peak expression.
"""
@property
def threshold(self):
""" Commitment threshold. """
#return self.reference.peaks[self.dim] * self.fraction_of_max
return self.lower[self._comparison_index]
@property
def _comparison_index(self):
"""
Index of time at which reference reaches threshold.
** NOT USED FOR THRESHOLD - this is just to determine integration range
** THRESHOLD USES PEAK (see below)
"""
endpoint = self.reference.peaks[self.dim] * self.fraction_of_max
indices = self.reference.index(endpoint, self.dim, mode='lower')
if indices.size == 0 or indices[-1] == 0:
return None
else:
return indices[-1]
def evaluate_threshold(self):
"""
Evaluate threshold comparison.
Returns:
below (float) - mean fraction of trajectories below the reference
above (float) - mean fraction of trajectories above the reference
"""
below = self.fractions_below[self._peak_index]
above = self.fractions_above[self._peak_index]
return below, above
class MultiComparison(Comparison):
"""
Class for making multiple comparisons of a timeseries against a reference.
Comparison is based on evaluating the fraction of trajectories that lie above or below the reference trajectory confidence band. Comparison is evaluated at multiple timepoints.
Attributes:
reference (TimeSeries) - reference timeseries
compared (TimeSeries) - timeseries to be compared
bandwidth (float) - width of confidence band
fraction_of_max (np.ndarray[float]) - fractions of peak mean reference value used to define commitment time
dim (int) - state space dimension to be compared
below (np.ndarray[float]) - fractions of confidence band below the reference
above (np.ndarray[float]) - fractions of confidence band above the reference
error (np.ndarray[float]) - total non-overlapping fraction of confidence band
below_threshold (np.ndarray[float]) - fraction below lower threshold
above_threshold (np.ndarray[float]) - fraction above upper threshold
threshold_error (np.ndarray[float]) - fraction outside thresholds
reached_comparison (np.ndarray[bool]) - if True, simulation reached comparison time
tstype (type) - python class for timeseries objects
Properties:
t (np.ndarray[float]) - reference timepoints
_peak_index (int) - time index of peak expression
_peak_time (float) - time of peak expression
_comparison_index (np.ndarray[int]) - time index of comparison
_comparison_time (np.ndarray[float]) - time of comparison
lower (np.ndarray[float]) - lower bound for reference trajectories
upper (np.ndarray[float]) - upper bound for reference trajectories
fractions_below (np.ndarray[float]) - fractions below lower bound
fractions_above (np.ndarray[float]) - fractions above upper bound
"""
def __init__(self, reference, compared,
bandwidth=98,
dim=-1):
"""
Instantiate timeseries multiple-comparison object.
Args:
reference (TimeSeries) - reference timeseries
compared (TimeSeries) - timeseries to be compared
bandwidth (float) - width of confidence band, 0 to 100
dim (int) - state space dimension to be compared
"""
# store simulation trajectories
self.reference = reference
self.compared = compared
# store attributes
self.fraction_of_max = np.arange(0.1, 1., .1)[::-1]
self.bandwidth = bandwidth
self.dim = dim
self.tstype = self.reference.__class__
# evaluate comparison index and time
self.compare()
@property
def _comparison_index(self):
""" Index of time at which reference reaches threshold. """
# evaluate population means at comparison times
final_means = self.reference.peaks[self.dim] * self.fraction_of_max
peak_index = self.reference.peak_indices[self.dim]
def find_value(value):
""" Determine first time at which mean reaches <value>. """
indices = self.reference.index(value, self.dim, mode='mean')
if indices.size == 0 or indices[-1] <= peak_index:
return None
else:
return indices[-1]
return np.array([find_value(x) for x in final_means])
@property
def _comparison_time(self):
""" Time at which reference reaches threshold. """
return self.t[self.comparison_index]
def compare(self):
""" Run comparison procedure. """
# determine whether commitment threshold is reached
self.comparison_index = self._comparison_index
        self.reached_comparison = np.array(
            [ci is not None for ci in self.comparison_index])
comparison_time = []
below, above, error = [], [], []
below_threshold, above_threshold, threshold_error = [], [], []
for i, comparison_index in enumerate(self.comparison_index):
# evaluate comparison metric
if not self.reached_comparison[i]:
comparison_time.append(None)
below.append(None)
above.append(None)
error.append(None)
below_threshold.append(None)
above_threshold.append(None)
threshold_error.append(None)
else:
comparison_time.append(self.t[comparison_index])
# evaluate integrated error
b, a = self.evaluate(comparison_index, t0=0)
below.append(b)
above.append(a)
error.append(b+a)
# evaluate threshold error
bt, at = self.evaluate_threshold(comparison_index)
below_threshold.append(bt)
above_threshold.append(at)
threshold_error.append(bt + at)
self.comparison_time = np.array(comparison_time)
self.below = np.array(below)
self.above = np.array(above)
self.error = np.array(error)
self.below_threshold = np.array(below_threshold)
self.above_threshold = np.array(above_threshold)
self.threshold_error = np.array(threshold_error)
def evaluate(self, tf, t0=0):
"""
Evaluate comparison.
Args:
tf (int) - time index for comparison
t0 (int) - time index for integration start
Returns:
below (float) - mean fraction of trajectories below the reference
above (float) - mean fraction of trajectories above the reference
"""
        # evaluate fractions below/above confidence band
t = self.t[t0: tf] - self.t[t0]
t_normalized = t / t.max()
# determine correction factor
correction = (100-self.bandwidth)/2/100
# fraction below
fractions_below = self.fractions_below[t0: tf]
fractions_below -= correction
fractions_below[fractions_below<0] = 0
# fraction above
fractions_above = self.fractions_above[t0: tf]
fractions_above -= correction
fractions_above[fractions_above<0] = 0
below = self.integrate(t_normalized, fractions_below)
above = self.integrate(t_normalized, fractions_above)
return below, above
def evaluate_threshold(self, comparison_index):
"""
Evaluate comparison.
Args:
comparison_index (int) - time index for comparison
Returns:
below (float) - mean fraction of trajectories below the reference
above (float) - mean fraction of trajectories above the reference
"""
below = self.fractions_below[comparison_index]
above = self.fractions_above[comparison_index]
# apply correction
correction = (100-self.bandwidth)/2/100
below -= correction
above -= correction
return max(below, 0), max(above, 0)
def shade_outlying_areas(self,
alpha=0.2,
reference_color='k',
compared_color='k',
above_color='r',
above_alpha=0.5,
below_color='b',
below_alpha=0.5,
ax=None,
show_threshold=False):
"""
Visualize comparison by shading the region encompassing trajectories that lie below or above all reference trajectories.
Args:
alpha (float) - opacity for shaded regions of confidence band
reference_color (str) - color for reference confidence band
compared_color (str) - color for compared confidence band
above_color, below_color (str) - colors for above/below reference
ax (matplotlib.axes.AxesSubplot) - if None, create figure
show_threshold (bool) - if True, show threshold definition
"""
# create figure if axes weren't provided
if ax is None:
fig, ax = plt.subplots(figsize=(3, 2))
# extract bounds for confidence bands
threshold_index = self.reached_comparison.nonzero()[0][-1]
tf = self.comparison_index[threshold_index] + 1
t = self.t[:tf]
rbounds = (self.lower[:tf], self.upper[:tf])
cbounds = (self.compared.lower[self.dim][:tf],
self.compared.upper[self.dim][:tf])
# plot confidence band for reference
ax.fill_between(t, *rbounds, color=reference_color, alpha=alpha)
ax.plot(t, rbounds[0], '-k')
ax.plot(t, rbounds[1], '-k')
# plot confidence band for compared
ax.fill_between(t, *cbounds, color=compared_color, alpha=alpha)
ax.plot(t, cbounds[0], '--k')
ax.plot(t, cbounds[1], '--k')
# shade regions below reference
ind_b, lbound_b, ubound_b = self.extract_region_below(rbounds, cbounds)
for ind in ind_b:
ax.fill_between(t[ind],
lbound_b[ind],
ubound_b[ind],
color=below_color,
alpha=below_alpha)
# shade regions above reference
ind_a, lbound_a, ubound_a = self.extract_region_above(rbounds, cbounds)
for ind in ind_a:
ax.fill_between(t[ind],
lbound_a[ind],
ubound_a[ind],
color=above_color,
alpha=above_alpha)
# display threshold definition
if show_threshold:
self.display_threshold_definition(ax)
# format axis
self.format_axis(ax)
def display_metrics(self, ax, threshold_index=-1):
"""
Display comparison metrics on axes.
Args:
ax (matplotlib.axes.AxesSubplot)
threshold_index (int) - index of threshold used
"""
x = ax.get_xlim()[1] - 0.05*ax.get_xticks().ptp()
y = ax.get_ylim()[1] - 0.05*ax.get_yticks().ptp()
kw = dict(ha='right', va='top', fontsize=8)
ax.text(x, y, '{:0.1%} error'.format(self.error[threshold_index]), **kw)
ax.text(x, y, '\n{:0.1%} above'.format(self.above[threshold_index]), color='r', **kw)
ax.text(x, y, '\n\n{:0.1%} below'.format(self.below[threshold_index]), color='b', **kw)
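def _demo_static_helpers():
    """Hedged usage sketch (added; not part of the original module).

    Exercises only the self-contained static helpers above with synthetic
    confidence bands; the full Comparison workflow additionally needs genessa
    TimeSeries objects, which are not constructed here.
    """
    t = np.linspace(0., 10., 101)
    rbounds = (np.sin(t) - 0.5, np.sin(t) + 0.5)  # reference band (lower, upper)
    cbounds = (np.sin(t) - 0.8, np.sin(t) + 0.2)  # compared band (lower, upper)
    indices, lbound, ubound = ComparisonMethods.extract_region_below(rbounds, cbounds)
    area = ComparisonMethods.integrate(t, ubound - lbound, indices=indices)
    print('area of the compared band lying below the reference:', area)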
|
from flask import Flask
from flask_restful import Resource, Api
from classifier import *
app = Flask(__name__)
api = Api(app)
class Classifier(Resource):
def get(self):
return {
'products': ['Ice Cream', 'Chocolate', 'Fruit', 'Eggs']
}
api.add_resource(Classifier, '/')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
|
from biobb_common.tools import test_fixtures as fx
from biobb_amber.pdb4amber.pdb4amber_run import pdb4amber_run
class TestPdb4amberRun():
def setUp(self):
fx.test_setup(self, 'pdb4amber_run')
def tearDown(self):
fx.test_teardown(self)
pass
def test_pdb4amber_run(self):
pdb4amber_run(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_pdb_path'])
assert fx.equal(self.paths['output_pdb_path'], self.paths['ref_output_pdb_path'])
|
"""
Model definition for regression based on the BoW model.
"""
import os
import argparse
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import (
confusion_matrix, classification_report, accuracy_score, f1_score)
from joblib import dump, load
from data_processing import DataIterator
from vectorizer import BoWVectorizer, BERTVectorizer
from regression import RegressionModel
from utils import safe_folder_create
VECTORIZERS = {
'BoW': BoWVectorizer,
'bert': BERTVectorizer
}
class RegressionModel:
ONE_HOT_DICT = {
'positive': 1,
'neutral': 0,
'negative': -1
}
def __init__(self, config, dataset, vectorizer):
self.config = config
self.dataset = dataset
self.vectorizer = self.get_vectorizer(vectorizer)
self.model_path = os.path.join('models', self.config.model_type)
safe_folder_create('models')
safe_folder_create(self.model_path)
def get_vectorizer(self, vectorizer):
if self.config.model_type == "BoW":
train = self.dataset(
self.config.batch_size, 'sentiment_1400k', 'train').batched()
vectorizer = vectorizer(self.config.max_length).fit(
train)
elif self.config.model_type == "bert":
vectorizer = vectorizer(self.config.max_length)
return vectorizer
def _vectorize_batch(self, batch):
data_out, labels = self.vectorizer.vectorize(batch)
return data_out, labels
def train(self, _C=1.0):
print("Creating new model")
model = SGDClassifier(loss="log", max_iter=4000, )
# model = LogisticRegression(max_iter = 4000, C=_C)
classes = np.unique(["positive", "negative"])
for i, batch in enumerate(
self.dataset(
self.config.batch_size, 'sentiment_1400k', 'train').batched()):
x_tr, y_tr = self._vectorize_batch(batch)
model.partial_fit(x_tr, y_tr, classes=classes)
if i % 100 == 0:
print(f"Batch {i} processed.")
print("Model trained")
self._save_model(model, 'sentiment_model')
def _save_model(self, model, model_name):
model_path = self.model_path + f'/{model_name}.joblib'
dump(model, model_path)
print(f"Model saved to {model_path}")
def _reload_model(self, model_name):
clf = SGDClassifier()
model_path = self.model_path + f'/{model_name}.joblib'
try:
clf = load(model_path)
print(f"Model loaded from {model_path}")
        except FileNotFoundError:
            # no saved model file on disk yet
            print("Need to train a model first")
return clf
def evaluate(self):
model = self._reload_model('sentiment_model')
y_preds = []
y_actual = []
for batch in self.dataset(
self.config.batch_size, 'sentiment_1400k', 'test').batched():
new_X, new_y = self._vectorize_batch(batch)
y_preds += model.predict(new_X).tolist()
y_actual += new_y
confusion = confusion_matrix(
y_actual, y_preds, labels=["positive", "negative"])
# classification report for precision, recall f1-score and accuracy
matrix = classification_report(
y_actual, y_preds, labels=["positive", "negative"])
print('Classification report : \n',matrix)
accuracy = accuracy_score(y_actual, y_preds)
f1 = f1_score(
y_actual, y_preds, labels=["positive", "negative"], average='macro')
return {
"Model_name": self.config.model_type,
"checkpoint_path": self.model_path,
"accuracy": accuracy,
"f1_score": f1
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='The size of the batches to use '
'when training the models', type=int,
default=32)
parser.add_argument('--max_length', help='Maximum number of tokens to '
'use when vectorizing text', type=int,
default=50)
parser.add_argument('--model_type', help='Model type to use ',
type=str, default='BoW')
parser.add_argument('--job', help='Whether to train or evaluate the model.',
type=str, default='train')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
iterator = DataIterator
vectorizer = VECTORIZERS[args.model_type]
model = RegressionModel(args, iterator, vectorizer)
if args.job == 'train':
model.train()
elif args.job == 'evaluate':
results = model.evaluate()
print(results)
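# Hedged usage note (added; the file name below is a placeholder), based on
# the parse_args definition above:
#   python <this_file>.py --model_type BoW  --job train
#   python <this_file>.py --model_type BoW  --job evaluate
#   python <this_file>.py --model_type bert --job evaluate --max_length 128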
|
#!/usr/bin/env python3
"""Build Skyfield's internal table of constellation boundaries.
See:
https://iopscience.iop.org/article/10.1086/132034/pdf
http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42
"""
import argparse
import os
import sys
import numpy as np
from numpy import array, searchsorted
from skyfield import api
URL = 'http://cdsarc.u-strasbg.fr/ftp/VI/42/data.dat'
def main():
with api.load.open(URL) as f:
lines = list(f)
unique_ra = set()
unique_dec = set()
fracs = set()
boundaries = []
for line in lines:
fields = line.split()
ra_low = extend(fields[0])
ra_up = extend(fields[1])
de_low = extend(fields[2])
const = fields[3].decode('ascii')
print(ra_low, const)
#print(ra_int(ra_low))
#fracs.add(fields[0].split(b'.')[1])
unique_ra.add(ra_low)
unique_ra.add(ra_up)
unique_dec.add(de_low)
fracs.add(const)
boundaries.append([ra_low, ra_up, de_low, const])
print(sorted(fracs))
print('constellations:', len(fracs))
print('unique_ra:', len(unique_ra))
print('unique_dec:', len(unique_dec))
sorted_consts = array(sorted(fracs))
sorted_ra = array(sorted(unique_ra))
sorted_dec = array(sorted(unique_dec))
assert sorted_ra[0] == 0
assert sorted_ra[-1] == 24
assert sorted_dec[0] == -90
assert sorted_dec[-1] == 88
sorted_ra = sorted_ra[1:]
sorted_dec = sorted_dec[1:]
print('bytes', sorted_ra.nbytes)
print('bytes', sorted_dec.nbytes)
#grid = [[5] * len(unique_dec)] * len(unique_ra)
#grid = array(grid, 'i1')
row = [-128] * len(sorted_ra)
grid = []
i = 0
de = -90.0
for ra_low, ra_up, de_low, const in boundaries[::-1]:
if de_low > de:
grid.append(row)
row = list(row)
de = de_low
i0 = searchsorted(sorted_ra, ra_low, side='right')
i1 = searchsorted(sorted_ra, ra_up, side='right')
c = searchsorted(sorted_consts, const)
# if ra_up == 24.0:
# print(sorted_ra, ra_low, ra_up)
# print(i0, i1, '?', len(row))
# exit()
for j in range(i0, i1):
row[j] = c
grid.append(row)
grid.append(row)
grid.append(row)
#grid = grid[::-1]
grid = array(grid, 'i1').T
assert len(sorted_ra) == 236
assert searchsorted(sorted_ra, 0, side='right') == 0
assert searchsorted(sorted_ra, 0.06, side='right') == 0
assert searchsorted(sorted_ra, 0.07, side='right') == 1
assert searchsorted(sorted_ra, 23.8, side='right') == 234
assert searchsorted(sorted_ra, 23.9, side='right') == 235
assert searchsorted(sorted_ra, 24.0, side='right') == 236
sorted_ra = sorted_ra[:-1]
assert len(sorted_ra) == 235
assert searchsorted(sorted_ra, 0) == 0
assert searchsorted(sorted_ra, 0.06) == 0
assert searchsorted(sorted_ra, 0.07) == 1
assert searchsorted(sorted_ra, 23.8) == 234
assert searchsorted(sorted_ra, 23.9) == 235
assert searchsorted(sorted_ra, 24.0) == 235
print(sorted_consts[57])
print(grid)
print('shape', grid.shape)
print('bytes', grid.nbytes)
for ra, dec in [(0, 0), (0.1, 0.1),
(5.59, -5.45),
(16, 80), (16, 90), (16, -90), (24, 360),
([0, 16], [0, 80])]:
c = compute_constellation(ra, dec, sorted_ra, sorted_dec,
sorted_consts, grid)
print('=', ra, dec, c)
path = os.path.dirname(__file__) + '/../skyfield/data/constellations'
np.savez_compressed(
path,
sorted_ra=sorted_ra,
sorted_dec=sorted_dec,
radec_to_index=grid,
indexed_abbreviations=sorted_consts,
)
def compute_constellation(ra, dec, sorted_ra, sorted_dec, sorted_consts, grid):
i = searchsorted(sorted_ra, ra)
j = searchsorted(sorted_dec, dec)
#print(dec, sorted_dec)
#print(ra, sorted_ra)
print("ra,dec", ra, dec)
print("i,j", i, j)
return sorted_consts[grid[i, j]]
def extend(s):
"""Return a float for `s` extended to machine precision.
Takes a string like '13.6667', passes it to `float()`,
and snaps it to the nearest whole second.
"""
return round(3600 * float(s)) / 3600.
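# Worked example (added for illustration): extend('13.6667')
#   3600 * 13.6667 = 49200.12  ->  round() -> 49200  ->  / 3600 = 13.666666...
# i.e. the boundary value is snapped back to exactly 13h40m00s.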
# Some discarded code that I might want to revive someday: how to grow
# and shrink a list of segments as new ones supersede old ones on the
# way down the sky.
def segment_experiment():
assert insert_segment([0, 4, 7, 10], 0, 3) == [0, 3, 4, 7, 10]
assert insert_segment([0, 4, 7, 10], 4, 7) == [0, 4, 7, 10]
assert insert_segment([0, 4, 7, 10], 6, 9) == [0, 4, 6, 9, 10]
assert insert_segment([0, 4, 7, 10], 7, 10) == [0, 4, 7, 10]
assert insert_segment([0, 4, 7, 10], 0, 10) == [0, 10]
assert insert_segment([0, 10], 4, 7) == [0, 4, 7, 10]
assert insert_segment([], 4, 7) == [4, 7]
segments = []
n = 0
for ra_low, ra_up, de_low in boundaries[::-1]:
segments = insert_segment(segments, ra_low, ra_up)
print(len(segments), end=' ')
n += len(segments)
print(n)
def insert_segment(ra_list, ra_low, ra_up):
new = []
i = 0
while i < len(ra_list) and ra_list[i] < ra_low:
new.append(ra_list[i])
i += 1
new.append(ra_low)
new.append(ra_up)
while i < len(ra_list) and ra_list[i] <= ra_up:
i += 1
while i < len(ra_list):
new.append(ra_list[i])
i += 1
return new
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# coding: utf-8
# <div><img src="attachment:qgssqml2021wordmark.png"></div>
# # Part I: Introduction to Qiskit
# Welcome to Qiskit! Before starting with the exercises, please run the cell below by pressing 'shift' + 'return'.
# In[1]:
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, assemble, Aer, IBMQ, execute
from qiskit.quantum_info import Statevector
from qiskit.visualization import plot_bloch_multivector, plot_histogram
from qiskit_textbook.problems import dj_problem_oracle
# ## I.1: Basic Rotations on One Qubit and Measurements on the Bloch Sphere
#
# Before getting into complicated circuits on many qubits, let us start by looking at a single qubit. Read this chapter: https://qiskit.org/textbook/ch-states/introduction.html to learn the basics about the Bloch sphere, Pauli operators, as well as the Hadamard gate and the $S$ and $S^\dagger$ gates.
#
# By default, states in qiskit start in $|0\rangle$, which corresponds to "arrow up" on the Bloch sphere. Play around with the gates $X$, $Y$, $Z$, $H$, $S$ and $S^\dagger$ to get a feeling for the different rotations. To do so, insert combinations of the following code lines in the lines indicated in the program:
#
# qc.x(0) # rotation by Pi around the x-axis
# qc.y(0) # rotation by Pi around the y-axis
# qc.z(0) # rotation by Pi around the z-axis
# qc.s(0) # rotation by Pi/2 around the z-axis
# qc.sdg(0) # rotation by -Pi/2 around the z-axis
# qc.h(0) # rotation by Pi around an axis located halfway between x and z
#
# Try to reach the given state in the Bloch sphere in each of the following exercises. (Press Shift + Enter to run a code cell)
# ### 1.) Let us start easy by performing a bit flip. The goal is to reach the state $|1\rangle$.
#
# <div><img src="attachment:state1.png" width="200"></div>
# In[2]:
def lab1_ex1():
qc = QuantumCircuit(1)
#
#
qc.x(0)
#
#
return qc
state = Statevector.from_instruction(lab1_ex1())
plot_bloch_multivector(state)
# In[3]:
from qc_grader import grade_lab1_ex1
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex1(lab1_ex1())
# ### 2.) Next, we would like to create superposition. The goal is to reach the state $|+\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle + |1\rangle\right)$.
#
# <div><img src="attachment:plusstate.png" width="200"></div>
# In[4]:
def lab1_ex2():
qc = QuantumCircuit(1)
#
#
qc.h(0)
#
#
return qc
state = Statevector.from_instruction(lab1_ex2())
plot_bloch_multivector(state)
# In[5]:
from qc_grader import grade_lab1_ex2
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex2(lab1_ex2())
# ### 3.) Let's combine those two. The goal is to reach the state $|-\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - |1\rangle\right)$.
#
# <div><img src="attachment:minusstate.png" width="200"></div>
#
# Can you even come up with different ways?
# In[6]:
def lab1_ex3():
qc = QuantumCircuit(1)
#
qc.x(0)
qc.h(0)
#
#
return qc
state = Statevector.from_instruction(lab1_ex3())
plot_bloch_multivector(state)
# In[7]:
from qc_grader import grade_lab1_ex3
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex3(lab1_ex3())
# ### 4.) Finally, we move on to the complex numbers. The goal is to reach the state $|- i\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - i|1\rangle\right)$.
#
# <div><img src=attachment:leftstate.png width="200"></div>
# In[18]:
def lab1_ex4():
qc = QuantumCircuit(1)
#
qc.h(0)
#qc.y(0)
qc.sdg(0)
#
#
return qc
state = Statevector.from_instruction(lab1_ex4())
plot_bloch_multivector(state)
# In[19]:
from qc_grader import grade_lab1_ex4
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex4(lab1_ex4())
# ## I.2: Quantum Circuits Using Multi-Qubit Gates
# Great job! Now that you've understood the single-qubit gates, let us look at gates on multiple qubits. Check out this chapter if you would like to refresh the theory: https://qiskit.org/textbook/ch-gates/introduction.html. The basic gates on two and three qubits are given by
#
# qc.cx(c,t) # controlled-X (= CNOT) gate with control qubit c and target qubit t
# qc.cz(c,t) # controlled-Z gate with control qubit c and target qubit t
# qc.ccx(c1,c2,t) # controlled-controlled-X (= Toffoli) gate with control qubits c1 and c2 and target qubit t
# qc.swap(a,b) # SWAP gate that swaps the states of qubit a and qubit b
#
# We start with an easy gate on two qubits, the controlled-NOT (also CNOT) gate . As it has no effect applied on two qubits in state $|0\rangle$, we apply a Hadamard gate before to bring the control qubit in superposition. This way, we can create entanglement. The resulting state is one of the so-called Bell states.
# ### 5.) Construct the Bell state $|\Phi^+\rangle = \frac{1}{\sqrt{2}}\left(|01\rangle + |10\rangle\right)$.
# In[22]:
def lab1_ex5():
qc = QuantumCircuit(2,2) # this time, we not only want two qubits, but also two classical bits for the measurement
#
#
qc.x(1)
qc.h(0)
qc.cx(0,1)
#
#
return qc
qc = lab1_ex5()
qc.draw() # we draw the circuit
# In[23]:
from qc_grader import grade_lab1_ex5
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex5(lab1_ex5())
# Let us now also add a measurement to the above circuit so that we can execute it (using the simulator) and plot the histogram of the corresponding counts.
# In[ ]:
qc.measure(0, 0) # we perform a measurement on qubit q_0 and store the information on the classical bit c_0
qc.measure(1, 1) # we perform a measurement on qubit q_1 and store the information on the classical bit c_1
backend = Aer.get_backend('qasm_simulator') # we choose the simulator as our backend
counts = execute(qc, backend, shots = 1000).result().get_counts() # we run the simulation and get the counts
plot_histogram(counts) # let us plot a histogram to see the possible outcomes and corresponding probabilities
# As you can see in the histogram, the only possible outputs are "01" and "10", so the states of the two qubits are always perfectly anti-correlated.
# ### 6.) Write a function that builds a quantum circuit on 3 qubits and creates the GHZ-like state, $|\Psi\rangle = \frac{1}{\sqrt{2}} \left(|010\rangle - |101 \rangle \right)$.
# Hint: the following circuit constructs the GHZ state, $|GHZ\rangle = \frac{1}{\sqrt{2}} \left(|000\rangle + |111 \rangle \right)$:
#
# <div><img src="attachment:ghz_circuit.png" width="200"></div>
# In[33]:
def lab1_ex6():
#
qc = QuantumCircuit(3)
qc.x(0)
qc.h(0)
qc.cx(0,1)
qc.cx(1,2)
qc.x(1)
#
#
return qc
qc = lab1_ex6()
qc.draw() # we draw the circuit
# In[34]:
from qc_grader import grade_lab1_ex6
# Note that the grading function is expecting a quantum circuit without measurements
grade_lab1_ex6(lab1_ex6())
# Congratulations for finishing these introductory exercises! Hopefully, they got you more familiar with the Bloch sphere and basic quantum gates. Let us now apply this knowledge to the second part, where we construct our first quantum algorithm, the Deutsch-Jozsa algorithm.
# # Part II: Oracles and the Deutsch-Jozsa algorithm
# Many quantum algorithms revolve around the notion of so-called $\textit{oracles}$. An oracle is a function that can be considered as a 'black box'. We generally want to find out specific properties of this function. We do this by asking questions to the oracle (*querying*). The query complexity is then defined as the minimum number of queries in order to find these properties.
#
#
# To get familiar with the use of oracles we will now consider the Deutsch-Jozsa problem. We will see that the quantum solution has a drastically lower query complexity than its classical counterpart.
#
# ## II.1: Deutsch-Jozsa Problem <a id='djproblem'> </a>
#
# We are given a hidden Boolean function $f$, which takes as input a string of bits, and returns either $0$ or $1$, that is:
#
# $$
# f(\{x_0,x_1,x_2,...\}) \rightarrow 0 \textrm{ or } 1 \textrm{ , where } x_n \textrm{ is } 0 \textrm{ or } 1$$
#
# The property of the given Boolean function is that it is guaranteed to either be balanced or constant. A constant function returns all $0$'s or all $1$'s for any input, while a balanced function returns $0$'s for exactly half of all inputs and $1$'s for the other half. Our task is to determine whether the given function is balanced or constant.
#
# The Deutsch-Jozsa algorithm was the first example of a quantum algorithm that performs better than the best classical algorithm. It showed that there can be advantages to using a quantum computer as a computational tool for a specific problem.
#
# In the Deutsch-Jozsa problem you are given an unknown oracle. In Qiskit this is implemented by the function:
# In[35]:
oraclenr = 4 # determines the oracle (can range from 1 to 5)
oracle = dj_problem_oracle(oraclenr) # gives one out of 5 oracles
oracle.name = "DJ-Oracle"
# This function gives a certain oracle with 5 input qubits. The last qubit ($q_4$) will be the output. In order to get a feeling for the oracle, let us create a circuit to which we add the oracle such that we can pass it different input strings and then measure the output of $q_4$. This corresponds to the classical way of determining whether the oracle is balanced or constant.
# In[36]:
def dj_classical(n, input_str):
# build a quantum circuit with n qubits and 1 classical readout bit
dj_circuit = QuantumCircuit(n+1,1)
# Prepare the initial state corresponding to your input bit string
for i in range(n):
if input_str[i] == '1':
dj_circuit.x(i)
# append oracle
dj_circuit.append(oracle, range(n+1))
    # measure the output qubit (q_4) into the classical readout bit
dj_circuit.measure(n,0)
return dj_circuit
n = 4 # number of qubits
input_str = '1111'
dj_circuit = dj_classical(n, input_str)
dj_circuit.draw() # draw the circuit
# Now we simulate the results to find the outcome of this circuit. Try different input bit strings to see the corresponding outputs!
# In[46]:
input_str = '1111'
dj_circuit = dj_classical(n, input_str)
qasm_sim = Aer.get_backend('qasm_simulator')
transpiled_dj_circuit = transpile(dj_circuit, qasm_sim)
qobj = assemble(transpiled_dj_circuit, qasm_sim)
results = qasm_sim.run(qobj).result()
answer = results.get_counts()
plot_histogram(answer)
# Do you already have an idea whether the oracle is balanced or constant? How many inputs would you need to check minimally and maximally for this oracle?
# In[71]:
def lab1_ex7():
    # Best case: two differing outputs already prove the oracle is balanced.
    min_nr_inputs = 2
    # Worst case: half of all inputs plus one (2**(n-1) + 1) identical outputs
    # are needed before the oracle is guaranteed to be constant.
    max_nr_inputs = 2**(n-1) + 1
    return [min_nr_inputs, max_nr_inputs]
# In[72]:
from qc_grader import grade_lab1_ex7
# Note that the grading function is expecting a list of two integers
grade_lab1_ex7(lab1_ex7())
# ## II.2: Quantum Solution to the Deutsch-Jozsa Problem
# Using a quantum computer, we can find out if the oracle is constant or balanced with 100% confidence after only one call to the function $f(x)$, provided we have the function $f$ implemented as a quantum oracle, which maps the state $\vert x\rangle \vert y\rangle $ to $ \vert x\rangle \vert y \oplus f(x)\rangle$, where $\oplus$ is addition modulo $2$. Below we will walk through the algorithm.
# Prepare two quantum registers. The first is an $n$-qubit register initialised to $|0\rangle$, and the second is a one-qubit register initialised to $|-\rangle =\frac{1}{\sqrt{2}}\left(|0\rangle - |1 \rangle \right)$. Note, that with Qiskit states are described as $\vert b_3 b_2 b_1 b_0\rangle_{q_3q_2q_1q_0}$, i.e. just like for binary numbers, the last bit $b_0$ corresponds to the state of the first qubit. Thus, we want to initialize the state $$\vert \psi_0 \rangle = \vert -\rangle \otimes \vert0\rangle^{\otimes n}.$$
# In[85]:
n=4
def psi_0(n):
    qc = QuantumCircuit(n+1,n)
    # Build the state (|00000> - |10000>)/sqrt(2) by flipping the last qubit
    # and putting it into the |-> state
    qc.x(n)
    qc.h(n)
    return qc
dj_circuit = psi_0(n)
dj_circuit.draw()
# Applying the quantum bit oracle to any state $\vert x\rangle \vert y\rangle$ would yield the state $\vert x\rangle \vert y \oplus f(x)\rangle$.
# As we have prepared the state $|y\rangle$, which corresponds to the state on the last qubit $q_n$, in the state $|-\rangle$, the output of the oracle for any input bitstring $x$ is given by:
# $$ \frac{1}{\sqrt{2}}\vert x\rangle (\vert f(x)\rangle - \vert 1 \oplus f(x)\rangle) = \frac{1}{\sqrt{2}} (-1)^{f(x)}|x\rangle (|0\rangle - |1\rangle ) = (-1)^{f(x)}|x\rangle |-\rangle.$$
# Thus, we have created a phase oracle acting on the bit string $x$.
#
# Before applying the oracle, we need to create our input state on the first $n$ qubits though. For that we want an equal superposition state, so that the total state on all $n+1$ qubits is given by $$\vert \psi_1 \rangle = \frac{1}{\sqrt{2^{n+1}}}\sum_{x=0}^{2^n-1} \vert x\rangle \left(|0\rangle - |1 \rangle \right)$$
# In[97]:
def psi_1(n):
#obtain the |psi_0> =(|00000> - |10000>)/sqrt(2)
qc = psi_0(n)
# create the superposition state |psi_1>
#
#
qc.h(range(n))
#
#
return qc
dj_circuit = psi_1(n)
dj_circuit.draw()
# Now we are ready to apply our oracle to the prepared superposition state $\vert \psi_1 \rangle$. This gives the state
# $$ \vert \psi_2 \rangle = \frac{1}{\sqrt{2^{n+1}}}\sum_{x=0}^{2^n-1} \vert x\rangle (\vert f(x)\rangle - \vert 1 \oplus f(x)\rangle) = \frac{1}{\sqrt{2^{n+1}}}\sum_{x=0}^{2^n-1}(-1)^{f(x)}|x\rangle (|0\rangle - |1\rangle ) = \frac{1}{\sqrt{2^{n}}}\sum_{x=0}^{2^n-1}(-1)^{f(x)}|x\rangle |-\rangle.$$
# In[98]:
def psi_2(oracle,n):
# circuit to obtain psi_1
qc = psi_1(n)
# append the oracle
qc.append(oracle, range(n+1))
return qc
dj_circuit = psi_2(oracle, n)
dj_circuit.draw()
# In the final part of our algorithm we disregard the outcome on our second register and we apply an n-fold Hadamard to our first register. Afterwards we measure the outcome on these qubits.
# In[108]:
def lab1_ex8(oracle, n): # note that this exercise also depends on the code in the functions psi_0 (In [24]) and psi_1 (In [25])
qc = psi_2(oracle, n)
# apply n-fold hadamard gate
#
#
qc.h(range(n))
#
#
# add the measurement by connecting qubits to classical bits
#
#
    qc.measure(range(n), range(n))
#
#
return qc
dj_circuit = lab1_ex8(oracle, n)
dj_circuit.draw()
# In[109]:
from qc_grader import grade_lab1_ex8
# Note that the grading function is expecting a quantum circuit with measurements
grade_lab1_ex8(lab1_ex8(dj_problem_oracle(4),n))
# At this point the second single qubit register may be ignored. Applying a Hadamard gate to each qubit in the first register yields the state:
# $$
# \begin{aligned}
# \lvert \psi_3 \rangle
# & = \frac{1}{2^n}\sum_{x=0}^{2^n-1}(-1)^{f(x)}
# \left[ \sum_{y=0}^{2^n-1}(-1)^{x \cdot y}
# \vert y \rangle \right] \\
# & = \frac{1}{2^n}\sum_{y=0}^{2^n-1}
# \left[ \sum_{x=0}^{2^n-1}(-1)^{f(x)+x \cdot y} \right]
# \vert y \rangle,
# \end{aligned}
# $$
#
# where $x \cdot y = x_0y_0 \oplus x_1y_1 \oplus \ldots \oplus x_{n-1}y_{n-1}$ is the sum of the bitwise product.
#
# Let us now run the circuit including the measurement of the first register on the simulator:
# In[ ]:
qasm_sim = Aer.get_backend('qasm_simulator')
transpiled_dj_circuit = transpile(dj_circuit, qasm_sim)
qobj = assemble(transpiled_dj_circuit)
results = qasm_sim.run(qobj).result()
answer = results.get_counts()
plot_histogram(answer)
# As we learnt in the lecture, if the output is the zero bit string, we know that the oracle is constant. If it is any other bit string, we know that it is balanced. You may also check the other oracles by just changing the oracle number in the beginning where the oracle is defined!
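# Why this works (added note): setting $y = 0$ in the expression for $\lvert \psi_3 \rangle$ above shows that the amplitude of the all-zero string is $\frac{1}{2^n}\sum_{x}(-1)^{f(x)}$, which equals $\pm 1$ when $f$ is constant and exactly $0$ when $f$ is balanced. Measuring "0000" with certainty therefore means the oracle is constant; any other outcome means it is balanced.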
|
from rest_framework import permissions
from rest_framework import compat
from gaskserv.models import TimeEntry
class IsAuthenticated(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return request.user and compat.is_authenticated(request.user)
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
class IsEntryValidForEnd(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
le = request.query_params.get('last_entry', None)
if request.method == 'PATCH' and obj.end_time is not None and le:
return False
return True
class HasAllEntriesValid(permissions.BasePermission):
def has_permission(self, request, view):
has_unfinished_time_entries = TimeEntry.objects.filter(end_time=None, owner=request.user.id)
if has_unfinished_time_entries and request.method == 'POST':
return False
return True
class IsMemberOrOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
# if safe method
if request.method in permissions.SAFE_METHODS:
return True
# if member of team
for team in obj.teams.all():
for member in team.members.all():
if request.user == member:
return True
# if owner
return obj.owner == request.user
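# Hedged usage sketch (added; the view below is hypothetical): these permission
# classes are meant to be attached to DRF views, e.g.
#
#   class TimeEntryViewSet(viewsets.ModelViewSet):
#       queryset = TimeEntry.objects.all()
#       permission_classes = [IsAuthenticated,
#                             HasAllEntriesValid,
#                             IsEntryValidForEnd]
#
# DRF then calls has_permission / has_object_permission on every request.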
|
# coding: utf-8
from os import DirEntry
import socketserver
from os import path
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
# Copyright 2021 Shiyu Xiu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.base_url = "http://127.0.0.1:8080"
self.response = "HTTP/1.1 "#response message, reinitialized on every request
self.data = self.request.recv(1024).strip()
print ("Got a request of: %s\n" % self.data)
# self.request.sendall(bytearray("OK",'utf-8'))
if len(self.data)>0:
self.get()
#print("data is "+self.response)
self.request.sendall(bytearray(self.response,'utf-8'))
def check_legal_path(self,dir):
p = path.normpath("www" + dir)#normalize the path
#print(p+"\n")
#print(path.exists(p) and ("www" in p))
return path.exists(p) and ("www" in p)#reject insecure paths that escape the www directory
def get(self):
#parse the request data into list
data_list = self.data.decode("utf-8").split(" ")
method = data_list[0]
directory = data_list[1]
if method == "GET":
if directory[-1] == '/':#with slash
#handle the root html file
if self.check_legal_path(directory):
if path.isfile("www"+directory):
file_type =directory.split(".")[-1]
if file_type == "html":
self.response+="200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"
file = open("www" + directory)
for line in file:
self.response += line + "\r\n"
file.close()
elif file_type == "css":
self.response += "200 OK\r\nContent-Type: text/css; charset=UTF-8\r\n\r\n"
css_file = open("www"+directory)
for line in css_file:
self.response += line + "\r\n"
css_file.close()
else:
#serve other file
self.response += "200 OK\r\nContent-Type: application/octet-stream; charset=UTF-8\r\n\r\n"
f = open("www"+directory)
for line in f:
self.response += line + "\r\n"
f.close()
else:
self.response+="200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"
file = open("www"+directory+"index.html")
for line in file:
self.response += line + "\r\n"
file.close()
else:
self.response+="404 Not Found\r\n"
else:#without slash
if self.check_legal_path(directory):#path is legal
if path.isfile("www"+directory):#the directory leads to a file
file_type =directory.split(".")[-1]
if file_type == "html":
self.response+="200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"
file = open("www" + directory)
for line in file:
self.response += line + "\r\n"
file.close()
elif file_type == "css":
self.response += "200 OK\r\nContent-Type: text/css; charset=UTF-8\r\n\r\n"
css_file = open("www"+directory)
for line in css_file:
self.response += line + "\r\n"
css_file.close()
else:
#serve other file
self.response += "200 OK\r\nContent-Type: application/octet-stream; charset=UTF-8\r\n\r\n"
f = open("www"+directory)
for line in f:
self.response += line + "\r\n"
f.close()
else:
self.response += "301 Moved Permanently\r\n" + "Location: " + self.base_url + directory + "/\r\n"
else:
self.response+="404 Not Found\r\n"
else:
self.response+="405 Method Not Allowed\r\n"
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
"""InVision - Weighted Moving Average."""
from typing import List
import click
import numpy as np
import pandas as pd
def moving_avg_prediction(data: pd.DataFrame, num_obs: int) -> pd.DataFrame:
"""Computes average of the last n observations.
A future value of the variable depends on the average of its n previous values.
Args:
data (pd.DataFrame): series to be analyzed.
num_obs (int): number of observations to make the average.
Returns:
pd.DataFrame: dataframe with the prediction.
"""
data_map: pd.DataFrame = data.copy()
prediction = data_map.iloc[-num_obs:].mean()
next_date = max(data_map.index) + pd.to_timedelta(1, unit="h")
data_map.loc[next_date] = prediction
click.echo(
f"The predicted value using a moving average for {next_date} is {prediction.values[0]}"
)
return data_map
def moving_avg_smoothing(data: pd.DataFrame, window: int) -> pd.DataFrame:
"""Smooths the original time series to identify trends.
Args:
data (pd.DataFrame): series to be analyzed.
window (int): rolling window size.
Returns:
pd.DataFrame: smoothed dataframe.
"""
rolling_mean: pd.DataFrame = data.rolling(window=window).mean()
return rolling_mean
def weighted_moving_avg_prediction(
data: pd.DataFrame, weights: List[float]
) -> pd.DataFrame:
"""Computes the weighted average of the last n observations.
The weights sum up to one with larger weights assigned to more recent observations.
Args:
data (pd.DataFrame): series to be analyzed.
weights (List[float]): weights.
Returns:
pd.DataFrame: dataframe with the prediction.
"""
data_wmap: pd.DataFrame = data.copy()
np_weights: np.ndarray = np.array(weights)
num_obs: int = len(weights)
data_values = data_wmap.iloc[-num_obs:].values.flatten().reshape((num_obs,))
prediction = np.dot(data_values, np_weights)
next_date = max(data_wmap.index) + pd.to_timedelta(1, unit="h")
data_wmap.loc[next_date] = prediction
click.echo(
f"The predicted value using a weighted moving average for {next_date} is "
f"{prediction}"
)
return data_wmap
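# Illustrative usage sketch (not part of the original module): builds a small
# hourly series with hypothetical values and runs both prediction helpers on it.
def example_usage() -> None:
    index = pd.date_range("2021-01-01", periods=5, freq="H")
    series = pd.DataFrame({"value": [10.0, 12.0, 11.0, 13.0, 14.0]}, index=index)
    # Plain moving average over the last 3 observations
    moving_avg_prediction(series, num_obs=3)
    # Weights sum to one, with the largest weight on the most recent observation
    weighted_moving_avg_prediction(series, weights=[0.2, 0.3, 0.5])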
def main():
"""Main function of the module."""
pass
if __name__ == "__main__":
main()
|
"""
Software License Agreement (Apache 2.0)
Copyright (c) 2020, The MITRE Corporation.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This project was developed by The MITRE Corporation.
If this code is used in a deployment or embedded within another project,
it is requested that you send an email to opensource@mitre.org in order to
let us know where this software is being used.
"""
"""Analyzes a graph output from the operations side of the framework (crawling
an application). This module represents what developers would want as a base
analysis, which should be compatible with any type of interface and application
context.
To create a custom Analyzer that inherits from BaseAnalyzer, you may need to
edit any of the following blocks. CTRL+F the names (ex: "Formatting sections")
to jump directly to these blocks of code. A minimal subclass sketch is included
at the end of this module.
Formatting sections
- _build_sections : Stores formatting and text information for sections that
run once per analysis. There are no build_sections implemented for the
BaseAnalyzer, but these should not be specific to a particular user model,
and should only be executed once per analysis. Potential examples include:
- assessing the difficulty to reach pre-configured paths
- complexity of network
- consistency of information, themes, styles, across states.
- _crawl_user_sections : Stores formatting and text information for sections that
run once per crawl_user. Some sections that are coded into BaseAnalyzer are:
- metrics: compares network metrics to the graph produced by the
crawl_user compared to the build_user (see
self._calculate_user_metrics() for the implementation).
- paths: analyzes optimal paths between all pairs of nodes for the
crawl_user compared to the build_user (see self.analyze_paths() for the
implementation).
NOTE: For any child class of BaseAnalyzer, whatever data in _build_sections
and _crawl_user_sections will append/overwrite to all higher-level
inherited classes. No need to copy and paste if you want to inherit
those sections, but you may want to delete them in your class's
customized __init__() if you do not want those higher-level sections.
Constructor/initializer
See the doc-string for details on what this does, but if you want to track any
additional data (ex: predefined paths, supporting files, etc) or perform other
tasks before you analyze, this is the place to do it.
Property (getter/setter) methods
No general advice is given here, but add methods to this block as necessary.
You will most likely not need to override existing methods (except for the
`data` methods).
Methods to perform section-specific analyses
No general advice is given here, but add methods to this block as necessary.
You will most likely not need to override existing methods.
Private methods to format analyzed sections of the report
This is where the methods coded above get pulled to be cleanly organized into
the report. If you implement any new section-specific methods, be sure to
add them to these functions.
Public utility functions
Functions that may be called in certain circumstances (ex: printing graphs
for demo/debug), but are not typically called in the standard workflow.
"""
import collections
import json
import os
import pathlib
from types import ModuleType
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import numpy as np
import pandas as pd
from tabulate import tabulate
from .graph import Graph
from .user import UserModel
class BaseAnalyzer:
# Defining filenames for output files (path is parameterized)
_json_data_fname = "analyzed_data.json"
_report_fname = "analysis_report.md"
_analyzed_gml_fname = "analyzed_full_graph.gml"
_paths_df_fname = "paths_df.csv"
# --
# Formatting sections. Title and guide to be printed for each section.
# May be overridden.
#
"""Configure these diction
In _build_sections and _crawl_user_sections below, the structure of the
dictionary is to be as follows:
<section_id> : {"label": <section_title>,
"guide_lines": <guide_lines>
}
NOTE: The values wrapped in "" should not change, but those wrapped in <>
should be filled with meaningful content as described below:
section_id -- str that uniquely identifies the section (not seen in
rendered report but necessary for sections to link
properly).
section_title -- str that visually represents the name of the section (what
the viewer of the report will see).
guide_lines -- list of str that describe what the section is doing in
the guide. Should clearly explain enough so that the
report is easy to understand.
"""
_build_sections = {}
_crawl_user_sections = \
{
"metrics":
{"label": "Summary Metrics",
"guide_lines":
[
' * **in-degree** -- number of edges that point into a given node',
' * **out-degree** -- number of edges that point away from a given node',
' * **strongly connected graph** -- for a given graph, a path exists between every pair of nodes (and in both directions)'
]
},
"paths":
{"label": "Path Analysis",
"guide_lines":
[
' * **path** -- an ordered series of edges that connects one node to another node',
' * **path length** -- number of edges in a path',
' * **average path length** -- for all paths in a graph, the average of all path lengths',
' * the **paths dataframe** has the following columns:',
' * **idx_from** -- index/ID of the starting node for a path',
' * **idx_to** -- index/ID of the ending node for a path',
' * **path_incr** -- this represents how much more the shortest path length from **idx_from** to **idx_to** is for the given user compared to the BuildUser. *Example*: a value of **2** means that it takes a given user 2 more actions to get from **idx_from** to **idx_to** than it does for BuildUser. **0** is desirable, higher numbers are not',
' * **dijkstra_diff** -- this represents the difference of the shortest weighted path length (using Dijkstra\'s algorithm) from **idx_from** to **idx_to** for the given user compared to the BuildUser. *Example*: a value of **0.2** means that the average score for each edge in the path from **idx_from** to **idx_to** is 0.2 lower (out of 1) for the BuildUser than it is for the CrawlUser. **0** is desirable and represents ease of accessibility, higher numbers are worse'
]
}
}
# --
# Constructor/initializer. Loads in graph and sets up needed class fields.
# May be overridden.
#
def __init__(self, graph_fpath, config):
"""Loads the graph object from graph_fpath, initializes build_user and
crawl_user graphs, and also stores the entire config for potential use
in any instance method. Utilizes formal
getter/setter methods to track these objects.
Args:
graph_fpath: path and filename of the .gml file to analyze
config: config module providing BUILD_USER and CRAWL_USERS (and any
other settings used during analysis)
"""
# Initialize class fields to None
self._graph_fpath = None
self._users = None
self._full_graph = None
self._config = None
self._report_data = None
self._output_path = None
# Set class field values
self._init_sections()
self.config = config
self.graph_fpath = graph_fpath
self._init_users()
self.report_data = collections.defaultdict(dict)
self.output_path = str(pathlib.Path(self.graph_fpath).parents[0])
# --
# Helper methods for initialization.
# May be overridden.
#
def _init_sections(self):
# Get all members of class hierarchy (besides object base class)
class_hierarchy = list(self.__class__.__mro__)[0:-1]
build_sections = {}
crawl_user_sections = {}
# Iterate from grandest parent class to self class, updating build and
# crawl_user sections.
for class_name in reversed(class_hierarchy):
build_sections.update(class_name._build_sections)
crawl_user_sections.update(class_name._crawl_user_sections)
# Update sections for the self object
self._build_sections = build_sections
self._crawl_user_sections = crawl_user_sections
def _init_users(self):
"""Dictionary that represents UserModels and their graphs. Has the
structure as follows:
{
"build_user": {
"user_model": UserModel,
"graph": networkx.graph
}
"crawl_users": {
"crawl_user_1" : {"user_model": UserModel,
"graph": networkx.graph},
"crawl_user_2" : {"user_model": UserModel,
"graph": networkx.graph},
...
}
}
"""
assert self.config is not None, "the Analyzer's config has not been" \
" set. Cannot continue with analysis"
build_user = self.config.BUILD_USER
crawl_users = self.config.CRAWL_USERS
for user_model in [build_user] + crawl_users:
assert isinstance(user_model, UserModel), f"user_models " \
f"\"{user_model}\" is not a UserModel object"
full_graph = nx.read_gml(self.graph_fpath)
self.full_graph = full_graph
# Storing the build_user and its relevant information
users_dict = dict()
users_dict["build_user"] = self._get_user_dict(build_user)
users_dict["crawl_users"] = dict()
for crawl_user in crawl_users:
name = crawl_user.get_name()
users_dict["crawl_users"][name] = self._get_user_dict(crawl_user)
self._users = users_dict
# --
# Property (getter/setter) methods.
# May be overridden.
#
@property
def graph_fpath(self):
return self._graph_fpath
@graph_fpath.setter
def graph_fpath(self, graph_fpath):
assert os.path.isfile(graph_fpath), f"graph_fpath \"{graph_fpath}\" " \
f"is not a valid file."
self._graph_fpath = graph_fpath
@property
def users(self):
return self._users
@property
def full_graph(self):
return self._full_graph
@full_graph.setter
def full_graph(self, full_graph):
"""Represents the graph with data for ALL users. Any fields that are
produced by analysis and outputted to the analyzed gml file will be
stored in this object
"""
assert isinstance(full_graph, nx.Graph), f"full_graph " \
f"\"{full_graph}\" is not a " \
f"networkx graph"
self._full_graph = full_graph
@property
def config(self):
return self._config
@config.setter
def config(self, config):
assert isinstance(config, ModuleType), "config object is not a module" \
". Cannot continue analysis."
self._config = config
@property
def report_data(self):
return self._report_data
@report_data.setter
def report_data(self, report_data):
assert isinstance(report_data, collections.defaultdict), \
"report_data must be a collections.defaultdict"
self._report_data = report_data
@property
def output_path(self):
return self._output_path
@output_path.setter
def output_path(self, output_path):
assert os.path.isdir(output_path), f"output_path \"{output_path}\" " \
f"is not a valid output directory."
self._output_path = output_path
# --
# Methods to perform section-specific analyses.
# May be overridden.
#
def _calculate_user_metrics(self, user):
"""Analyzes network metrics for the user's graph, compared to the
full graph.
Args:
user: str denoting a crawl user
Returns:
print_lines: list of lines to print to the report
"""
# Initializing lines to print
print_lines = list()
section = "metrics"
label = self._crawl_user_sections[section]["label"]
print_lines.append(f'### <a name="{user.lower()}-{section}"></a> {label}')
user_graph = self.users["crawl_users"][user]["graph"]
# computing network metrics
metrics = dict()
metrics['crawluser_nodes'] = len(user_graph.nodes())
metrics['builduser_nodes'] = len(self.full_graph.nodes())
metrics['crawluser_stub_nodes'] = sum(
[1 for n in user_graph.nodes(data=True)
if n[1]['stub'] == 'True'])
metrics['builduser_stub_nodes'] = sum([1 for n in self.full_graph.nodes(data=True) if n[1]['stub'] == 'True'])
metrics['crawluser_edges'] = len(user_graph.edges())
metrics['builduser_edges'] = len(self.full_graph.edges())
metrics['crawluser_avg_indeg'] = round(sum(d for n, d in user_graph.in_degree()) / metrics['crawluser_nodes'], 2)
metrics['crawluser_avg_outdeg'] = round(sum(d for n, d in user_graph.out_degree()) / metrics['crawluser_nodes'], 2)
metrics['builduser_avg_indeg'] = round(sum(d for n, d in self.full_graph.in_degree()) / metrics['builduser_nodes'], 2)
metrics['builduser_avg_outdeg'] = round(sum(d for n, d in self.full_graph.out_degree()) / metrics['builduser_nodes'], 2)
metrics['crawluser_is_strongly_connected'] = nx.is_strongly_connected(user_graph)
metrics['builduser_is_strongly_connected'] = nx.is_strongly_connected(self.full_graph)
# Save metrics data for eventual output
self.report_data["network_metrics"][user] = metrics
# formatting into lines for markdown
print_lines.append(f'**{round(100 * metrics["crawluser_nodes"] / metrics["builduser_nodes"],2)}% states** accessible compared to BuildUser **({metrics["crawluser_nodes"]} / {metrics["builduser_nodes"]})**\n')
if metrics['builduser_stub_nodes'] > 0:
print_lines.append(f' * **{round(100 * metrics["crawluser_stub_nodes"] / metrics["builduser_stub_nodes"],2)}% stub states** accessible compared to BuildUser **({metrics["crawluser_stub_nodes"]} / {metrics["builduser_stub_nodes"]})**\n')
else:
print_lines.append(f' * No **stub nodes** found in this graph\n')
if metrics["builduser_edges"] > 0:
print_lines.append(f'**{round(100 * metrics["crawluser_edges"] / metrics["builduser_edges"],2)}% edges** accessible compared to BuildUser **({metrics["crawluser_edges"]} / {metrics["builduser_edges"]})**\n')
else:
print_lines.append(f'**0.00% edges** accessible compared to BuildUser **({metrics["crawluser_edges"]} / {metrics["builduser_edges"]})**\n')
print_lines.append(f'**{metrics["crawluser_avg_indeg"]}** average in-degree (**{metrics["builduser_avg_indeg"]}** for BuildUser)\n')
print_lines.append(f'**{metrics["crawluser_avg_outdeg"]}** average out-degree (**{metrics["builduser_avg_outdeg"]}** for BuildUser)\n')
print_lines.append(f'strongly connected user graph: **{metrics["crawluser_is_strongly_connected"]}**')
return print_lines
def _analyze_user_paths(self, user):
"""Analyzes the shortest path lengths for a user compared to the full
graph
Args:
user: str denoting a crawl user
Returns:
paths_df: pd.DataFrame of the increased path length for a user
print_lines: list of lines to print to the report
"""
# Initializing lines to print
print_lines = list()
section = "paths"
label = self._crawl_user_sections[section]["label"]
print_lines.append(f'### <a name="{user.lower()}-{section}"></a> {label}')
# initialize state_ids lists and shortest path matrices for build and user
build_user = str(self.users["build_user"]["user_model"])
user_graph = self.users["crawl_users"][user]["graph"]
state_ids_user = list(user_graph.nodes())
state_ids_build = list(self.full_graph.nodes())
n = len(state_ids_build)
shortest_paths_build = np.full((n, n), np.nan)
shortest_paths_user = np.full((n, n), np.nan)
dijkstra_paths_user = np.full((n, n), np.nan)
# initializing scores to output
reversed_scores = {k: 1 - v for k, v in
nx.get_edge_attributes(user_graph, user).items()}
nx.set_edge_attributes(user_graph, reversed_scores, f"{user}_reversed")
# defaulting scores for nodes
nx.set_node_attributes(user_graph, 0, f'{user}AddScore')
nx.set_node_attributes(user_graph, 0, f'{user}MultScore')
# loop through and compute shortest paths for pairs of nodes for build_user
build_add_scores = {0: 0.0}
build_prod_scores = {0: 1}
for i in state_ids_build:
for j in state_ids_build:
if i != j and nx.has_path(self.full_graph, i, j):
shortest_path = nx.shortest_path(self.full_graph, source=i, target=j)
shortest_path_length = nx.shortest_path_length(self.full_graph, i, j)
shortest_paths_build[int(i), int(j)] = shortest_path_length
add_score = 0
prod_score = 1
for k in range(len(shortest_path) - 1):
node1 = shortest_path[k]
node2 = shortest_path[k + 1]
score = max([v[f'{build_user}'] for k, v in
self.full_graph.get_edge_data(node1, node2).items()])
add_score += score
prod_score *= score
# saving scores for nodes
if i == 0:
build_add_scores[j] = add_score
build_prod_scores[j] = prod_score
# saving scores to nodes
nx.set_node_attributes(self.full_graph, build_add_scores, f'{build_user}AddScore')
nx.set_node_attributes(self.full_graph, build_prod_scores, f'{build_user}MultScore')
# loop through and compute shortest paths for pairs of nodes for crawl_user
add_scores = {0: 0.0}
prod_scores = {0: 1}
for i in state_ids_user:
for j in state_ids_user:
if i != j and nx.has_path(user_graph, i, j):
shortest_paths_user[int(i), int(j)] = \
nx.shortest_path_length(user_graph, i, j)
shortest_path = nx.dijkstra_path(user_graph, i, j,
weight=f'{user}_reversed')
add_score = 0
prod_score = 1
for k in range(len(shortest_path) - 1):
node1 = shortest_path[k]
node2 = shortest_path[k + 1]
score = max([v[f'{user}'] for k, v in
user_graph.get_edge_data(node1, node2).items()])
add_score += score
prod_score *= score
dijkstra_paths_user[int(i), int(j)] = add_score
# saving scores for nodes
if i == 0:
add_scores[j] = add_score
prod_scores[j] = prod_score
# saving scores to nodes
nx.set_node_attributes(user_graph, add_scores, f'{user}AddScore')
nx.set_node_attributes(user_graph, prod_scores, f'{user}MultScore')
# Updating the full graph with cumulative Add and Mult scores for the
# given user
self.users["crawl_users"][user]["graph"] = user_graph
nx.set_node_attributes(self.full_graph, 0, f'{user}AddScore')
nx.set_node_attributes(self.full_graph, 0, f'{user}MultScore')
add_dict = dict(user_graph.nodes(data=f'{user}AddScore'))
mult_dict = dict(user_graph.nodes(data=f'{user}MultScore'))
nx.set_node_attributes(self.full_graph, add_dict, f'{user}AddScore')
nx.set_node_attributes(self.full_graph, mult_dict, f'{user}MultScore')
# get path differences for active user edges
paths_diff = np.sum(np.stack([-shortest_paths_build,
shortest_paths_user]),
axis=0)
# we are assuming that the dijkstra's distance for the BuildUser is always
# the number of edges it has to traverse because the BuildScore is always 1
dijkstra_diff = np.round(np.divide(np.sum(np.stack([shortest_paths_build,
-dijkstra_paths_user]),
axis=0), shortest_paths_build), 2)
# forming the user path increase dataframe
non_nan_idx = ~np.isnan(paths_diff)
state_pairs = np.argwhere(non_nan_idx)
state_pairs_from = state_pairs[:, 0]
state_pairs_to = state_pairs[:, 1]
state_pairs_diffs = list(paths_diff[non_nan_idx])
dijkstra_pairs_diff = list(dijkstra_diff[non_nan_idx])
paths_df = pd.DataFrame({
'idx_from': state_pairs_from,
'idx_to': state_pairs_to,
'path_incr': state_pairs_diffs,
'dijkstra_diff': dijkstra_pairs_diff
})
paths_df = paths_df.sort_values(by=['path_incr', 'dijkstra_diff'],
ascending=False)
# Tracking metrics to output and saving them to self.report_data
metrics = dict()
metrics["avg_path_len_incr"] = round(paths_df.path_incr.mean(), 2)
metrics["avg_path_dijkstra_diff"] = round(paths_df.dijkstra_diff.mean(),2)
self.report_data["network_metrics"][user].update(metrics)
# Saving paths dataframe
df_fname = f"{user}_{self._paths_df_fname}"
df_fpath = os.path.join(self.output_path, df_fname)
paths_df.to_csv(df_fpath, index=False)
# Formatting lines to print to the report
print_lines.append(f"\nAverage path length increase compared to "
f"BuildUser: **{metrics['avg_path_len_incr']}**\n")
print_lines.append(f"\nAverage Dijkstra difference between shortest "
f"paths compared to BuildUser: "
f"**{metrics['avg_path_dijkstra_diff']}**\n")
if len(paths_df.index) > 10:
print_lines.append(f"**First 10 rows of paths dataframe** for "
f"{user}:\n")
print_lines.append(tabulate(paths_df.head(10), tablefmt="pipe",
headers="keys", showindex=False))
else:
print_lines.append(f"**Full paths dataframe** for {user}:\n")
print_lines.append(tabulate(paths_df, tablefmt="pipe",
headers="keys", showindex=False))
print_lines.append(f"**NOTE:** The full paths csv is also stored here: "
f"`{df_fpath}`")
return print_lines, paths_df
# --
# Private methods to format analyzed sections of the report.
# May be overridden.
#
def _analyze_crawl_user(self, user):
"""Prepares lines to write to the report for all sections for a given
crawl user.
Args:
user: str denoting a crawl user
Returns:
print_lines: list of lines to print to the report
"""
print_lines = list()
print_lines.append(f'\n## <a name="{user.lower()}"></a> {user}')
# record the metrics
print_lines += self._calculate_user_metrics(user)
# record the path analysis
analyze_paths_print_lines, _ = self._analyze_user_paths(user)
print_lines += analyze_paths_print_lines
return print_lines
def _analyze_build(self):
"""Prepares lines to write to the report for all sections for the build
user.
Returns:
print_lines: list of lines to print to the report
"""
print_lines = list()
# No sections are coded for the build in BaseAnalyzer.
return print_lines
# --
# Main function that analyzes graph and output results.
# Should not need to be overridden.
#
def analyze(self):
"""Analyzes graph based on users and sections. Writes a report and
analyzed .gml file to an output directory at self.output_path.
"""
# format the top sections
users = self.users["crawl_users"].keys()
print_lines = []
print_lines += self._format_contents(users)
print_lines += self._format_guide()
# perform analysis for the build user
print_lines += self._analyze_build()
# perform analysis for the crawl_users
for user in users:
print_lines += self._analyze_crawl_user(user)
# write report to file
report_fpath = pathlib.Path(self.output_path) / self._report_fname
with open(report_fpath, 'w') as report_file:
for line in print_lines:
report_file.write(line)
report_file.write('\n')
# write new gml (G) to file
self._to_gml()
# write analyzed data to json file
json_fpath = pathlib.Path(self.output_path) / self._json_data_fname
dictionary = self.report_data
self._to_json(json_fpath, dictionary)
# --
# Private utility functions.
# Should not need to be overridden.
#
def _get_user_dict(self, user_model):
"""Helper function to initialize all data required for a user to perform
analysis
Args:
user_model: users.UserModel object
Returns:
user_dict: dictionary with keys "user_model" and "graph"
"""
user_dict = dict()
user_dict["user_model"] = user_model
selected_edges = [(u, v, k) for u, v, k, d in
self.full_graph.edges(data=True, keys=True)
if d[user_model.get_name()] > 0]
user_model_graph = self.full_graph.edge_subgraph(selected_edges).copy()
if not user_model_graph.has_node(0):
user_model_graph.add_nodes_from(self.full_graph.nodes(data=True))
user_model_graph.remove_nodes_from(list(user_model_graph.nodes())[1:])
user_dict["graph"] = user_model_graph
return user_dict
def _format_contents(self, users):
"""Prepares lines to write to the table of contents section
Args:
users: list of str name of the crawl_users to analyze
Returns:
print_lines: list of lines to print to the report
"""
print_lines = list()
print_lines.append('# Analysis Report')
print_lines.append('## Contents')
# --
# Printing the linkable guide
#
print_lines.append(f'* [Guide](#guide)')
# Printing build_sections links for the guide
for section in self._build_sections.keys():
label = self._build_sections[section]["label"]
print_lines.append(f' * [{label}](#guide-{section})')
# Printing crawl_user_sections links for the guide
for section in self._crawl_user_sections.keys():
label = self._crawl_user_sections[section]["label"]
print_lines.append(f' * [{label}](#guide-{section})')
# --
# Printing the linkable build and crawl_user sections
#
# Printing build_sections links for the guide
for section in self._build_sections.keys():
label = self._build_sections[section]["label"]
print_lines.append(f'* [{label}](#{section})')
# Printing crawl_user_sections links for each user
for user in users:
print_lines.append(f'* [{user}](#{user.lower()})')
for section in self._crawl_user_sections.keys():
label = self._crawl_user_sections[section]["label"]
print_lines.append(f' * [{label}](#{user.lower()}-{section})')
print_lines.append('')
return print_lines
def _format_guide(self):
"""Prepares lines to write to guide section
Returns:
print_lines: list of lines to print to the report
"""
print_lines = list()
print_lines.append(f'## <a name="guide"></a> Guide')
# Formatting guide for the build_sections
for section in self._build_sections.keys():
# Print guide header
label = self._build_sections[section]["label"]
print_lines.append(f'\n### <a name="guide-{section}"></a> {label}')
# Print guide lines
print_lines += self._build_sections[section]["guide_lines"]
# Formatting guide for the crawl_user_sections
for section in self._crawl_user_sections.keys():
# Print guide header
label = self._crawl_user_sections[section]["label"]
print_lines.append(f'\n### <a name="guide-{section}"></a> {label}')
# Print guide lines
print_lines += self._crawl_user_sections[section]["guide_lines"]
return print_lines
def _to_gml(self):
"""Save a networkx graph, G, to a gml file.
Normally, we should use nx.write_gml(G, output_fpath), but this does not
allow custom field names to be written to a file, specifically those with an
underscore. Also note that this function is very similar to Graph.to_gml(),
but it iterates over the networkx.Graph object instead of the states and
edges objects.
Returns:
True if there were no errors
"""
build_user = str(self.users["build_user"]["user_model"])
gml_fpath = pathlib.Path(self.output_path) / self._analyzed_gml_fname
try:
with open(gml_fpath, 'w') as f:
# Write header information
f.write('graph [\n')
f.write(' directed 1\n')
f.write(' multigraph 1\n')
f.write(f' buildUser "{build_user}"\n')
# Write node data
for state_id, state in self.full_graph.nodes(data=True):
f.write(' node [\n')
f.write(' id ' + str(state_id) + '\n')
f.write(' label "' + str(state_id) + '"\n')
for k, v in state.items():
clean_k, clean_v = Graph._clean_kv(k, v)
f.write(f' {clean_k} {clean_v}\n')
f.write(' ]\n')
# Write edge data
for source, target, edge in self.full_graph.edges(data=True):
f.write(' edge [\n')
f.write(' source ' + str(source) + '\n')
f.write(' target ' + str(target) + '\n')
for k, v in edge.items():
clean_k, clean_v = Graph._clean_kv(k, v)
f.write(f' {clean_k} {clean_v}\n')
f.write(' ]\n')
f.write(']')
return True
except Exception as e:
print(e)
return False
@staticmethod
def _to_json(fpath, dictionary):
"""Write dictionary to a filepath with an indent and error check.
Returns:
True if there were no errors
"""
try:
with open(fpath, 'w') as fp:
json.dump(dictionary, fp, indent=2)
return True
except Exception as e:
print(e)
return False
# --
# Public utility functions.
# May be overridden.
#
def plot_graphs(self):
"""Plotting the full graph and the graph of each crawl_user"""
print("Full graph:")
nx.draw(self.full_graph, pos=graphviz_layout(self.full_graph),
with_labels=True)
plt.show()
for user in self.users["crawl_users"].keys():
print(f"{user}'s graph:")
nx.draw(self.users["crawl_users"][user]["graph"],
pos=graphviz_layout(self.full_graph), with_labels=True)
plt.show()
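# --
# Minimal sketch of a custom analyzer (an illustration, not part of the
# original framework). It adds one hypothetical build section plus the matching
# analysis code; the section dict merges with BaseAnalyzer's in
# _init_sections(), as described in the module docstring above.
#
class ExampleAnalyzer(BaseAnalyzer):
    _build_sections = {
        "complexity":
            {"label": "Network Complexity",
             "guide_lines":
                 [
                     ' * **density** -- ratio of edges present in the full graph to the number of possible edges'
                 ]
             }
    }
    def _analyze_build(self):
        print_lines = super()._analyze_build()
        label = self._build_sections["complexity"]["label"]
        print_lines.append(f'\n## <a name="complexity"></a> {label}')
        print_lines.append(f'Graph density of the full graph: '
                           f'**{round(nx.density(self.full_graph), 3)}**\n')
        return print_lines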
|
from dataclasses import asdict
import pytest
from dacite import from_dict
from fhir2dataset.data_class import Element, Elements
from fhir2dataset.tools.fhirpath import multiple_search_dict
@pytest.fixture()
def resources():
resources = [
{
"resourceType": "Observation",
"id": "f001",
"text": {
"status": "generated",
"div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><p><b>Generated Narrative with Details</b></p><p><b>id</b>: f001</p><p><b>identifier</b>: 6323 (OFFICIAL)</p><p><b>status</b>: final</p><p><b>code</b>: Glucose [Moles/volume] in Blood <span>(Details : {LOINC code '15074-8' = 'Glucose [Moles/volume] in Blood', given as 'Glucose [Moles/volume] in Blood'})</span></p><p><b>subject</b>: <a>P. van de Heuvel</a></p><p><b>effective</b>: 02/04/2013 9:30:10 AM --> (ongoing)</p><p><b>issued</b>: 03/04/2013 3:30:10 PM</p><p><b>performer</b>: <a>A. Langeveld</a></p><p><b>value</b>: 6.3 mmol/l<span> (Details: UCUM code mmol/L = 'mmol/L')</span></p><p><b>interpretation</b>: High <span>(Details : {http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation code 'H' = 'High', given as 'High'})</span></p><h3>ReferenceRanges</h3><table><tr><td>-</td><td><b>Low</b></td><td><b>High</b></td></tr><tr><td>*</td><td>3.1 mmol/l<span> (Details: UCUM code mmol/L = 'mmol/L')</span></td><td>6.2 mmol/l<span> (Details: UCUM code mmol/L = 'mmol/L')</span></td></tr></table></div>", # noqa
},
"identifier": [
{
"use": "official",
"system": "http://www.bmc.nl/zorgportal/identifiers/observations",
"value": "6323",
}
],
"status": "final",
"code": {
"coding": [
{
"system": "http://loinc.org",
"code": "15074-8",
"display": "Glucose [Moles/volume] in Blood",
}
]
},
"subject": {"reference": "Patient/f001", "display": "P. van de Heuvel"},
"effectivePeriod": {"start": "2013-04-02T09:30:10+01:00"},
"issued": "2013-04-03T15:30:10+01:00",
"performer": [{"reference": "Practitioner/f005", "display": "A. Langeveld"}],
"valueQuantity": {
"value": 6.3,
"unit": "mmol/l",
"system": "http://unitsofmeasure.org",
"code": "mmol/L",
},
"interpretation": [
{
"coding": [
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation", # noqa
"code": "H",
"display": "High",
}
]
}
],
"referenceRange": [
{
"low": {
"value": 3.1,
"unit": "mmol/l",
"system": "http://unitsofmeasure.org",
"code": "mmol/L",
},
"high": {
"value": 6.2,
"unit": "mmol/l",
"system": "http://unitsofmeasure.org",
"code": "mmol/L",
},
}
],
}
]
return resources
@pytest.fixture()
def elements():
elements = Elements(
[
Element(col_name="code", fhirpath="Observation.code"),
Element(col_name="subject reference", fhirpath="Observation.subject.reference"),
]
)
return elements
@pytest.fixture()
def answers():
return [
[
[
{
"coding": [
{
"system": "http://loinc.org",
"code": "15074-8",
"display": "Glucose [Moles/volume] in Blood",
}
]
}
],
["Patient/f001"],
]
]
@pytest.mark.skip()
def test_multiple_search_dict(resources, elements, answers):
elements_empty = asdict(elements)
data_dict_resources = multiple_search_dict(resources, elements_empty)
for idx_resource, data_dict in enumerate(data_dict_resources):
elements = from_dict(data_class=Elements, data=data_dict)
for idx, element in enumerate(elements.elements):
assert element.value == answers[idx_resource][idx]
|
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This subpackage implements a simple Ephemeris using
the Python port of the Swiss Ephemeris (Pyswisseph).
The pyswisseph library must be already installed and
accessible.
"""
import flatlib
from . import swe
# Set default swefile path
swe.setPath(flatlib.PATH_RES + 'swefiles')
# Configure swefile path
def setPath(path):
swe.setPath(path)
|
# Generated by Django 4.0.1 on 2022-03-09 12:08
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('Assets', '0002_alter_assetlist_cname_alter_assetlist_middle_ware_and_more'),
]
operations = [
migrations.CreateModel(
name='BruteRegister',
fields=[
('id', models.AutoField(db_column='id', primary_key=True, serialize=False, verbose_name='序号')),
('exploit_name', models.CharField(db_column='exploit_name', max_length=32, verbose_name='负载名称')),
('category', models.CharField(choices=[('1', 'WEB暴破'), ('2', '系统暴破')], max_length=2, verbose_name='漏洞类型')),
('file_object', models.FileField(null=True, upload_to='BruteScan/ExploitFiles/', verbose_name='负载上传')),
('description', models.TextField(db_column='description', verbose_name='漏洞描述')),
('timestamp', models.DateField(db_column='timestamp', verbose_name='创建日期')),
],
options={
'verbose_name': '负载管理',
'verbose_name_plural': '负载管理',
},
),
migrations.CreateModel(
name='BruteResult',
fields=[
('id', models.AutoField(db_column='id', primary_key=True, serialize=False, verbose_name='序号')),
('task_id', models.IntegerField(db_column='task_id', verbose_name='对应工单序号')),
('task_name', models.CharField(db_column='task_name', max_length=32, verbose_name='工单名称')),
('ip_address', models.GenericIPAddressField(db_column='ip_address', verbose_name='目标地址')),
('port', models.IntegerField(db_column='port', null=True, verbose_name='目标端口')),
('result_flag', models.BooleanField(db_column='result_flag', verbose_name='测试结果')),
('timestamp', models.DateField(db_column='timestamp', verbose_name='结束日期')),
],
options={
'verbose_name': '工单结果',
'verbose_name_plural': '工单结果',
},
),
migrations.CreateModel(
name='BruteTasks',
fields=[
('id', models.AutoField(db_column='id', primary_key=True, serialize=False, verbose_name='编号')),
('name', models.CharField(db_column='name', max_length=32, verbose_name='任务名称')),
('targets', models.TextField(db_column='targets', verbose_name='目标群')),
('timestamp', models.DateField(db_column='timestamp', default=django.utils.timezone.now, verbose_name='创建日期')),
('exploit', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='BruteScan.bruteregister', verbose_name='暴破负载选择')),
('target', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Assets.assetlist', verbose_name='目标选择')),
],
options={
'verbose_name': '任务项',
'verbose_name_plural': '任务项',
},
),
]
|
from flask import Flask, request, jsonify
from sklearn.externals import joblib  # on newer scikit-learn, use `import joblib` instead
import traceback
import numpy as np
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
clf = joblib.load('./model/logreg.pkl')
def getParameters():
age = request.args.get('age')
sex = request.args.get('sex')
cigsPerDay = request.args.get('cigs')
totChol = request.args.get('chol')
sysBP = request.args.get('sBP')
diabetes = request.args.get('dia')
diaBP = request.args.get('dBP')
glucose = request.args.get('gluc')
heartRate = request.args.get('hRate')
params = {
'age': age,
'sex': sex,
'cigsPerDay': cigsPerDay,
'totChol': totChol,
'sysBP': sysBP,
'diabetes': diabetes,
'diaBP': diaBP,
'glucose': glucose,
'heartRate': heartRate
}
return (params)
@app.route('/predict', methods=['GET'])
def predict():
global clf
if clf:
try:
params = getParameters()
input = np.array(
[[
int(params['age']),
int(params['sex']),
int(params['cigsPerDay']),
float(params['totChol']),
float(params['sysBP']),
float(params['diabetes']),
float(params['diaBP']),
float(params['glucose']),
float(params['heartRate'])
]]
)
prediction = (clf.predict(input)).tolist()
probability = (clf.predict_proba(input)).tolist()
return jsonify(
{
'probability': probability,
'prediction': prediction,
'data': params
}
)
except Exception as e:
return jsonify({'error': str(e), 'trace': traceback.format_exc()})
else:
return("no model")
@app.route('/model')
def model():
global clf
coefficients = clf.coef_.tolist()
intercept = clf.intercept_.tolist()
return jsonify(
{
'model': 'Logistic Regression',
'coefficients': coefficients,
'intercept': intercept
}
)
@app.route('/')
def index():
return('<h1> JSON API for predicting Coronary Heart Disease in a patient. </h1>'+
'<h2> An example query is <a href="https://heartapi.herokuapp.com/predict?age=31&sex=1&cigs=5&chol=230&sBP=280&dia=0&dBP=90&gluc=87&hRate=84">this</a></h2>'+
'<h3>It uses parameters such as Age, Sex, Blood Pressure, Diabetes, Glucose and Heart rate for prediction. </h3>'+
'<h3> This api uses scikit-learn for training the ML model and Flask for serving the webservice. It is trained on a Logistic Regression model with ~88% accuracy</h3>'+
'<p>For more details view <a href="https://github.com/agoel00/HeartDiseasePredictionAPI">source</a></p>' +
'<p>Made with 💙 by <a href="https://github.com/agoel00">Anmol Goel</a></p>')
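# Minimal client sketch (an illustration, not part of the original service): it
# queries the /predict endpoint with the parameters documented in index().
# Assumes the app is served locally on Flask's default port 5000 and that the
# `requests` package is installed.
def example_predict_request():
    import requests
    params = {'age': 31, 'sex': 1, 'cigs': 5, 'chol': 230, 'sBP': 280,
              'dia': 0, 'dBP': 90, 'gluc': 87, 'hRate': 84}
    response = requests.get('http://127.0.0.1:5000/predict', params=params)
    return response.json()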
if __name__ == '__main__':
print('Model loaded successfully!')
app.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pprint import pprint
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from wrappers import SpellbookWrapper as SpellbookWrapper
import testconfig
url = testconfig.url
key = testconfig.key
secret = testconfig.secret
#test parameters
address = '1Robbk6PuJst6ot6ay2DcVugv8nxfJh5y'
block_height = 400000
registration_address = '1NC8LqAB99bYM9wVoD2grdYMMZAhjwy57A'
registration_block_height = 376000
registration_xpub = 'xpub6CUvzHsNLcxthhGJesNDPSh2gicdHLPAAeyucP2KW1vBKEMxvDWCYRJZzM4g7mNiQ4Zb9nG4y25884SnYAr1P674yQipYLU8pP5z8AmahmD'
distribution = [[u'1Robbk6PuJst6ot6ay2DcVugv8nxfJh5y', 100000, 0.1, 375786],
[u'1SansacmMr38bdzGkzruDVajEsZuiZHx9', 400000, 0.4, 375790],
[u'1Robbk6PuJst6ot6ay2DcVugv8nxfJh5y', 500000, 0.5, 375786]]
blockdistribute = SpellbookWrapper.SpellbookWrapper(url).blockdistribute()
pprint(blockdistribute.get_distributers())
pprint(blockdistribute.delete_distributer('testDistributer1', key, secret))
settings = {'distribution_source': 'LBL',
'registration_address': registration_address,
'registration_block_height': registration_block_height,
'registration_xpub': registration_xpub,
'distribution': distribution,
'minimum_amount': 12300,
'threshold': 150000,
'status': 'Active',
'visibility': 'Public',
'description': 'this is a test',
'creator': 'Wouter Glorieux',
'creator_email': 'info@valyrian.tech',
'youtube': 'C0DPdy98e4c',
'fee_address': '1Woutere8RCF82AgbPCc5F4KuYVvS4meW',
'fee_percentage': 1.0,
'maximum_transaction_fee': 7500,
'address_type': 'BIP44'}
pprint(blockdistribute.save_distributer('testDistributer1', settings, key, secret))
pprint(blockdistribute.save_distributer('', settings, key, secret))
pprint(blockdistribute.save_distributer('1', settings, key, secret))
pprint(blockdistribute.update_distribution('testDistributer1', key, secret))
pprint(blockdistribute.get_distributer('testDistributer1'))
pprint(blockdistribute.check_address('testDistributer1', '1Robbk6PuJst6ot6ay2DcVugv8nxfJh5y'))
#pprint(blockdistribute.do_distributing('testDistributer1'))
|
""" agent controllers package """
|
import cobra
from cobra.core import Model
from typing import Tuple, List
import pandas as pd
import subprocess
from warnings import warn
from ncmw.utils import (
get_default_configs,
get_default_medium,
get_biomass_reaction,
DATA_PATH,
)
def gapfill_model(
model: Model, eps: float = 1e-6, fill_model_base: str = "base", **kwargs
):
"""Adds reactions to the model, such that it has growth in the given medium
Args:
model (Model): Cobra model
eps ([type], optional): Minimum growth value. Defaults to 1e-6.
fill_model_base (str, optional): The base set of reactions to consider.
Defaults to "base".
kwargs (dict, optional): Additional kwargs for cobra gapfilling, see the cobrapy
documentation https://cobrapy.readthedocs.io/en/latest/gapfilling.html
Returns:
Model: Cobra model that has growth if algorithm succeeds
list: List of reactions that were added
"""
model = model.copy()
growth = model.slim_optimize()
if growth > eps:
# Already has growth, so gapfilling is unnecessary
return model, []
if isinstance(fill_model_base, Model):
fill_model = fill_model_base
elif fill_model_base == "base":
test_model = cobra.io.read_sbml_model(
DATA_PATH + "/gapfill_baseline/" + "model.xml"
)
fill_model = cobra.Model("universal_reactions")
fill_model.add_reactions(test_model.reactions)
else:
raise NotImplementedError()
if "demand_reactions" in kwargs:
demand_reactions = kwargs.pop("demand_reactions")
else:
demand_reactions = False
try:
solution = cobra.flux_analysis.gapfill(
model, fill_model, demand_reactions=demand_reactions, **kwargs
)[-1]
filled_model = model.copy()
filled_model.add_reactions(solution)
except:
warn("The model still has no growth... . We try an alternative")
filled_model = model.copy()
_, rec = gapfill_medium(model)
try:
solution = []
for r in rec:
objective = model.reactions.get_by_id(r).flux_expression
model.objective = objective
s = cobra.flux_analysis.gapfill(
model, fill_model, demand_reactions=demand_reactions, **kwargs
)[-1]
solution.extend(s)
filled_model.add_reactions(solution)
except:
warn("The model still has no growth... . We greadily add sink reactions...")
solution = []
for r in rec:
from cobra import Reaction
reaction = Reaction("SK" + r[2:])
solution.append("SK" + r[2:])
reaction.lower_bound = -0.1
reaction.upper_bound = 1000
filled_model.add_reactions([reaction])
reaction.add_metabolites({r[3:]: -1.0})
if filled_model.slim_optimize() > eps:
break
assert filled_model.slim_optimize() > eps, "Gapfilling failed..."
return filled_model, solution
def gapfill_medium(model: Model, eps: float = 1e-1) -> Tuple[Model, List]:
"""This will add the minimal set of exchange reactions such that the model
has more than eps growth.
Args:
model (Model): Cobra model which has less than eps growth
eps (float, optional): Value below which we consider the model to have zero
growth. Defaults to 1e-1.
Returns:
Model: Cobra model with extended medium
list: List of extended metabolites
"""
model_help = model.copy()
try:
if model_help.slim_optimize() > eps:
# Already feasible model.
return model, []
except:
pass
# Turn on all exchanges
for ex in model_help.exchanges:
ex.lower_bound = -10
# We can gapfill any exchange reaction that currently is not in the medium
gapfillable = set([ex.id for ex in model_help.exchanges]).difference(
set(model.medium.keys())
)
# print(f"There are {len(gapfillable)} many metabolites to fill the medium")
biomass = get_biomass_reaction(model_help)
# Binary variables: Theta_i
# theta_i is an indicator that is zero if the exchange should be added to the medium (and one if it stays closed)
thetas = []
for i in range(len(gapfillable)):
thetas.append(model_help.problem.Variable("theta_" + str(i), type="binary"))
# Constraints for exchanges, which are turned off for theta_i = 1
theta_constraints = []
for i, id in enumerate(gapfillable):
reaction = model_help.reactions.get_by_id(id)
min_bound = -10
reaction.lower_bound = min_bound
cons = model_help.problem.Constraint(
(reaction.flux_expression + min_bound * thetas[i]), lb=min_bound, ub=1000
)
theta_constraints.append(cons)
# Constraints for growth rates, which must be at least 10% MBR
constraint_growth = model_help.problem.Constraint(
biomass.flux_expression, lb=eps, ub=1000
)
# Adding new variables and constraints.
model_help.add_cons_vars(thetas)
model_help.add_cons_vars(theta_constraints)
model_help.add_cons_vars(constraint_growth)
# Objective is maximizing the number of turned-off exchanges, that is, the sum of the theta_i
objective = model_help.problem.Objective(sum(thetas), direction="max")
model_help.objective = objective
model_help.solver.update()
# Model optimization
sol = model_help.optimize()
# Change medium and check if it worked
new_exchanges = [
ex.id
for ex in model_help.exchanges
if ex.flux < 0 and ex.id not in model.medium
]
extended_medium = model.medium
for id in new_exchanges:
extended_medium[id] = 10
model.medium = extended_medium
# assert model.slim_optimize() > eps, "The medium extension failed for some reason..."
return model, new_exchanges
def set_default_configs_and_snm3_medium(
model: Model, configs: str = "default.json", medium="snm3.json"
):
"""This
Args:
model (Model): A cobra model
configs (str): File name of a config file in json format
medium (str): File name of a medium file in json format
Returns:
Model: Cobra model
"""
# Set bounds
configs_dict = get_default_configs(configs)
medium_dict = get_default_medium(medium)
for key, val in configs_dict.items():
key = key.split(".")
if "reactions" in key[0]:
for reaction in model.reactions:
reaction_dic = reaction.__dict__
if reaction_dic["_" + key[1]] != 0:
reaction_dic["_" + key[1]] = val
else:
reaction = model.reactions.get_by_id(key[0])
reaction_dic = reaction.__dict__
if reaction_dic["_" + key[1]] != 0:
reaction_dic["_" + key[1]] = val
# Set medium
exchanges = [ex.id for ex in model.exchanges]
model_medium = dict()
for key in medium_dict:
if key in exchanges:
model_medium[key] = medium_dict[key]
model.medium = model_medium
return model
def score_memote(model_path: str, report_path: str, solver_timeout: str = "120") -> None:
"""Generates a memote evaluation report for the model quality.
NOTE: This typically requires a rather consistent model, otherwise it breaks.
NOTE: This can take a while on a local computer, especially if the solver_timeout is high
Args:
model_path: Path to the model file, typically in SBML format
report_path: Path where the memote report is written
solver_timeout: Time in seconds until the solver times out.
"""
try:
p = subprocess.Popen(
[
"memote",
"report",
"snapshot",
"--filename",
report_path,
"--solver-timeout",
solver_timeout,
model_path,
]
)
return p
except Exception:
print("It seems that the model cannot be evaluated or the installation of memote is broken...")
print("You may consider to score the model online: https://memote.io/")
def create_consistent_model(model: Model) -> Tuple[Model, pd.DataFrame]:
"""This will create a more consistent model, using fastcc
(https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003424) and
also returns a summary report with the improvements.
Args:
model (cobra.core.Model): Cobra metabolic model class
Returns:
cobra.core.Model: Returns a more consistent cobra model.
pd.DataFrame: Returns some statistics that may be improved.
"""
blocked_reactions = cobra.flux_analysis.find_blocked_reactions(model)
met_formulas = cobra.manipulation.check_metabolite_compartment_formula(model)
mass_charge_balance = cobra.manipulation.check_mass_balance(model)
consistent_model = fastcc(model)
blocked_reactions_consistent = cobra.flux_analysis.find_blocked_reactions(
consistent_model
)
met_formulas_consistent = cobra.manipulation.check_metabolite_compartment_formula(
consistent_model
)
mass_charge_balance_consistent = cobra.manipulation.check_mass_balance(
consistent_model
)
data = {
"Blocked reactions": [
len(blocked_reactions),
len(blocked_reactions_consistent),
],
"Metabolite formula problems": [
len(met_formulas),
len(met_formulas_consistent),
],
"Mass charge balance violations": [
len(mass_charge_balance),
len(mass_charge_balance_consistent),
],
}
df = pd.DataFrame(data, index=["Original Model", "Consistent model"])
return consistent_model, df
def fastcc(model: Model) -> Model:
"""FastCC algorithm to increase model quality by resolving conflicts and removing
unnecessary e.g. blocked pathways
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003424
Returns:
cobra.core.Model: Returns a more consistent cobra model.
"""
consistent_model = cobra.flux_analysis.fastcc(model)
return consistent_model
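# Illustrative end-to-end sketch (not part of the original module): load an SBML
# model, apply default bounds and the SNM3 medium, extend the medium until the
# model can grow, and build a consistency report. The model path is hypothetical.
def example_pipeline(model_path: str = "models/example_model.xml") -> Model:
    model = cobra.io.read_sbml_model(model_path)
    model = set_default_configs_and_snm3_medium(model)
    model, added_exchanges = gapfill_medium(model)
    print(f"Exchanges added to the medium: {added_exchanges}")
    consistent_model, report = create_consistent_model(model)
    print(report)
    return consistent_model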
|