from django.http import HttpResponse, JsonResponse
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.generics import get_object_or_404
from rest_framework.parsers import JSONParser
from .serializers import GameSerializer
from .models import Game
@api_view(['GET', 'POST'])
def game_list(request, format=None):
if request.method == "GET":
games = Game.objects.all()
serialized = GameSerializer(games, many=True)
return Response(serialized.data)
elif request.method == "POST":
data = JSONParser().parse(request)
serialized = GameSerializer(data=data)
if serialized.is_valid():
serialized.save()
return Response(serialized.data, status=status.HTTP_201_CREATED)
return Response(serialized.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def game_detail(request, slug, format=None):
game = get_object_or_404(Game, slug=slug)
if request.method == "GET":
return Response(GameSerializer(game).data)
if request.method == "PUT":
data = JSONParser().parse(request)
serialized = GameSerializer(game, data=data)
if serialized.is_valid():
serialized.save()
return Response(serialized.data)
return Response(serialized.errors, status=status.HTTP_400_BAD_REQUEST)
if request.method == "DELETE":
game.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
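# A minimal sketch of exercising these views with DRF's test request factory;
# the `name` and `slug` payload fields are illustrative assumptions, not taken
# from the original GameSerializer.
from rest_framework.test import APIRequestFactory

def _example_game_list_call():
    factory = APIRequestFactory()
    # POST a hypothetical payload to the list view and inspect the response.
    request = factory.post('/games/', {'name': 'Chess', 'slug': 'chess'}, format='json')
    response = game_list(request)
    # 201 if the assumed fields satisfy the serializer, 400 with errors otherwise.
    return response.status_code, response.data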
|
import argparse
import shlex
import greenlet as gl
import numpy as np
import tensorflow as tf
from pong.mechanics.pong import Pong, S
from pong.mechanics import policies
from pong.mechanics import constants as c
from pong.utils import common
from pong.utils.tf_machinery import NeuralNetwork
class NNPolicy(policies.PolicyRight):
@classmethod
def create_from_commandline(cls, side, argv):
parser = argparse.ArgumentParser()
parser.add_argument("save_path")
parser.add_argument("--name", "-n", default="NeuralNetwork")
args = parser.parse_args(shlex.split(argv))
return cls(side, **vars(args))
def __init__(self, side, layers=[50, 50, 50], save_path=None,
name="Neural"):
super().__init__(side, name)
self.save_path = save_path
self.g = tf.Graph()
with self.g.as_default():
self.nn = NeuralNetwork(Pong.STATE_DIM, Pong.NUM_ACTIONS, layers)
if save_path is not None:
self.nn.load(save_path)
def get_action_right(self, state, *args):
return gl.getcurrent().parent.switch((self.side, state))
def evaluate_all(self, states):
return self.nn.predict_argmax(states)
class TargetNNPolicy(policies.PolicyRight):
@classmethod
def create_from_commandline(cls, side, argv):
parser = argparse.ArgumentParser()
parser.add_argument("save_path")
parser.add_argument("--name", "-n", default="NeuralNetwork")
args = parser.parse_args(shlex.split(argv))
return cls(side, **vars(args))
def __init__(self, side, layers=[50, 50, 50], save_path=None, name="Neural"):
super().__init__(side, name)
self.save_path = save_path
self.g = tf.Graph()
with self.g.as_default():
self.nn = NeuralNetwork(Pong.STATE_DIM, 1, layers)
if save_path is not None:
self.nn.load(save_path)
def get_action_right(self, state, *args):
return gl.getcurrent().parent.switch((self.side, state))
def evaluate_all(self, states):
y = states[:, S.R_Y]
low = y - 0.5 * c.HPL
high = y + 0.5 * c.HPL
T = self.nn.predict_raw(states)[:, 0]
A = np.zeros(states.shape[0], np.int32)
A[T < low] = c.A_DOWN
A[T > high] = c.A_UP
return A
class FastEvaluate:
def __init__(self, l_pol, r_pol, disc=False):
self.l_pol = l_pol
self.r_pol = r_pol
self.disc = disc
def run_episode(self, *args):
if self.disc:
sim = Pong(random_positions=True, f=self.r_pol.discretization)
else:
sim = Pong(random_positions=True)
while not sim.done:
state = sim.get_state()
l_a = self.l_pol.get_action(state)
r_a = self.r_pol.get_action(state)
sim.step(l_a, r_a)
return sim.win_string()
def run_episodes(self, n):
eval_states = np.zeros((n, Pong.STATE_DIM), np.float32)
threads = []
for i in range(n):
t = gl.greenlet(self.run_episode)
threads.append(t)
A = [None] * n
        alive = np.ones(n, dtype=bool)
        while alive.any():
            flags = {"l": np.zeros(n, dtype=bool), "r": np.zeros(n, dtype=bool)}
for i in range(n):
if alive[i]:
data = threads[i].switch(A[i])
if not threads[i].dead:
side, state = data
eval_states[i] = state
flags[side][i] = True
else:
alive[i] = False
self.score[data] += 1
A = np.zeros(n, np.int32)
if flags["l"].any():
A[flags["l"]] = self.l_pol.evaluate_all(eval_states[flags["l"]])
if flags["r"].any():
A[flags["r"]] = self.r_pol.evaluate_all(eval_states[flags["r"]])
def estimate(self, n):
self.score = {"l": 0, "r": 0, "draw": 0}
self.run_episodes(n)
return self.score
policies.POLICIES["nn"] = NNPolicy
policies.POLICIES["targetnn"] = TargetNNPolicy
|
from abc import ABC, abstractmethod
class MessageParser(ABC):
"""
Class which provides parsing services
"""
def __init__(self, input_data):
"""
Init section taking file with data for parsing
:param input_data: string or file with input data
"""
self.input_data = input_data
@abstractmethod
def parse_file_partially(self):
"""
Parsing file just for pieces (lines, or nodes) and yields actual parsed piece
:return: generated actual parsed piece of file
"""
pass
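# A minimal sketch of a concrete parser built on the ABC above, assuming the
# input is a path to a plain-text file and that a "piece" is a line; the class
# name and behaviour are illustrative assumptions only.
class LineMessageParser(MessageParser):
    def parse_file_partially(self):
        # Yield the file one stripped line at a time instead of loading it whole.
        with open(self.input_data, encoding="UTF-8") as handle:
            for line in handle:
                yield line.rstrip("\n")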
|
# 17
wordCounts = {
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
30: 'thirty',
40: 'forty',
50: 'fifty',
60: 'sixty',
70: 'seventy',
80: 'eighty',
90: 'ninety',
1000: 'onethousand'
}
def countChars(x: int) -> str:
try:
return wordCounts[x]
    except KeyError:
if x < 100:
if x % 10 == 0:
return wordCounts[x - x % 10]
else:
return wordCounts[x - x % 10] + wordCounts[x % 10]
else:
if x % 100 == 0:
return wordCounts[int(x / 100)] + 'hundred'
else:
return wordCounts[int(x / 100)] + 'hundred' + 'and' + countChars(x % 100)
s = ''
for i in range(1000):
s += countChars(i + 1)
print(len(s))
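# Quick sanity checks against the worked examples usually quoted for this
# problem: 342 ("three hundred and forty-two") uses 23 letters and 115
# ("one hundred and fifteen") uses 20, since spaces and hyphens are not counted.
assert len(countChars(342)) == 23
assert len(countChars(115)) == 20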
|
# -*- coding: utf-8 -*-
from csv import reader
from pathlib import Path
from typing import Dict, List, Tuple
from tqdm import tqdm
from resources.constants import SECTION_TYPES
def load_language_set(ranking_type: List[str], section_type: str, folder_name: str) -> List[str]:
print("INFO: Loading language subsets")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_data_folder = Path(root_folder).joinpath(folder_name)
sets = {}
for ranking in ranking_type:
ranking_folders = get_folders(path_data_folder)
if ranking_folders:
ranking_folder = ranking_folders.get(ranking)
section_folders = get_folders(ranking_folder)
sets[ranking] = get_languages(section_type, section_folders)
else:
print(f"ERROR: {folder_name} does not exist or is empty")
lengths = []
for ranking, sections in sets.items():
for section, languages in sections.items():
length = len(languages)
lengths.append(length)
language_set = []
if len(set(lengths)) == 1:
for ranking in sets.keys():
languages = sets[ranking][section_type]
language_set.extend(languages)
break
else:
print(f"ERROR: There is a ranking that does not contain the same number of languages")
return language_set
def load_data(ranking_type: str, section_type: str, folder_name: str,
split_names: bool) -> Dict[str, Dict[str, Dict[str, List[Tuple[str, str, float]]]]]:
print("INFO: Loading data")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_data_folder = Path(root_folder).joinpath(folder_name)
ranking_folders = get_folders(path_data_folder)
data = {}
if ranking_folders:
if ranking_type == "all":
for ranking_name, ranking_folder in tqdm(ranking_folders.items(), desc=f"Loading data from all rankings"):
section_folders = get_folders(ranking_folder)
data[ranking_name] = get_files(section_type, section_folders, split_names)
else:
ranking_folder = ranking_folders.get(ranking_type)
section_folders = get_folders(ranking_folder)
data[ranking_type] = get_files(section_type, section_folders, split_names)
else:
print(f"ERROR: {folder_name} does not exist or is empty")
return data
def get_folders(path_data_folder: Path) -> Dict[str, Path]:
folders = {}
for item in path_data_folder.glob("*"):
if item.is_dir():
name = item.name
folders[name] = item
return folders
def get_languages(section_type: str, folders: Dict[str, Path]) -> Dict[str, List[str]]:
    languages = {}
    if section_type == "all":
        for section in SECTION_TYPES:
            folder = folders.get(section)
            # Build a fresh list per section so sections do not share filenames.
            languages[section] = [file.stem for file in folder.iterdir()]
    else:
        folder = folders.get(section_type)
        languages[section_type] = [file.stem for file in folder.iterdir()]
    return languages
def get_files(section_type: str, folders: Dict[str, Path], split_names: bool) -> Dict[str, Dict[str, List[Tuple[str, str, float]]]]:
data = {}
if section_type == "all":
for section in SECTION_TYPES:
folder = folders.get(section)
files = read_files(folder, split_names)
data[section] = files
else:
folder = folders.get(section_type)
files = read_files(folder, split_names)
data[section_type] = files
return data
def read_files(folder: Path, split_names: bool) -> Dict[str, List[Tuple[str, str, float]]]:
csv_files = {}
for file in folder.iterdir():
with open(file, 'rt', encoding="utf-8", newline='') as csv_file:
filename = file.stem
file_content = reader(csv_file, dialect='excel')
results = []
for row in file_content:
if split_names:
name = row[0].strip().split(" (")[0]
else:
name = row[0].strip()
software = row[1].strip()
score = float(row[2].strip())
position = (name, software, score)
results.append(position)
csv_files[filename] = results
return csv_files
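# A minimal usage sketch, assuming a data folder laid out as
# <folder_name>/<ranking>/<section>/<language>.csv with rows of
# "name, software, score"; the argument values below are illustrative only.
if __name__ == "__main__":
    rankings = load_data(ranking_type="all", section_type="all",
                         folder_name="data", split_names=True)
    for ranking_name, sections in rankings.items():
        for section_name, files in sections.items():
            print(ranking_name, section_name, len(files), "files")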
|
import math
import random
import numpy as np
import utils
from utils import VECTORLENGTH
class LT:
def __init__(self): # Initialize member fields
self._weights = [] # holds probability of the weights
self._setWeights() # sets the probability of the weights
"""if not LT._isBinomCalculated:
self.initArrayOfBinom()"""
def _setWeights(self): # set the probability for the weights and save it in self.weights
        self._weights = []  # clear the array before (re)filling it
self._weights.append(1 / VECTORLENGTH)
for i in range(1, VECTORLENGTH): # for every variable:
w = 1 / (i * (i + 1)) # calculate Probability
self._weights.append(w) # write it in Array
    def _getWeight(self):  # draw a weight according to its probability distribution
randomNum = random.random() # get a random float between 0 and 1
index = 0 # set the index to 0
while True: # while randomNum > 0:
randomNum -= self._weights[index] # subtract probability of weight at index
if randomNum <= 0: # if randomNum <= 0 break.
break
index += 1 # else increase index by one.
        return index + 1  # the chosen weight (degree) is index + 1
def getVector(self): # returns a vector based on the weight
weight = self._getWeight() # get a random weight
vector = self._getVector(weight)
binaryVector = utils.intToBin(vector, VECTORLENGTH)
# binaryVector = BinaryVector.intToBin(weight, self._vectorSize)
        return vector, binaryVector  # the integer vector and its binary representation of the given length
def _getVector(self, weight: int):
arr = np.arange(VECTORLENGTH)
np.random.shuffle(arr)
a = 0
for i in range(weight):
a += int(math.pow(2, arr[i]))
return a
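# Sanity-check sketch: the table built in _setWeights() is the ideal soliton
# distribution (rho(1) = 1/N, rho(d) = 1/(d*(d-1)) for d >= 2), whose terms
# telescope to a total probability of 1, so _getWeight() always terminates.
# VECTORLENGTH and utils.intToBin come from the project's utils module.
if __name__ == "__main__":
    lt = LT()
    assert abs(sum(lt._weights) - 1.0) < 1e-9   # the distribution is normalised
    value, bits = lt.getVector()                # (integer vector, binary string)
    print(value, bits)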
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 17:14:13 2019
@author: avneesh
"""
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import torch
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
# inception_firstlayer = models.inception_v3(pretrained = True) #load just the first conv layer
        inception = models.inception_v3(pretrained=True, aux_logits=False)  # pretrained backbone without the auxiliary classifier
        modules = list(inception.children())[0:16]  # keep the first 16 child modules, dropping the final classification layer
# w1 = inception_firstlayer.state_dict()['weight'][:,0,:,:]
# w2 = inception_firstlayer.state_dict()['weight'][:,1,:,:]
# w3 = inception_firstlayer.state_dict()['weight'][:,2,:,:]
# w4 = w1+w2+w3 # add the three weigths of the channels
# w4 = w4.unsqueeze(1)# make it 4 dimensional
first_conv = nn.Conv2d(1, 3, kernel_size=(1,1), padding = (1,1)) #create a new conv layer
# print(len(first_conv.weight))
# first_conv.weight = torch.nn.Parameter(w4, requires_grad=True) #initialize the conv layer's weigths with w4
# first_conv.bias = torch.nn.Parameter(inception_firstlayer.state_dict()['bias'], requires_grad=True) #initialize the conv layer's weigths with vgg's first conv bias
self.first_convlayer = first_conv #the first layer is 1 channel (Grayscale) conv layer
self.inception = nn.Sequential(*modules)
self.fc1 = nn.Linear(20480, 1000)
self.fc2 = nn.Linear(1002, 2)
def forward(self, x, y):
x=self.first_convlayer(x)
x=self.inception(x)
x = F.relu(self.fc1(x.view(x.size(0), -1)), inplace=True) #flatten
x = torch.cat([x, y], dim=1)
x = self.fc2(x)
return x
m = Model()
#print(m)
|
#!/usr/bin/env python
# Recal VCF, using likelihood column.
# Only if it's a C-T or G-A transition.
#
# @author: James Boocock
# @date: 16/03/2015
#
import argparse
import sys
import vcf
import collections
import copy
def is_ga_or_ct(ref, alt):
    # Only biallelic SNPs can be C<->T or G<->A transitions.
    if len(ref) == 1 and len(alt) == 1:
        alt = alt[0]
        if (ref == "C" and alt == "T") or (ref == "T" and alt == "C"):
            return True
        if (ref == "G" and alt == "A") or (ref == "A" and alt == "G"):
            return True
    # Indels, multi-allelic sites and transversions are left unchanged.
    return False
def recal_vcf(input_vcf,min_dp=2):
vcf_reader = vcf.Reader(open(input_vcf,'r'),strict_whitespace=True)
vcf_writer = vcf.Writer(sys.stdout, vcf_reader)
for record in vcf_reader:
temp_record = record
for i, sample in enumerate(record.samples):
idx = 0
ref = record.REF
alt = record.ALT
#record.samples[i].data = collections.namedtuple("CallData",f_keys)
# print(sample)
f_keys = (record.samples[i].data._fields)
f_vals = [ record.samples[i][vx] for vx in (f_keys)]
handy_dict = dict(zip(f_keys,f_vals))
if(is_ga_or_ct(ref,alt)):
pl = sample['PL']
if( pl is not None ):
pheno_l = [int(o) for o in pl]
if(pheno_l[0] < pheno_l[2]):
handy_dict['GT'] = '0/0'
else:
handy_dict['GT'] = '1/1'
new_values = [ handy_dict['GT'] if x == 'GT' else None for x in f_keys]
record.samples[i].data = record.samples[i].data._make(new_values)
vcf_writer.write_record(record)
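# For context, a minimal sketch of the genotype decision applied above: for a
# biallelic site the Phred-scaled PL field is ordered (hom-ref, het, hom-alt),
# so the call becomes 0/0 when the hom-ref likelihood is the smaller (more
# probable) of the two homozygous states, and 1/1 otherwise. Illustrative only.
def recalled_genotype(pl):
    pheno_l = [int(o) for o in pl]
    return '0/0' if pheno_l[0] < pheno_l[2] else '1/1'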
def main():
parser = argparse.ArgumentParser(description="Recal the VCF file")
parser.add_argument("input_vcf",help="Input VCF that we are going to recalfrom")
parser.add_argument("-m","--mapping_output",dest="",help="")
args = parser.parse_args()
recal_vcf(args.input_vcf)
if __name__=="__main__":
main()
|
from __future__ import unicode_literals
import datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.utils.timezone import make_aware, get_default_timezone
from happenings.models import Event
from happenings.templatetags.happenings_tags import upcoming_events
class UpcomingEventsTemplateTagTest(TestCase):
"""
Test that upcoming_events template tag returns upcoming_events.
For more thorough tests see integration_tests.test_upcoming_events.
"""
def setUp(self):
self.url = reverse('calendar:list')
def test_upcoming_events_no_events(self):
events = upcoming_events()
self.assertEqual(len(events), 1)
self.assertEqual(events['upcoming_events'], [])
def test_events_same_date(self):
"""Created June 13, 2014 after a bug was found."""
user = User.objects.create_user(
'foo', 'bar@example.com', 'secret'
)
d = make_aware(
datetime.datetime(2019, 5, 3, 0, 0, 0, 0),
get_default_timezone()
)
event = Event.objects.create(
start_date=d,
end_date=d,
all_day=True,
created_by=user,
title="The big event",
description="Amazing event",
repeat="NEVER",
)
event2 = Event.objects.create(
start_date=d,
end_date=d,
all_day=True,
created_by=user,
title="The other event",
description="Incredible event",
repeat="NEVER",
)
event.save()
event2.save()
events = upcoming_events(finish=2000)
self.assertEqual(len(events['upcoming_events']), 2)
|
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from datetime import datetime
import requests
import logging
MAX_RETRIES = 3
class ServiceXAdapter:
def __init__(self, endpoint, file_prefix=None):
self.endpoint = endpoint
self.file_prefix = file_prefix
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
def post_status_update(self, status_msg, severity="info"):
success = False
attempts = 0
while not success and attempts < MAX_RETRIES:
try:
requests.post(self.endpoint + "/status", data={
"timestamp": datetime.now().isoformat(),
"source": "DID Finder",
"severity": severity,
"info": status_msg
})
success = True
except requests.exceptions.ConnectionError:
                self.logger.exception(f'Connection error to ServiceX App. Will retry '
                                      f'(try {attempts + 1} out of {MAX_RETRIES})')
attempts += 1
if not success:
self.logger.error(f'After {attempts} tries, failed to send ServiceX App a status '
f'message: {str(status_msg)} - Ignoring error.')
def _prefix_file(self, file_path):
return file_path if not self.file_prefix else self.file_prefix+file_path
def put_file_add(self, file_info):
success = False
attempts = 0
while not success and attempts < MAX_RETRIES:
try:
mesg = {
"timestamp": datetime.now().isoformat(),
"file_path": self._prefix_file(file_info['file_path']),
'adler32': file_info['adler32'],
'file_size': file_info['file_size'],
'file_events': file_info['file_events']
}
requests.put(self.endpoint + "/files", json=mesg)
self.logger.info(f"Metric: {json.dumps(mesg)}")
success = True
except requests.exceptions.ConnectionError:
                self.logger.exception(f'Connection error to ServiceX App. Will retry '
                                      f'(try {attempts + 1} out of {MAX_RETRIES})')
attempts += 1
if not success:
self.logger.error(f'After {attempts} tries, failed to send ServiceX App a put_file '
f'message: {str(file_info)} - Ignoring error.')
def post_preflight_check(self, file_entry):
success = False
attempts = 0
while not success and attempts < MAX_RETRIES:
try:
requests.post(self.endpoint + "/preflight", json={
'file_path': file_entry['file_path']
})
success = True
except requests.exceptions.ConnectionError:
                self.logger.exception(f'Connection error to ServiceX App. Will retry '
                                      f'(try {attempts + 1} out of {MAX_RETRIES})')
attempts += 1
if not success:
            self.logger.error(f'After {attempts} tries, failed to send ServiceX App a preflight '
                              f'check message: {str(file_entry)} - Ignoring error.')
def put_fileset_complete(self, summary):
success = False
attempts = 0
while not success and attempts < MAX_RETRIES:
try:
requests.put(self.endpoint + "/complete", json=summary)
success = True
except requests.exceptions.ConnectionError:
                self.logger.exception(f'Connection error to ServiceX App. Will retry '
                                      f'(try {attempts + 1} out of {MAX_RETRIES})')
attempts += 1
if not success:
            self.logger.error(f'After {attempts} tries, failed to send ServiceX App a fileset '
                              f'complete message: {str(summary)} - Ignoring error.')
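# A minimal usage sketch, assuming a ServiceX app reachable at the endpoint
# below; the URL and the file metadata values are illustrative assumptions only.
if __name__ == "__main__":
    adapter = ServiceXAdapter("http://servicex-app:8000/servicex/transformation/1234")
    adapter.post_status_update("DID finder started")
    adapter.put_file_add({
        "file_path": "/data/example.root",
        "adler32": "deadbeef",
        "file_size": 1024,
        "file_events": 100
    })
    adapter.put_fileset_complete({"files": 1, "files-skipped": 0, "total-events": 100})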
|
__version__ = "1.3.0.1"
|
import requests
from bs4 import BeautifulSoup
import locale
import json
import csv
def merge_two_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
print("\n\nGetting User Page\n\n")
page = requests.get("https://www.fanfiction.net/u/2583361/")
print("\n\nGetting User Page - Completed\n\n")
# page.content = page.content.encode('utf-8')
soup = BeautifulSoup(page.content, 'html.parser')
fav_stories_div = soup.find_all('div', class_='z-list favstories')
stories_metadata = []
for n, story in enumerate(fav_stories_div):
story_elements = list(story.children)
desc_with_metadata = str(story.find_all('div', class_='z-indent z-padtop')[0])
desc_metadata = str(story.find_all('div', class_='z-indent z-padtop')[0].find_all('div', class_="z-padtop2 xgray")[0])
desc_metadata_soup = story.find_all('div', class_='z-indent z-padtop')[0].find_all('div', class_="z-padtop2 xgray")[0]
desc_without_metadata = desc_with_metadata.replace(desc_metadata, "")
desc_without_metadata_soup = BeautifulSoup(desc_without_metadata, 'html.parser')
desc_metadata_list = [x.strip() for x in desc_metadata_soup.get_text().split(' - ')]
story_metadata = {
"story_name": story_elements[0].get_text(),
"story_start_url_relative": story_elements[0].get('href'),
"story_end_url_relative": story_elements[2].get('href'),
"author_name": story_elements[4].get_text(),
"author_url_relative": story_elements[4].get('href'),
"reviews_url_relative": story.find_all('a', class_='reviews')[0].get('href'),
"story_summary": desc_without_metadata_soup.get_text()
}
if "Crossover" in desc_metadata_list[0]:
del desc_metadata_list[0]
story_metadata = merge_two_dicts(story_metadata, {
"story_crossover": "Y",
"story_parent": desc_metadata_list[0].split(' & ')
})
else:
story_metadata = merge_two_dicts(story_metadata, {
"story_crossover": "N",
"story_parent": [desc_metadata_list[0]]
})
if "Chapters" in desc_metadata_list[3]:
desc_metadata_list.insert(3, "Unknown")
if "Published" in desc_metadata_list[9]:
desc_metadata_list.insert(9, "Updated: Unknown")
if "Complete" == desc_metadata_list[-1]:
del desc_metadata_list[-1]
story_metadata = merge_two_dicts(story_metadata, {
"story_complete": "Y"
})
else:
story_metadata = merge_two_dicts(story_metadata, {
"story_complete": "N"
})
if "Published" in desc_metadata_list[-1]:
desc_metadata_list.append("Unknown")
if len(desc_metadata_list) == 12:
try:
desc_metadata_dict = {
"story_parent": desc_metadata_list[0],
"story_rating": desc_metadata_list[1].split(': ')[1],
"story_language": desc_metadata_list[2],
"story_genre": desc_metadata_list[3],
"story_chapter_count": desc_metadata_list[4].split(': ')[1],
"story_word_count": locale.atoi(desc_metadata_list[5].split(': ')[1]),
"story_review_count": desc_metadata_list[6].split(': ')[1],
"story_favourite_count": locale.atoi(desc_metadata_list[7].split(': ')[1]),
"story_follow_count": locale.atoi(desc_metadata_list[8].split(': ')[1]),
"story_last_updated_date": desc_metadata_list[9].split(': ')[1],
"story_published_date": desc_metadata_list[10].split(': ')[1],
"story_main_characters": desc_metadata_list[11].split(', ')
}
except IndexError as e:
            print(e, len(desc_metadata_list), story_metadata, desc_metadata_list)
break
story_metadata = merge_two_dicts(story_metadata, desc_metadata_dict)
else:
        print(len(desc_metadata_list), story_metadata, desc_metadata_list)
stories_metadata.append(story_metadata)
with open('fav_stories_metadata.json', 'w') as outfile_json:
json.dump(stories_metadata, outfile_json)
|
import Mask_class as MC
import os
import time
import glob
import cv2 as cv
import argparse
import numpy as np
# start = time.clock()
# time.sleep(5)
# end = time.clock()
# print ("times used is : " , end - start)
if __name__ == "__main__":
net = MC.Mask()
parser = argparse.ArgumentParser()
parser.add_argument('--path', help="dataset for evaluation")
arg = parser.parse_args()
kitti_path = arg.path
image_paths = glob.glob(os.path.join(kitti_path + '/image_0/', '*.png'))
print ("image_path.size is : ", len(image_paths))
image_paths = sorted(image_paths)
sum_of_mask_time = 0.
sum_of_mask_img = len(image_paths)
for image_path in image_paths:
start = time.time()
image = cv.imread(image_path)
if len(image.shape) == 2:
im1 = np.zeros((image.shape[0], image.shape[1], 3))
im1[:, :, 0] = image
im1[:, :, 1] = image
im1[:, :, 2] = image
image = im1
mask = np.zeros((image.shape[0], image.shape[1]))
results = net.model.detect([image], verbose = 0)
# Visualize results
r = results[0]
i = 0
num_mask = 1
for roi in r['rois']:
if net.class_names[r['class_ids'][i]] == 'person':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
if net.class_names[r['class_ids'][i]] == 'bicycle':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
if net.class_names[r['class_ids'][i]] == 'car':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
            if net.class_names[r['class_ids'][i]] == 'motorcycle':
                image_m = r['masks'][:,:,i]
                mask[image_m == 1] = num_mask
                num_mask += 1
# if self.class_names[r['class_ids'][i]] == 'airplane':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
if net.class_names[r['class_ids'][i]] == 'bus':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
# if self.class_names[r['class_ids'][i]] == 'train':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
if net.class_names[r['class_ids'][i]] == 'truck':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
if net.class_names[r['class_ids'][i]] == 'boat':
image_m = r['masks'][:,:,i]
mask[image_m == 1] = num_mask
num_mask += 1
# if self.class_names[r['class_ids'][i]] == 'bird':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'cat':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'dog':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'horse':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'sheep':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'cow':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'elephant':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'bear':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'zebra':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
# if self.class_names[r['class_ids'][i]] == 'giraffe':
# image_m = r['masks'][:,:,i]
# mask[image_m == 1] = 1.
i+=1
end = time.time()
print("each time : ", end - start)
sum_of_mask_time = sum_of_mask_time + end - start
#print('GetSeg mask shape:',mask.shape)
# mask = (mask*255).astype(np.uint8)
# im_res = np.zeros((h,w,3))
# im_res[:,:,0]=mask
# im_res[:,:,1]=mask
# im_res[:,:,2]=mask
# print ("显示Mask");
# plt.figure("Mask_result")
# plt.imshow(im_res)
# plt.axis('on')
# plt.title('image')
# plt.show()
# print ("Save mask")
# save_path = dir_path_epo + "_mask/" + save_txt_name
# print ("save mask in ", save_path)
# np.savetxt(save_path, mask, fmt = "%d")
# return mask
mean_of_mask_time = sum_of_mask_time / sum_of_mask_img
Hz_of_mask = 1 / mean_of_mask_time
print ("mean_of_mask_time is : ", mean_of_mask_time)
print ("Hz_of_mask is : ", Hz_of_mask)
|
import click
from tqdm import tqdm
from os.path import join as join_path
import skimage.io
import torch
import os.path
import numpy as np
import natural.number
import models
import model_utils
import utils
import constants as c
import image_utils
from utils import Params, Timer, ensure_dir_exists
@click.command()
@click.option('--dataset_a', type=click.Path(exists=True), help='Path to dataset A')
@click.option('--dataset_b', type=click.Path(exists=True), help='Path to dataset B')
@click.option('--use_cuda/--no_cuda', default=False, show_default=True)
@click.option('--checkpoint_path', default='checkpoint', show_default=True, type=click.Path(exists=True, dir_okay=True), help='Checkpoint path')
@click.option('--test_save_path', default='test-output', show_default=True, help='Folder to save test images')
def test(**kwargs):
params = Params(kwargs)
print('Params:')
params.pretty_print()
print()
use_cuda = params.use_cuda
if use_cuda:
assert torch.cuda.is_available()
with Timer('Loading models'):
gen_a_to_b, gen_b_to_a = load_models_for_evaluation(params.checkpoint_path)
print('#weights in gen_a_to_b:', natural.number.number(model_utils.compute_num_weights(gen_a_to_b)))
print('#weights in gen_b_to_a:', natural.number.number(model_utils.compute_num_weights(gen_b_to_a)))
if use_cuda:
gen_a_to_b.cuda()
gen_b_to_a.cuda()
a_to_b_save_path = join_path(params.test_save_path, c.A_TO_B_GEN_TEST_DIR)
b_to_a_save_path = join_path(params.test_save_path, c.B_TO_A_GEN_TEST_DIR)
ensure_dir_exists(a_to_b_save_path)
ensure_dir_exists(b_to_a_save_path)
filenames = utils.listdir(params.dataset_a, extensions=('.png', '.jpg'))
for filename in tqdm(filenames, desc='A to B'):
filepath = join_path(params.dataset_a, filename)
a = image_utils.load_image(filepath)
b_fake = generate_fake_image(image=a, generator_net=gen_a_to_b, use_cuda=use_cuda)
root, ext = os.path.splitext(filename)
a_filepath = join_path(a_to_b_save_path, '{}-a{}'.format(root, ext))
skimage.io.imsave(a_filepath, a)
a_to_b_filepath = join_path(a_to_b_save_path, '{}-a-to-b{}'.format(root, ext))
skimage.io.imsave(a_to_b_filepath, b_fake)
filenames = utils.listdir(params.dataset_b, extensions=('.png', '.jpg'))
for filename in tqdm(filenames, desc='B to A'):
filepath = join_path(params.dataset_b, filename)
b = image_utils.load_image(filepath)
a_fake = generate_fake_image(image=b, generator_net=gen_b_to_a, use_cuda=use_cuda)
root, ext = os.path.splitext(filename)
b_filepath = join_path(b_to_a_save_path, '{}-b{}'.format(root, ext))
skimage.io.imsave(b_filepath, b)
b_to_a_filepath = join_path(b_to_a_save_path, '{}-b-to-a{}'.format(root, ext))
skimage.io.imsave(b_to_a_filepath, a_fake)
def load_models_for_evaluation(checkpoint_path):
assert os.path.exists(checkpoint_path), checkpoint_path
gen_a_to_b = models.GeneratorNet()
gen_b_to_a = models.GeneratorNet()
gen_a_to_b.load_state_dict(torch.load(join_path(checkpoint_path, c.A_TO_B_GEN_DIR)))
gen_b_to_a.load_state_dict(torch.load(join_path(checkpoint_path, c.B_TO_A_GEN_DIR)))
gen_a_to_b.eval() # Evaluation mode.
gen_b_to_a.eval()
return gen_a_to_b, gen_b_to_a
def generate_fake_image(image, generator_net, use_cuda):
image = image_utils.normalize(image)
image = image[np.newaxis, :, :, :]
image = np.transpose(image, (0, 3, 1, 2)) # (batch, y, x, channel) -> (batch, channel, y, x)
image = torch.from_numpy(image)
if use_cuda:
image = image.cuda()
image = torch.autograd.Variable(image, requires_grad=False)
fake = generator_net(image)
fake = fake[0, :, :, :]
if use_cuda:
fake = fake.cpu()
fake = fake.data.numpy()
fake = np.transpose(fake, (1, 2, 0)) # (channel, y, x) -> (y, x, channel)
fake = image_utils.unnormalize(fake[:, :, :])
return fake
if __name__ == '__main__':
test()
|
# -*- coding: utf-8 -*-
from gluon.http import HTTP
from gluon.storage import Storage
class S3Config(Storage):
def __init__(self, T):
self.auth = Storage()
self.base = Storage()
self.database = Storage()
self.gis = Storage()
self.mail = Storage()
self.L10n = Storage()
self.security = Storage()
self.T = T
# Auth settings
def get_auth_hmac_key(self):
return self.auth.get("hmac_key", "akeytochange")
def get_auth_registration_requires_verification(self):
return self.auth.get("registration_requires_verification", False)
def get_auth_registration_requires_approval(self):
return self.auth.get("registration_requires_approval", False)
def get_auth_openid(self):
return self.auth.get("openid", False)
# Base settings
def get_base_public_url(self):
return self.base.get("public_url", "http://127.0.0.1:8000")
def get_base_migrate(self):
return self.base.get("migrate", True)
def get_base_prepopulate(self):
return self.base.get("prepopulate", True)
# Database settings
def get_database_string(self):
db_type = self.database.get("db_type", "sqlite")
pool_size = self.database.get("pool_size", 0)
if (db_type == "sqlite"):
db_string = "sqlite://storage.db"
elif (db_type == "mysql"):
db_string = "mysql://%s:%s@%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("database", "sahana"))
elif (db_type == "postgres"):
db_string = "postgres://%s:%s@%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("database", "sahana"))
else:
raise HTTP(501, body="Database type '%s' not recognised - please correct file models/000_config.py." % db_type)
if pool_size:
return (db_string, pool_size)
else:
return db_string
# GIS (Map) Settings
def get_gis_locations_hierarchy(self):
T = self.T
gis_location_hierarchy = {
"L0":T("Country"),
"L1":T("Province"),
"L2":T("District"),
"L3":T("Town"),
"L4":T("Village")
}
return self.gis.get("locations_hierarchy", gis_location_hierarchy)
def get_gis_map_selector(self):
return self.gis.get("map_selector", True)
def get_gis_display_l0(self):
return self.gis.get("display_L0", False)
def get_gis_display_l1(self):
return self.gis.get("display_L1", True)
def get_gis_edit_l0(self):
return self.gis.get("edit_L0", True)
def get_gis_edit_l1(self):
return self.gis.get("edit_L1", True)
def get_gis_edit_l2(self):
return self.gis.get("edit_L2", True)
def get_gis_edit_l3(self):
return self.gis.get("edit_L3", True)
def get_gis_edit_l4(self):
return self.gis.get("edit_L4", True)
def get_gis_edit_l5(self):
return self.gis.get("edit_L5", True)
def get_gis_geoserver_url(self):
return self.gis.get("geoserver_url", "http://localhost/geoserver")
    def get_gis_geoserver_username(self):
return self.gis.get("geoserver_username", "admin")
def get_gis_geoserver_password(self):
return self.gis.get("geoserver_password", "password")
def get_gis_spatialdb(self):
return self.gis.get("spatialdb", False)
# L10N Settings
def get_L10n_countries(self):
return self.L10n.get("countries", "")
def get_L10n_default_language(self):
return self.L10n.get("default_language", "en")
def get_L10n_display_toolbar(self):
return self.L10n.get("display_toolbar", True)
def get_L10n_languages(self):
return self.L10n.get("languages", { "en":self.T("English") })
def get_L10n_utc_offset(self):
return self.L10n.get("utc_offset", "UTC +0000")
# Mail settings
def get_mail_server(self):
return self.mail.get("server", "127.0.0.1:25")
def get_mail_server_login(self):
return self.mail.get("login", False)
def get_mail_sender(self):
return self.mail.get("sender", "sahana@your.org")
def get_mail_approver(self):
return self.mail.get("approver", "useradmin@your.org")
# Security Settings
def get_security_policy(self):
return self.security.get("policy", 1)
def get_security_map(self):
return self.security.get("map", False)
# Active modules list
def has_module(self, module_name):
if not self.modules:
_modules = [
"admin", # Admin
"gis", # GIS
"doc", # Document Library
"pr", # Person Registry
"org", # Organisation Registry
"budget", # Budgetting
"cr", # Camp Registry
"delphi", # Delphi Decision Maker
"dvi", # Disaster Victim Identification
"dvr", # Disaster Victim Registry
"hms", # Hospital Management
#"lms", # Logistics
"mpr", # Missing Person Registry
"msg", # Messaging
#"nim", # Nursing Information Manager
"rms", # Request Management
"ticket", # Ticketing
"vol" # Volunteer Management
]
else:
_modules = self.modules
return module_name in _modules
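# A minimal usage sketch of overriding and reading settings; the lambda stands
# in for web2py's T() translator and the override values are illustrative only.
if __name__ == "__main__":
    settings = S3Config(T=lambda s: s)
    settings.database.db_type = "postgres"
    settings.database.username = "sahana_user"
    print(settings.get_database_string())   # postgres://sahana_user:password@localhost/sahana
    print(settings.has_module("gis"))       # True: gis is in the default module list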
|
from model.group import Group
from model.contact import Contact
import random
from random import randrange
def test_add_some_contact_to_some_group(app, ormdb):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="First name", middlename="MiddleName", lastname="LastName"))
if app.group.count() == 0:
app.group.create(Group(name="name_edited", header="header_edited", footer="footer_edited"))
# Get contacts that are not in groups
contacts = ormdb.get_contacts_without_group()
if len(contacts) == 0:
# Search for contacts that are at least not in one group
contacts = ormdb.get_contacts_without_at_least_one_group()
if len(contacts) == 0:
app.contact.create(Contact(firstname="First name with empty group", middlename="MiddleName", lastname="LastName"))
contacts = ormdb.get_contacts_without_group()
assert len(contacts) == 1
# Perform adding test
c_index = randrange(len(contacts))
groups = ormdb.get_groups_without_contact(contacts[c_index])
g_index = randrange(len(groups))
contacts_in_group_before = ormdb.get_contacts_in_group(groups[g_index])
app.contact.add_contact_to_group(contacts[c_index], groups[g_index])
contacts_in_group_after = ormdb.get_contacts_in_group(groups[g_index])
contacts_in_group_before.append(contacts[c_index])
assert sorted(contacts_in_group_before, key=Contact.id_or_max) == sorted(contacts_in_group_after, key=Contact.id_or_max)
def test_delete_contact_from_some_group(app, ormdb):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="First name", middlename="MiddleName", lastname="LastName"))
if app.group.count() == 0:
app.group.create(Group(name="name_edited", header="header_edited", footer="footer_edited"))
# Get current values and generate indexes randomly
all_groups_with_contacts = ormdb.get_groups_with_contacts()
# In case no contacts in any group - add random contact to random group
if len(all_groups_with_contacts) == 0:
contacts = ormdb.get_contact_list()
c_index = randrange(len(contacts))
groups = ormdb.get_group_list()
g_index = randrange(len(groups))
app.contact.add_contact_to_group(contacts[c_index], groups[g_index])
all_groups_with_contacts = ormdb.get_groups_with_contacts()
assert len(all_groups_with_contacts) == 1
# Continue test with deleting contact from group
g_index = randrange(len(all_groups_with_contacts))
group = all_groups_with_contacts[g_index]
contacts_in_group_before = ormdb.get_contacts_in_group(group)
c_index = randrange(len(contacts_in_group_before))
contact = contacts_in_group_before[c_index]
# Remove contact from group via UI
app.contact.remove_contact_from_group(contact, group)
# Get changes values
contacts_in_group_after = ormdb.get_contacts_in_group(group)
contacts_in_group_before.remove(contact)
assert sorted(contacts_in_group_before, key=Contact.id_or_max) == sorted(contacts_in_group_after,
key=Contact.id_or_max)
|
import os
import time
from shutil import copyfile
import captcha_algo_decoder
def test(dataset_directory):
for filename in os.listdir(dataset_directory):
if filename.endswith(".txt"):
file_with_captcha_solve = open(os.path.join(dataset_directory, filename),
encoding="UTF-8")
captcha_solve = file_with_captcha_solve.read().upper()
file_with_captcha_solve.close()
image_filename = filename.replace("_request", "")[:-3] + "png"
decode_result = captcha_algo_decoder.decoder(
os.path.join(dataset_directory, image_filename))
print(f"{decode_result}:{captcha_solve}")
if decode_result != captcha_solve:
copyfile(os.path.join(dataset_directory, image_filename),
f"incorrect_captcha/{captcha_solve}.png")
os.remove(os.path.join(dataset_directory, filename))
os.remove(os.path.join(dataset_directory, image_filename))
if __name__ == '__main__':
start_time = time.time()
test("captcha_dataset\\evergreen")
print(time.time() - start_time)
|
# -*- coding: utf-8 -*-
def rhombicuboctahedron():
import vtk
# First, you need to store the vertex locations.
import numpy as np
fu = 1 # full unit
hu = 0.5 # half unit
d = np.sqrt((fu ** 2) / 2) # diag
hh = hu + d # half height
# left view faces us
import utool as ut
import six
import itertools
counter = ut.partial(six.next, itertools.count(0))
vertex_locations = vtk.vtkPoints()
vertex_locations.SetNumberOfPoints(24)
p1, p2, p3 = np.array([(-hu, -hu, hh), (hu, -hu, hh), (hu, hu, hh), (-hu, hu, hh)]).T
plist = [p1, p2, p3]
# three of the six main faces
# perms = list(itertools.permutations((0, 1, 2), 3))
perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
vertex_array = []
# VERTEXES
# left, up, back
vplist = ['L', 'U', 'B', 'R', 'D', 'F']
vpdict = {}
print('perms = %r' % (perms,))
for x in range(3):
vp = vplist[x]
p = np.vstack(ut.take(plist, perms[x])).T
counts = [counter() for z in range(4)]
vpdict[vp] = counts
vertex_array.extend(p.tolist())
vertex_locations.SetPoint(counts[0], p[0])
vertex_locations.SetPoint(counts[1], p[1])
vertex_locations.SetPoint(counts[2], p[2])
vertex_locations.SetPoint(counts[3], p[3])
# three more of the six main faces
perms = [(0, 1, 2), (0, 2, 1), (2, 0, 1)]
plist[-1] = -plist[-1]
# right, down, front
print('perms = %r' % (perms,))
for x in range(3):
p = np.vstack(ut.take(plist, perms[x])).T
counts = [counter() for z in range(4)]
vp = vplist[x + 3]
vpdict[vp] = counts
vertex_array.extend(p.tolist())
vertex_locations.SetPoint(counts[0], p[0])
vertex_locations.SetPoint(counts[1], p[1])
vertex_locations.SetPoint(counts[2], p[2])
vertex_locations.SetPoint(counts[3], p[3])
pd = vtk.vtkPolyData()
pd.SetPoints(vertex_locations)
polygon_faces = vtk.vtkCellArray()
face_dict = {
'L': [vpdict['L'][0], vpdict['L'][1], vpdict['L'][2], vpdict['L'][3]],
'D': [vpdict['D'][0], vpdict['D'][1], vpdict['D'][2], vpdict['D'][3]],
'U': [vpdict['U'][0], vpdict['U'][1], vpdict['U'][2], vpdict['U'][3]],
'F': [vpdict['F'][0], vpdict['F'][1], vpdict['F'][2], vpdict['F'][3]],
'R': [vpdict['R'][0], vpdict['R'][1], vpdict['R'][2], vpdict['R'][3]],
'B': [vpdict['B'][0], vpdict['B'][1], vpdict['B'][2], vpdict['B'][3]],
'FL': [vpdict['L'][0], vpdict['L'][3], vpdict['F'][2], vpdict['F'][3]],
'BL': [vpdict['L'][1], vpdict['L'][2], vpdict['B'][2], vpdict['B'][3]],
'UL': [vpdict['L'][2], vpdict['L'][3], vpdict['U'][3], vpdict['U'][2]],
'DL': [vpdict['L'][0], vpdict['L'][1], vpdict['D'][2], vpdict['D'][3]],
'UFL': [vpdict['L'][3], vpdict['F'][2], vpdict['U'][3]],
'DFL': [vpdict['L'][0], vpdict['F'][3], vpdict['D'][3]],
'UBL': [vpdict['L'][2], vpdict['B'][2], vpdict['U'][2]],
'DBL': [vpdict['L'][1], vpdict['B'][3], vpdict['D'][2]],
'UFR': [vpdict['R'][3], vpdict['F'][1], vpdict['U'][0]],
'DFR': [vpdict['R'][0], vpdict['F'][0], vpdict['D'][0]],
'UBR': [vpdict['R'][2], vpdict['B'][1], vpdict['U'][1]],
'DBR': [vpdict['R'][1], vpdict['B'][0], vpdict['D'][1]],
'FR': [vpdict['R'][3], vpdict['R'][0], vpdict['F'][0], vpdict['F'][1]],
'BR': [vpdict['R'][2], vpdict['R'][1], vpdict['B'][0], vpdict['B'][1]],
'UR': [vpdict['R'][3], vpdict['R'][2], vpdict['U'][1], vpdict['U'][0]],
'DR': [vpdict['R'][1], vpdict['R'][0], vpdict['D'][0], vpdict['D'][1]],
'DF': [vpdict['F'][0], vpdict['F'][3], vpdict['D'][3], vpdict['D'][0]],
'DB': [vpdict['B'][3], vpdict['B'][0], vpdict['D'][1], vpdict['D'][2]],
'UF': [vpdict['F'][1], vpdict['F'][2], vpdict['U'][3], vpdict['U'][0]],
'UB': [vpdict['B'][2], vpdict['B'][1], vpdict['U'][1], vpdict['U'][2]],
}
for key, vert_ids in face_dict.items():
# if key != 'L':
# continue
if len(vert_ids) == 4:
q = vtk.vtkQuad()
else:
q = vtk.vtkTriangle()
for count, idx in enumerate(vert_ids):
q.GetPointIds().SetId(count, idx)
polygon_faces.InsertNextCell(q)
# Next you create a vtkPolyData to store your face and vertex information
# that
# represents your polyhedron.
pd = vtk.vtkPolyData()
pd.SetPoints(vertex_locations)
pd.SetPolys(polygon_faces)
face_stream = vtk.vtkIdList()
face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
vertex_list = vtk.vtkIdList()
polygon_faces.InitTraversal()
while polygon_faces.GetNextCell(vertex_list) == 1:
face_stream.InsertNextId(vertex_list.GetNumberOfIds())
for j in range(vertex_list.GetNumberOfIds()):
face_stream.InsertNextId(vertex_list.GetId(j))
ug = vtk.vtkUnstructuredGrid()
ug.SetPoints(vertex_locations)
ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)
# writer = vtk.vtkUnstructuredGridWriter()
# writer.SetFileName("rhombicuboctahedron.vtk")
# # writer.SetInputData(ug)
# writer.SetInput(ug)
# writer.Write()
mapper = vtk.vtkDataSetMapper()
mapper.SetInput(ug)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if 1:
# Read the image data from a file
import utool as ut
textureCoords = vtk.vtkFloatArray()
textureCoords.SetNumberOfComponents(3)
# coords = ut.take(vertex_array, face_dict['L'])
# for coord in coords:
# textureCoords.InsertNextTuple(tuple(coord))
textureCoords.InsertNextTuple((0, 0, 0))
textureCoords.InsertNextTuple((1, 0, 0))
textureCoords.InsertNextTuple((1, 1, 0))
textureCoords.InsertNextTuple((0, 1, 0))
# Create texture object
fpath = ut.grab_test_imgpath('zebra.png')
reader = vtk.vtkPNGReader()
reader.SetFileName(fpath)
texture = vtk.vtkTexture()
texture.SetInput(reader.GetOutput())
texture.RepeatOff()
texture.InterpolateOff()
ptdat = pd.GetPointData()
ptdat.SetTCoords(textureCoords)
actor.SetTexture(texture)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
renw = vtk.vtkRenderWindow()
renw.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renw)
ren.ResetCamera()
renw.Render()
iren.Start()
def rhombic_dodecahedron():
# http://www.vtk.org/pipermail/vtkusers/2014-September/085077.html
import vtk
# This is a Rhombic Dodecahedron.
# First, you need to store the vertex locations.
vertex_locations = vtk.vtkPoints()
vertex_locations.SetNumberOfPoints(14)
vertex_locations.SetPoint(0, (-0.816497, -0.816497, 0.00000))
vertex_locations.SetPoint(1, (-0.816497, 0.000000, -0.57735))
vertex_locations.SetPoint(2, (-0.816497, 0.000000, 0.57735))
vertex_locations.SetPoint(3, (-0.816497, 0.816497, 0.00000))
vertex_locations.SetPoint(4, (0.000000, -0.816497, -0.57735))
vertex_locations.SetPoint(5, (0.000000, -0.816497, 0.57735))
vertex_locations.SetPoint(6, (0.000000, 0.000000, -1.15470))
vertex_locations.SetPoint(7, (0.000000, 0.000000, 1.15470))
vertex_locations.SetPoint(8, (0.000000, 0.816497, -0.57735))
vertex_locations.SetPoint(9, (0.000000, 0.816497, 0.57735))
vertex_locations.SetPoint(10, (0.816497, -0.816497, 0.00000))
vertex_locations.SetPoint(11, (0.816497, 0.000000, -0.57735))
vertex_locations.SetPoint(12, (0.816497, 0.000000, 0.57735))
vertex_locations.SetPoint(13, (0.816497, 0.816497, 0.00000))
# Next, you describe the polygons that represent the faces using the vertex
# indices in the vtkPoints that stores the vertex locations. There are a
# number
# of ways to do this that you can find in examples on the Wiki.
polygon_faces = vtk.vtkCellArray()
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 7)
q.GetPointIds().SetId(1, 12)
q.GetPointIds().SetId(2, 10)
q.GetPointIds().SetId(3, 5)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 7)
q.GetPointIds().SetId(1, 12)
q.GetPointIds().SetId(2, 13)
q.GetPointIds().SetId(3, 9)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 7)
q.GetPointIds().SetId(1, 9)
q.GetPointIds().SetId(2, 3)
q.GetPointIds().SetId(3, 2)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 7)
q.GetPointIds().SetId(1, 2)
q.GetPointIds().SetId(2, 0)
q.GetPointIds().SetId(3, 5)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 6)
q.GetPointIds().SetId(1, 11)
q.GetPointIds().SetId(2, 10)
q.GetPointIds().SetId(3, 4)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 6)
q.GetPointIds().SetId(1, 4)
q.GetPointIds().SetId(2, 0)
q.GetPointIds().SetId(3, 1)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 6)
q.GetPointIds().SetId(1, 1)
q.GetPointIds().SetId(2, 3)
q.GetPointIds().SetId(3, 8)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 6)
q.GetPointIds().SetId(1, 8)
q.GetPointIds().SetId(2, 13)
q.GetPointIds().SetId(3, 11)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 10)
q.GetPointIds().SetId(1, 11)
q.GetPointIds().SetId(2, 13)
q.GetPointIds().SetId(3, 12)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 13)
q.GetPointIds().SetId(1, 8)
q.GetPointIds().SetId(2, 3)
q.GetPointIds().SetId(3, 9)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 3)
q.GetPointIds().SetId(1, 1)
q.GetPointIds().SetId(2, 0)
q.GetPointIds().SetId(3, 2)
polygon_faces.InsertNextCell(q)
q = vtk.vtkQuad()
q.GetPointIds().SetId(0, 0)
q.GetPointIds().SetId(1, 4)
q.GetPointIds().SetId(2, 10)
q.GetPointIds().SetId(3, 5)
polygon_faces.InsertNextCell(q)
# Next you create a vtkPolyData to store your face and vertex information
# that
# represents your polyhedron.
pd = vtk.vtkPolyData()
pd.SetPoints(vertex_locations)
pd.SetPolys(polygon_faces)
# If you wanted to be able to load in the saved file and select the entire
# polyhedron, you would need to save it as a vtkUnstructuredGrid, and you
# would
# need to put the data into a vtkPolyhedron. This is a bit more involved
# than
# the vtkPolyData that I used above. For a more in-depth discussion, see:
# http://www.vtk.org/Wiki/VTK/Polyhedron_Support
# Based on the link above, I need to construct a face stream:
face_stream = vtk.vtkIdList()
face_stream.InsertNextId(polygon_faces.GetNumberOfCells())
vertex_list = vtk.vtkIdList()
polygon_faces.InitTraversal()
while polygon_faces.GetNextCell(vertex_list) == 1:
face_stream.InsertNextId(vertex_list.GetNumberOfIds())
for j in range(vertex_list.GetNumberOfIds()):
face_stream.InsertNextId(vertex_list.GetId(j))
ug = vtk.vtkUnstructuredGrid()
ug.SetPoints(vertex_locations)
ug.InsertNextCell(vtk.VTK_POLYHEDRON, face_stream)
# --------------#
# output stuff #
# --------------#
writer = vtk.vtkUnstructuredGridWriter()
writer.SetFileName('rhombic_dodecahedron.vtk')
# writer.SetInputData(ug)
writer.SetInput(ug)
writer.Write()
# ---------------------#
# visualization stuff #
# ---------------------#
# mapper = vtk.vtkPolyDataMapper()
# mapper.SetInputData(pd)
mapper = vtk.vtkDataSetMapper()
mapper.SetInput(ug)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
renw = vtk.vtkRenderWindow()
renw.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renw)
ren.ResetCamera()
renw.Render()
iren.Start()
if __name__ == '__main__':
r"""
CommandLine:
python -m wbia.plottool.test_vtk_poly
python -m wbia.plottool.test_vtk_poly --allexamples
python plottool/test_vtk_poly.py
"""
rhombicuboctahedron()
|
import enum
import errno
import os
import platform
import socket
import traceback
from abc import ABCMeta, abstractmethod
import mozprocess
from ..environment import wait_for_service
from ..wptcommandline import require_arg # noqa: F401
here = os.path.dirname(__file__)
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def maybe_add_args(required_args, current_args):
for required_arg in required_args:
# If the arg is in the form of "variable=value", only add it if
# no arg with another value for "variable" is already there.
if "=" in required_arg:
required_arg_prefix = "%s=" % required_arg.split("=")[0]
if not any(item.startswith(required_arg_prefix) for item in current_args):
current_args.append(required_arg)
else:
if required_arg not in current_args:
current_args.append(required_arg)
return current_args
def certificate_domain_list(list_of_domains, certificate_file):
"""Build a list of domains where certificate_file should be used"""
cert_list = []
for domain in list_of_domains:
cert_list.append({"host": domain, "certificateFile": certificate_file})
return cert_list
def get_free_port():
"""Get a random unbound port"""
while True:
s = socket.socket()
try:
s.bind(("127.0.0.1", 0))
except OSError:
continue
else:
return s.getsockname()[1]
finally:
s.close()
def get_timeout_multiplier(test_type, run_info_data, **kwargs):
if kwargs["timeout_multiplier"] is not None:
return kwargs["timeout_multiplier"]
return 1
def browser_command(binary, args, debug_info):
if debug_info:
if debug_info.requiresEscapedArgs:
args = [item.replace("&", "\\&") for item in args]
debug_args = [debug_info.path] + debug_info.args
else:
debug_args = []
command = [binary] + args
return debug_args, command
class BrowserError(Exception):
pass
class Browser:
"""Abstract class serving as the basis for Browser implementations.
The Browser is used in the TestRunnerManager to start and stop the browser
process, and to check the state of that process. This class also acts as a
context manager, enabling it to do browser-specific setup at the start of
the testrun and cleanup after the run is complete.
:param logger: Structured logger to use for output.
"""
__metaclass__ = ABCMeta
process_cls = None
init_timeout = 30
def __init__(self, logger):
self.logger = logger
def __enter__(self):
self.setup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def setup(self):
"""Used for browser-specific setup that happens at the start of a test run"""
pass
def settings(self, test):
"""Dictionary of metadata that is constant for a specific launch of a browser.
This is used to determine when the browser instance configuration changes, requiring
a relaunch of the browser. The test runner calls this method for each test, and if the
returned value differs from that for the previous test, the browser is relaunched.
"""
return {}
@abstractmethod
def start(self, group_metadata, **kwargs):
"""Launch the browser object and get it into a state where is is ready to run tests"""
pass
@abstractmethod
def stop(self, force=False):
"""Stop the running browser process."""
pass
@abstractmethod
def pid(self):
"""pid of the browser process or None if there is no pid"""
pass
@abstractmethod
def is_alive(self):
"""Boolean indicating whether the browser process is still running"""
pass
def cleanup(self):
"""Browser-specific cleanup that is run after the testrun is finished"""
pass
def executor_browser(self):
"""Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
with which it should be instantiated"""
return ExecutorBrowser, {}
def maybe_parse_tombstone(self):
"""Possibly parse tombstones on Android device for Android target"""
pass
def check_crash(self, process, test):
"""Check if a crash occured and output any useful information to the
log. Returns a boolean indicating whether a crash occured."""
return False
class NullBrowser(Browser):
def __init__(self, logger, **kwargs):
super().__init__(logger)
def start(self, **kwargs):
"""No-op browser to use in scenarios where the TestRunnerManager shouldn't
actually own the browser process (e.g. Servo where we start one browser
per test)"""
pass
def stop(self, force=False):
pass
def pid(self):
return None
def is_alive(self):
return True
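# A minimal sketch of how a Browser implementation is driven, following the
# context-manager contract described in the Browser docstring; NullBrowser
# stands in for a real product browser and the arguments are illustrative only.
def _example_browser_lifecycle(logger):
    with NullBrowser(logger) as browser:
        browser.start(group_metadata={})
        assert browser.is_alive()
        browser.stop()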
class ExecutorBrowser:
"""View of the Browser used by the Executor object.
This is needed because the Executor runs in a child process and
we can't ship Browser instances between processes on Windows.
Typically this will have a few product-specific properties set,
but in some cases it may have more elaborate methods for setting
up the browser from the runner process.
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
@enum.unique
class OutputHandlerState(enum.IntEnum):
BEFORE_PROCESS_START = 1
AFTER_PROCESS_START = 2
AFTER_HANDLER_START = 3
AFTER_PROCESS_STOP = 4
class OutputHandler:
"""Class for handling output from a browser process.
This class is responsible for consuming the logging from a browser process
and passing it into the relevant logger. A class instance is designed to
be passed as the processOutputLine argument to mozprocess.ProcessHandler.
The setup of this class is complex for various reasons:
* We need to create an instance of the class before starting the process
* We want access to data about the running process e.g. the pid
* We want to launch the process and later setup additional log handling
      which is retrospectively applied to any existing output (this supports
prelaunching browsers for performance, but having log output depend on the
tests that are run e.g. for leak suppression).
Therefore the lifecycle is as follows::
output_handler = OutputHandler(logger, command, **output_handler_kwargs)
proc = ProcessHandler(command, ..., processOutputLine=output_handler)
output_handler.after_process_start(proc.pid)
[...]
# All logging to this point was buffered in-memory, but after start()
# it's actually sent to the logger.
output_handler.start(**output_logger_start_kwargs)
[...]
proc.wait()
output_handler.after_process_stop()
Since the process lifetime and the output handler lifetime are coupled (it doesn't
work to reuse an output handler for multiple processes), it might make sense to have
a single class that owns the process and the output processing for the process.
This is complicated by the fact that we don't always run the process directly,
but sometimes use a wrapper e.g. mozrunner.
"""
def __init__(self, logger, command, **kwargs):
self.logger = logger
self.command = command
self.pid = None
self.state = OutputHandlerState.BEFORE_PROCESS_START
self.line_buffer = []
def after_process_start(self, pid):
assert self.state == OutputHandlerState.BEFORE_PROCESS_START
self.logger.debug("OutputHandler.after_process_start")
self.pid = pid
self.state = OutputHandlerState.AFTER_PROCESS_START
def start(self, **kwargs):
assert self.state == OutputHandlerState.AFTER_PROCESS_START
self.logger.debug("OutputHandler.start")
# Need to change the state here before we try to empty the buffer
# or we'll just re-buffer the existing output.
self.state = OutputHandlerState.AFTER_HANDLER_START
for item in self.line_buffer:
self(item)
self.line_buffer = None
def after_process_stop(self, clean_shutdown=True):
# If we didn't get as far as configure, just
# dump all logs with no configuration
self.logger.debug("OutputHandler.after_process_stop")
if self.state < OutputHandlerState.AFTER_HANDLER_START:
self.start()
self.state = OutputHandlerState.AFTER_PROCESS_STOP
def __call__(self, line):
if self.state < OutputHandlerState.AFTER_HANDLER_START:
self.line_buffer.append(line)
return
# Could assert that there's no output handled once we're in the
# after_process_stop phase, although technically there's a race condition
# here because we don't know the logging thread has finished draining the
# logs. The solution might be to move this into mozprocess itself.
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self.command) if self.command else "")
class WebDriverBrowser(Browser):
__metaclass__ = ABCMeta
def __init__(self, logger, binary=None, webdriver_binary=None,
webdriver_args=None, host="127.0.0.1", port=None, base_path="/",
env=None, **kwargs):
super().__init__(logger)
if webdriver_binary is None:
raise ValueError("WebDriver server binary must be given "
"to --webdriver-binary argument")
self.logger = logger
self.binary = binary
self.webdriver_binary = webdriver_binary
self.host = host
self._port = port
self.base_path = base_path
self.env = os.environ.copy() if env is None else env
self.webdriver_args = webdriver_args if webdriver_args is not None else []
self.url = f"http://{self.host}:{self.port}{self.base_path}"
self._output_handler = None
self._cmd = None
self._proc = None
def make_command(self):
"""Returns the full command for starting the server process as a list."""
return [self.webdriver_binary] + self.webdriver_args
def start(self, group_metadata, **kwargs):
try:
self._run_server(group_metadata, **kwargs)
except KeyboardInterrupt:
self.stop()
def create_output_handler(self, cmd):
"""Return an instance of the class used to handle application output.
This can be overridden by subclasses which have particular requirements
for parsing, or otherwise using, the output."""
return OutputHandler(self.logger, cmd)
def _run_server(self, group_metadata, **kwargs):
cmd = self.make_command()
self._output_handler = self.create_output_handler(cmd)
self._proc = mozprocess.ProcessHandler(
cmd,
processOutputLine=self._output_handler,
env=self.env,
storeOutput=False)
self.logger.debug("Starting WebDriver: %s" % ' '.join(cmd))
try:
self._proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
raise OSError(
"WebDriver executable not found: %s" % self.webdriver_binary)
raise
self._output_handler.after_process_start(self._proc.pid)
try:
wait_for_service(self.logger, self.host, self.port)
except Exception:
self.logger.error(
"WebDriver was not accessible "
f"within the timeout:\n{traceback.format_exc()}")
raise
self._output_handler.start(group_metadata=group_metadata, **kwargs)
self.logger.debug("_run complete")
def stop(self, force=False):
self.logger.debug("Stopping WebDriver")
clean = True
if self.is_alive():
            # Pass a timeout value to mozprocess ProcessHandler.kill()
# to ensure it always returns within it.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1760080
kill_result = self._proc.kill(timeout=5)
if force and kill_result != 0:
clean = False
self._proc.kill(9, timeout=5)
success = not self.is_alive()
if success and self._output_handler is not None:
# Only try to do output post-processing if we managed to shut down
self._output_handler.after_process_stop(clean)
self._output_handler = None
return success
def is_alive(self):
return hasattr(self._proc, "proc") and self._proc.poll() is None
@property
def pid(self):
if self._proc is not None:
return self._proc.pid
@property
def port(self):
# If no port is supplied, we'll get a free port right before we use it.
# Nothing guarantees an absence of race conditions here.
if self._port is None:
self._port = get_free_port()
return self._port
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.url,
"host": self.host,
"port": self.port}
|
import os
import time
from colorama import *
from input import *
from background import *
from paddle import *
from ball import *
from brick import *
from powerup import *
from boss import *
from laser import *
# Windows terminals don't interpret ANSI color codes natively;
# colorama's init() translates them through the Windows API so the colors display correctly
init()
# initialize variables
start_time = time.time()
prev_time = start_time
LIVES = [3]  # wrapped in a list so other modules can mutate it by reference
LVL = [1]
ROWS = 45
COLS = 150
ball_launched = [0] # 1 = ball launched from paddle
flag = 0
lvlStartTime = [start_time]
bulletTime = [10]
bulletMaxTime = [10]
generateBricks_lvl1(bg.getGrid())
def Message(msg):
if msg == "q":
print("\t\t\t\t\t\t\t Score: ", SCORE[0])
print("\t\t\t _____ ______\n" +
"\t\t\t| | | | | \n" +
"\t\t\t| | | | | \n" +
"\t\t\t|----| |-------| |----- \n" +
"\t\t\t| | | | \n" +
"\t\t\t|_____| | |______\n" )
elif msg == "gameOver":
os.system("aplay sound/gameOver.wav -q &")
print("\t\t\t\t\t\t\t Score: ", SCORE[0])
print("\t\t\t $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$$$\ $$$$$$\ $$\ $$\ $$$$$$$$\ $$$$$$$\ \n" +
"\t\t\t$$ __$$\ $$ __$$\ $$$\ $$$ |$$ _____| $$ __$$\ $$ | $$ |$$ _____|$$ __$$\ \n" +
"\t\t\t$$ / \__|$$ / $$ |$$$$\ $$$$ |$$ | $$ / $$ |$$ | $$ |$$ | $$ | $$ |\n" +
"\t\t\t$$ |$$$$\ $$$$$$$$ |$$\$$\$$ $$ |$$$$$\ $$ | $$ |\$$\ $$ |$$$$$\ $$$$$$$ |\n" +
"\t\t\t$$ |\_$$ |$$ __$$ |$$ \$$$ $$ |$$ __| $$ | $$ | \$$\$$ / $$ __| $$ __$$< \n" +
"\t\t\t$$ | $$ |$$ | $$ |$$ |\$ /$$ |$$ | $$ | $$ | \$$$ / $$ | $$ | $$ |\n" +
"\t\t\t\$$$$$$ |$$ | $$ |$$ | \_/ $$ |$$$$$$$$\ $$$$$$ | \$ / $$$$$$$$\ $$ | $$ |\n" +
"\t\t\t \______/ \__| \__|\__| \__|\________| \______/ \_/ \________|\__| \__|\n" )
print("\n")
def Scoreboard(grid):
grid[0][0] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"S"+ Style.RESET_ALL
grid[0][1] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"C"+ Style.RESET_ALL
grid[0][2] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"O"+ Style.RESET_ALL
grid[0][3] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"R"+ Style.RESET_ALL
grid[0][4] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"E"+ Style.RESET_ALL
grid[0][5] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +":"+ Style.RESET_ALL
grid[0][6] = int(SCORE[0])
grid[1][0] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"L"+ Style.RESET_ALL
grid[1][1] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"I"+ Style.RESET_ALL
grid[1][2] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"V"+ Style.RESET_ALL
grid[1][3] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"E"+ Style.RESET_ALL
grid[1][4] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"S"+ Style.RESET_ALL
grid[1][5] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +":"+ Style.RESET_ALL
grid[1][6] = int(LIVES[0])
grid[1][144] = Fore.WHITE + Back.RED + Style.BRIGHT +"L"+ Style.RESET_ALL
grid[1][145] = Fore.WHITE + Back.RED + Style.BRIGHT +"V"+ Style.RESET_ALL
grid[1][146] = Fore.WHITE + Back.RED + Style.BRIGHT +"L"+ Style.RESET_ALL
grid[1][147] = Fore.WHITE + Back.RED + Style.BRIGHT +":"+ Style.RESET_ALL
grid[1][148] = int(LVL[0])
grid[2][0] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"T"+ Style.RESET_ALL
grid[2][1] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"I"+ Style.RESET_ALL
grid[2][2] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"M"+ Style.RESET_ALL
grid[2][3] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +"E"+ Style.RESET_ALL
grid[2][4] = Fore.WHITE + Back.MAGENTA + Style.BRIGHT +":"+ Style.RESET_ALL
grid[2][5] = int(time.time() - start_time)
if LVL[0] == 3:
grid[2][141] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"H"+ Style.RESET_ALL
grid[2][142] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"E"+ Style.RESET_ALL
grid[2][143] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"A"+ Style.RESET_ALL
grid[2][144] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"L"+ Style.RESET_ALL
grid[2][145] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"T"+ Style.RESET_ALL
grid[2][146] = Fore.WHITE + Back.GREEN + Style.BRIGHT +"H"+ Style.RESET_ALL
grid[2][147] = Fore.WHITE + Back.GREEN + Style.BRIGHT +":"+ Style.RESET_ALL
grid[2][148] = int(boss.getHealth())
# returns 1 if all lvls finished
def lvlUp():
FALL_BRICK[0] = 0
lvlStartTime[0] = time.time()
os.system("aplay sound/lvlUp.wav -q &")
# removing bricks from prev lvl
while len(obj1)!=0:
obj1.remove(obj1[0])
grid = bg.getGrid()
for rows in range(3,41):
for col in range(1,149):
grid[rows][col] = ' '
# powerup resets
if fastBall[0]!=' ': fastBall[0].resetPowerup()
if multiplyBall[0]!=' ': multiplyBall[0].resetPowerup()
if paddleShrink[0]!=' ': paddleShrink[0].resetPowerup()
if paddleExpand[0]!=' ': paddleExpand[0].resetPowerup()
if thruBall[0]!=' ': thruBall[0].resetPowerup()
if paddleGrab[0]!=' ': paddleGrab[0].resetPowerup()
if fireBall[0]!=' ': fireBall[0].resetPowerup()
if shootingPaddle[0]!=' ': shootingPaddle[0].resetPowerup()
LVL[0]+=1
if LVL[0] > 3:
Message("gameOver")
return 1
elif LVL[0] == 2:
generateBricks_lvl2(bg.getGrid())
elif LVL[0] == 3:
boss.placeBoss(bg.getGrid())
generateBricks_lvl3(bg.getGrid())
Scoreboard(bg.getGrid())
return 0
os.system("clear")
while True:
if time.time() - prev_time >= 0.1:
prev_time = time.time()
Scoreboard(bg.getGrid())
paddle.placePaddle(bg.getGrid())
if ball_launched[0] == 0:
ball[0].placeAbovePaddle(paddle.getX(), bg.getGrid())
else:
for i in list(ball):
i.moveBall(LIVES, ball_launched, bg.getGrid(), LVL)
# taking input
letter = input_to()
if letter == 'q':
Message("q")
break
elif letter == 'a':
paddle.movePaddle("a", bg.getGrid())
elif letter == 'd':
paddle.movePaddle("d", bg.getGrid())
elif letter == 'w' and ball_launched[0] == 0:
ball_launched[0] = 1
        # temporary: 'x' key duplicates every ball (debug helper for the multiply powerup)
elif letter == 'x':
size = len(ball)
for i in range(size):
ball.append(duplicateBall(ball[i]))
# lvl change
elif letter == 'l':
if lvlUp()==1:
break
if fastBall[0] != ' ':
fastBall[0].move(bg.getGrid())
fastBall[0].update(ball)
# print(fastBall[0].getActivated())
if multiplyBall[0] != ' ':
multiplyBall[0].move(bg.getGrid())
multiplyBall[0].update()
if multiplyBall[0]._changed == 0 and multiplyBall[0]._activated == 1:
multiplyBall[0]._changed = 1
for i in range(len(ball)):
ball.append(duplicateBall(ball[i]))
# powerup time elapsed
if multiplyBall[0]._changed == 1 and multiplyBall[0]._activated == 0 and flag == 0:
flag=1
for i in range(len(ball)-1,0,-1):
grid = bg.getGrid()
grid[ball[i]._y][ball[i]._x] = ' '
ball.remove(ball[i])
if paddleShrink[0] != ' ':
paddleShrink[0].move(bg.getGrid())
paddleShrink[0].update()
# print(paddle_change[0])
if paddleExpand[0] != ' ':
paddleExpand[0].move(bg.getGrid())
paddleExpand[0].update()
if thruBall[0] != ' ':
thruBall[0].move(bg.getGrid())
thruBall[0].update(ball)
if paddleGrab[0] != ' ':
paddleGrab[0].move(bg.getGrid())
paddleGrab[0].update()
if fireBall[0] != ' ':
fireBall[0].move(bg.getGrid())
fireBall[0].update(ball)
if shootingPaddle[0] != ' ':
shootingPaddle[0].move(bg.getGrid())
shootingPaddle[0].update(bg.getGrid())
if shootingPaddle[0]._activated == 1:
if shootingPaddle[0]._shoottime >= shootingPaddle[0]._shootmaxtime:
shootingPaddle[0]._shoottime = 0
laser.append(Laser())
for i in range(len(laser)-1,-1,-1):
if laser[i]._dead == 1:
laser.remove(laser[i])
continue
laser[i].shoot(bg.getGrid())
if LVL[0] == 3:
if boss.getHealth() <= 0:
lvlUp()
break
if len(bullets) == 0:
bullets.append(Bullets())
if bulletTime[0] >= bulletMaxTime[0]:
bullets.append(Bullets())
bulletTime[0] = 0
os.system("aplay sound/laser.wav -q &")
else:
bulletTime[0] += 1
for i in range(len(bullets)-1,-1,-1):
if bullets[i]._dead == 1:
bullets.remove(bullets[i])
continue
bullets[i].move(bg.getGrid(), LIVES)
print("\033[%d;%dH" % (0, 0)) # position cursor at x across, y down
bg.printGrid()
# if all bricks broken then lvl up
if len(obj1) == 0 and LVL[0]!=3:
if lvlUp() == 1:
break
if LVL[0] == 3:
boss.placeBoss(bg.getGrid())
placeBricks(bg.getGrid())
if time.time()-lvlStartTime[0] >= FALL_BRICK_TIME[0] and LVL[0] != 3:
FALL_BRICK[0] = 1
# brick touched the bottom
if finish[0] == 1:
Message("gameOver")
break
if bossFinish[0] == 1:
# SCORE[0]+=100
lvlUp()
break
print(Style.RESET_ALL)
if LIVES[0] <= 0:
Message("gameOver")
break
|
import numpy as np
def getMinChannel(img,AtomsphericLight):
imgGrayNormalization = np.zeros((img.shape[0], img.shape[1]), dtype=np.float16)
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
localMin = 1
for k in range(0, 3):
# print('AtomsphericLight[k]',AtomsphericLight[k])
imgNormalization = img.item((i, j, k)) / AtomsphericLight[k]
if imgNormalization < localMin:
localMin = imgNormalization
imgGrayNormalization[i, j] = localMin
# print('imgGrayNormalization',imgGrayNormalization)
# print('np.max(imgGrayNormalization)',np.max(imgGrayNormalization))
return imgGrayNormalization
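# Worked example (illustrative): for a pixel with channel values (60, 120, 180)
# and AtomsphericLight (200, 200, 200), the normalized channels are
# 0.3, 0.6 and 0.9, so the stored minimum for that pixel is 0.3.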
def getTransmission(img,AtomsphericLight ,blockSize):
img = np.float16(img)
img = getMinChannel(img,AtomsphericLight)
AtomsphericLight = AtomsphericLight / 255.0
addSize = int((blockSize - 1) / 2)
newHeight = img.shape[0] + blockSize - 1
newWidth = img.shape[1] + blockSize - 1
    # intermediate padded image (borders filled with 1 before the min filter)
imgMiddle = np.zeros((newHeight, newWidth))
imgMiddle[:, :] = 1
imgMiddle[addSize:newHeight - addSize, addSize:newWidth - addSize] = img
# print('imgMiddle',imgMiddle)
imgDark = np.zeros((img.shape[0], img.shape[1]))
localMin = 1
for i in range(addSize, newHeight - addSize):
for j in range(addSize, newWidth - addSize):
localMin = 1
for k in range(i - addSize, i + addSize + 1):
for l in range(j - addSize, j + addSize + 1):
if imgMiddle.item((k, l)) < localMin:
localMin = imgMiddle.item((k, l))
imgDark[i - addSize, j - addSize] = localMin
transmission = (1 - imgDark) / (1 - 0.1 / np.max(AtomsphericLight))
transmission = np.clip(transmission, 0.1, 0.9)
# for i in range(0, transmission.shape[0]):
# for j in range(0, transmission.shape[1]):
# if transmission[i, j] < 0.01:
# transmission[i, j] = 0.01
# if transmission[i, j] > 0.99:
# transmission[i, j] = 0.99
return transmission
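# Illustrative usage (assumed inputs, not from the original file): img is an
# H x W x 3 image array and AtomsphericLight a length-3 per-channel airlight
# estimate, e.g.
#   t = getTransmission(img, np.array([200.0, 190.0, 180.0]), blockSize=9)
# t then holds per-pixel transmission values clipped to [0.1, 0.9].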
|
"""Utilities and helpers useful in other modules
"""
from typing import Text, Union
from six import ensure_binary
TextOrBytes = Union[Text, bytes]
def text_to_ascii_bytes(text):
# type: (TextOrBytes) -> bytes
"""Convert a text-or-bytes value to ASCII-encoded bytes
If the input is already `bytes`, we simply return it as is
"""
return ensure_binary(text, 'ascii')
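# Examples (illustrative):
#   text_to_ascii_bytes(u"abc")  -> b'abc'
#   text_to_ascii_bytes(b"abc")  -> b'abc'   # bytes are returned unchanged
# Non-ASCII text such as u"é" would raise a UnicodeEncodeError.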
|
import math
from collections import namedtuple
from itertools import chain
from math import inf
from operator import itemgetter
from pathlib import Path
from statistics import median
from typing import Tuple, Sized, List
import cv2
import matplotlib.colors
import numpy as np
from matplotlib import pyplot as plt
from utils.fret_detection import fret_detection, fret_detection_with_hough_lines
from utils.image import Image
from utils.string_detection import string_detection, string_detection_with_hough_lines
class GuitarImage(Image):
i = 1
Crop_Area = namedtuple('Crop_Area', ['higher_y', 'lower_y'])
Coordinate = namedtuple("Coordinate", ["x", "y"])
def __init__(self, save_img=False, **kwargs) -> None: # , file_name:str=""):
Image.__init__(self, **kwargs) # , file_name=file_name)
if save_img:
self.init_with_saving_imgs()
else:
self.color_img = cv2.flip(src=self.color_img, flipCode=1)
self.rotated, self.rotation_angle, self.image_center = self.rotate_img()
crop_res = self.crop_neck_with_hough_lines()
self.crop_area = self.Crop_Area(crop_res[1], crop_res[2])
self.cropped = crop_res[0]
detected_frets = fret_detection_with_hough_lines(cropped_neck_img=self.cropped)
self.frets = self.calculate_frets_xs(detected_frets=detected_frets)
self.strings = string_detection_with_hough_lines(cropped_neck_img=self.cropped, fret_lines=detected_frets)
def init_with_saving_imgs(self):
self.step = 1
self.color_img = cv2.flip(src=self.color_img, flipCode=1)
self.save_img(step=f"{self.step}_initial_img", i=self.i)
self.step += 1
self.rotated, self.rotation_angle, self.image_center = self.rotate_img()
self.rotated.save_img(step=f"{self.step}_rotation", i=self.i)
self.step += 1
crop_res = self.crop_neck_with_hough_lines() # crop_neck_with_hough_lines()
self.crop_area = self.Crop_Area(crop_res[1], crop_res[2])
self.cropped = crop_res[0]
self.cropped.save_img(step=f"{self.step}_crop", i=self.i)
self.step += 1
detected_frets = fret_detection_with_hough_lines(
cropped_neck_img=self.cropped) # fret_detection(cropped_neck_img=self.cropped)
self.cropped.save_img(step=f"{self.step}_fret_detection", i=self.i)
self.step += 1
self.frets = self.calculate_frets_xs(detected_frets=detected_frets)
self.strings = string_detection_with_hough_lines(cropped_neck_img=self.cropped, fret_lines=detected_frets)
self.cropped.save_img(step=f"{self.step}_string_detection", i=self.i)
self.step += 1
@staticmethod
def calculate_frets_xs(detected_frets: Sized) -> List[int]:
fret_xs = [(line[0][0] + line[1][0])//2 for line in detected_frets]
fret_xs_pairwise = zip(fret_xs[:len(fret_xs)], fret_xs[1:])
return list([(xs[0] + xs[1]) // 2 for xs in fret_xs_pairwise])
# detected_frets_pairwise = [
# (t[0][0], t[1][0]) for t in zip(detected_frets[:len(detected_frets)], detected_frets[1:])
# ]
# return list(sorted([(line[0][0] + line[0][1]) // 2 for line in detected_frets_pairwise]))
def get_chord_coordinates(self, chord_to_draw: str) -> List[Coordinate]:
note_by_string = chord_to_draw.split(',')
drawing_coordinates = []
for string, fret in enumerate(note_by_string):
if fret != 'x' and fret != '0' and string <= len(self.strings) - 1:
x = self.frets[int(fret) - 1]
y = self.strings[int(string)](x) + self.crop_area.higher_y
restored_coordinate = self.restore_coordinates(rotated_X=x, rotated_Y=y, center=self.image_center)
drawing_coordinates.append(restored_coordinate)
cv2.circle(
img=self.color_img,
center=(restored_coordinate.x, restored_coordinate.y),
radius=1,
color=(0, 187, 255),
thickness=int(self.cropped.width * 0.008))
# cv2.imshow("string: " + str(string) + ", fret: " + str(fret), cv2.cvtColor(self.color_img, cv2.COLOR_BGR2RGB))
# cv2.waitKey()
return drawing_coordinates
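    # Illustrative input (assumed convention): a chord string such as "x,3,2,0,1,0"
    # has one comma-separated entry per string; 'x' (muted) and '0' (open) entries
    # are skipped, every other entry is a fret number used to look up an (x, y)
    # drawing coordinate on the cropped neck.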
def get_chord_coordinates_relative(self, chord_coordinates: List[Coordinate]) -> List[Coordinate]:
return [self.Coordinate(x / float(self.width), y / float(self.height)) for (x, y) in chord_coordinates]
def crop_neck(self) -> Tuple[Image, int, int]:
edges = cv2.Canny(image=self.rotated.blur_gray, threshold1=20, threshold2=90)
edges = cv2.Canny(image=edges, threshold1=20, threshold2=180)
mag = self.get_magnitude(edges)
ret, mag = cv2.threshold(src=mag, thresh=127, maxval=255, type=cv2.THRESH_BINARY)
lines = cv2.HoughLinesP(image=mag.astype(np.uint8), rho=1, theta=np.pi / 180, threshold=18,
minLineLength=46)
# for line in lines:
# cv2.line(self.color_img, (line[0][0], line[0][1]), (line[0][2], line[0][3]),
# (255, 0, 0), 3) # int(cropped_neck_img.height * 0.02))
y = chain.from_iterable(itemgetter(1, 3)(line[0]) for line in lines)
y = list(sorted(y))
y_differences = [0]
first_y = 0
last_y = inf
for i in range(len(y) - 1):
y_differences.append(y[i + 1] - y[i])
for i in range(len(y_differences) - 1):
if y_differences[i] == 0:
last_y = y[i]
if i > 3 and first_y == 0:
first_y = y[i]
return Image(img=self.rotated.color_img[first_y - 10:last_y + 10]), first_y - 10, last_y + 10
def crop_neck_with_hough_lines(self) -> Tuple[Image, int, int]:
dst = cv2.Canny(image=self.rotated.blur_gray, threshold1=50, threshold2=200, apertureSize=3)
cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
height = self.height
width = self.width
lines = cv2.HoughLines(image=dst.astype(np.uint8), rho=1, theta=np.pi / 180, threshold=160)
vertical_lines = []
horizontal_lines = []
if lines is not None:
for i in range(0, len(lines)):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 2000 * (-b)), int(y0 + 2000 * (a)))
pt2 = (int(x0 - 2000 * (-b)), int(y0 - 2000 * (a)))
if pt2[0] - pt1[0] != 0:
slope = (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
else:
slope = 100000
y_axis_intr = pt1[1] - slope * pt1[0]
if math.fabs(slope) < 0.06:
y_in_middle = slope * width / 2 + y_axis_intr
horizontal_lines.append((slope,
y_axis_intr,
pt1,
pt2,
y_in_middle))
else:
x_in_middle = (height / 2 - y_axis_intr) / slope
vertical_lines.append((slope,
y_axis_intr,
pt1, # (abs(pt1[0]), abs(pt1[1])),
pt2, # (abs(pt2[0]), abs(pt2[1])),
x_in_middle))
#
# horizontal_lines.sort(key=lambda tup: tup[1])
# horizontal_slopes = [math.fabs(line[0]) for line in horizontal_lines]
# filtered_horizontal_lines = []
# last_horizontal_added = -1
# last_delta = 0
# min_slope = max(min(horizontal_slopes), 0.004)
#
# for i in range(1, len(horizontal_lines)):
# if last_horizontal_added == -1:
# if math.fabs(horizontal_lines[i][0]) <= min_slope * 2:
# filtered_horizontal_lines.append(horizontal_lines[i])
# last_horizontal_added = 0
# else:
# delta = horizontal_lines[i][4] - filtered_horizontal_lines[last_horizontal_added][4]
#
# if math.fabs(horizontal_lines[i][0]) <= min_slope * 2 and delta > height/100 and \
# delta > (last_delta - height/100) and horizontal_lines[i][4] < height * 0.7:# and \
# #len(filtered_horizontal_lines) <= 9:
# filtered_horizontal_lines.append(horizontal_lines[i])
# last_horizontal_added += 1
# last_delta = delta
#
# final_filtered_horizontal_lines = []
# filtered_horizontal_lines.sort(key=lambda tup: tup[4])
# for i in reversed(range(1, len(filtered_horizontal_lines))):
# delta = filtered_horizontal_lines[i][4] - filtered_horizontal_lines[i-1][4]
# if delta > height / 80:
# final_filtered_horizontal_lines.insert(0,filtered_horizontal_lines[i])
# if filtered_horizontal_lines[1][4] - filtered_horizontal_lines[0][4]:
# final_filtered_horizontal_lines.insert(0, filtered_horizontal_lines[0])
#
# final_length = len(final_filtered_horizontal_lines)
# # final_filtered_horizontal_lines = final_filtered_horizontal_lines[0:9]#final_length-9:final_length]
horizontal_lines = [[*line[2], *line[3]] for line in horizontal_lines]
for line in horizontal_lines:
cv2.line(cdst, (line[0],line[1]), (line[2], line[3]), (0, 0, 255), 3, cv2.LINE_AA)
# cv2.imshow(str(line[0]) + " " + str(line[1]), cdst)
# cv2.waitKey()
#
# plt.imshow(cdst)
# # cv2.imshow("Detected lines - Probabilistic Houh Line Transform", cdstP)
# plt.show()
y = chain.from_iterable(itemgetter(1, 3)(line) for line in horizontal_lines)
y = list(sorted(y))
y_differences = [0]
first_y = 0
last_y = inf
for i in range(len(y) - 1):
y_differences.append(y[i + 1] - y[i])
for i in range(len(y_differences) - 1):
if y_differences[i] == 0:
last_y = y[i]
if i > 3 and first_y == 0:
first_y = y[i]
return Image(img=self.rotated.color_img[first_y - 10:last_y + 10]), first_y - 10, last_y + 10
def rotate_img(self) -> Tuple[Image, float, Tuple[float, float]]:
med_slope = self.calc_med_slope()
rotation_angle = - med_slope * 60
image_center = tuple(np.array(self.color_img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, rotation_angle, 1.0)
rotated = cv2.warpAffine(self.color_img, rot_mat, self.color_img.shape[1::-1], flags=cv2.INTER_LINEAR)
return Image(img=rotated), rotation_angle, image_center # , file_name=guitar_image.name)
def calc_med_slope(self) -> float:
edges = cv2.Canny(self.blur_gray, 30, 150)
mag = self.get_magnitude(edges)
lines = cv2.HoughLinesP(mag, 1, np.pi / 180, 15, 50, 50)
slopes = []
for line in lines:
x1, y1, x2, y2 = line[0]
slope = float(y2 - y1) / (float(x2 - x1) + 0.001)
slopes.append(slope)
return median(slopes)
@staticmethod
def get_magnitude(img):
gradient_X = cv2.Sobel(img, cv2.CV_64F, 1, 0)
gradient_Y = cv2.Sobel(img, cv2.CV_64F, 0, 1)
magnitude = np.sqrt((gradient_X ** 2) + (gradient_Y ** 2))
magnitude = cv2.convertScaleAbs(magnitude)
return magnitude
def restore_coordinates(self, rotated_X: int, rotated_Y: int, center: Tuple[float, float]) -> Coordinate:
rad = self.rotation_angle * math.pi / 180
p, q = center
restored_X = ((rotated_X - p) * math.cos(rad)) - ((rotated_Y - q) * math.sin(rad)) + p
restored_Y = ((rotated_X - p) * math.sin(rad)) + ((rotated_Y - q) * math.cos(rad)) + q
return self.Coordinate(x=int(restored_X), y=int(restored_Y))
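    # The two expressions above apply the standard 2D rotation about `center`:
    #   x' = (x - p)·cos(θ) - (y - q)·sin(θ) + p
    #   y' = (x - p)·sin(θ) + (y - q)·cos(θ) + q
    # with θ = rotation_angle in radians, intended to map a point found in the
    # rotated image back into the coordinate frame of the original image.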
|
"""
A program for analyzing 3D protein structures from the PDB to generate 2D binding motifs. For further information see https://github.com/Cardypro/StructureAnalyzer
"""
import math
import os
from typing import Dict, Tuple, List, Union, Optional
from dataclasses import dataclass
from collections import defaultdict
import networkx as nx
import pysmiles as ps
from pymol import cmd, stored
from tabulate import tabulate
vdwRadii: Dict[str, Optional[float]] = {}
def defineDict(defaultRadius: Optional[float]) -> None:
"""
defines the vdw-radii dict as given by Truhlar et al. If the key isn't in the dict, the defaultRadius will be returned.
"""
global vdwRadii
vdwRadii = defaultdict(lambda: defaultRadius)
vdwRadii.update({
"H": 1.10,
"Li": 1.81,
"Na": 2.27,
"K": 2.75,
"Rb": 3.03,
"Cs": 3.43,
"Fr": 3.48, # End I
"Be": 1.53,
"Mg": 1.73,
"Ca": 2.31,
"Sr": 2.49,
"Ba": 2.68,
"Ra": 2.83, # End II
"B": 1.92,
"Al": 1.84,
"Ga": 1.87,
"In": 1.93,
"Tl": 1.96, # End III
"C": 1.70,
"Si": 2.10,
"Ge": 2.11,
"Sn": 2.17,
"Pb": 2.02, # End IV
"N": 1.55,
"P": 1.80,
"As": 1.85,
"Sb": 2.06,
"Bi": 2.07, # End V
"O": 1.52,
"S": 1.80,
"Se": 1.90,
"Te": 2.06,
"Po": 1.97, # End VI
"F": 1.47,
"Cl": 1.75,
"Br": 1.83,
"I": 1.98,
"At": 2.02, # End VII
"He": 1.40,
"Ne": 1.54,
"Ar": 1.88,
"Kr": 2.02,
"Xe": 2.16,
"Rn": 2.20 # End Main Group
})
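# Example (illustrative): after defineDict(2.0), vdwRadii["C"] is 1.70, and an
# element missing from the table, e.g. vdwRadii["Fe"], falls back to 2.0;
# with defineDict(None) the fallback is None and getCutoff() reports it.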
@dataclass
class Atom:
"""class representing an Atom in the pdb-file
parameter:
float x: pos x
float y: pos y
float z: pos z
str model: which protein, e.g. 6hn0
str chain: which side chain, e.g. A
str resn: name of residue, e.g. DIF or ASN
str resi: identifier of residue, e.g. 607
str name: name of atom, e.g. CL4
str element: element of atom, e.g. CL
"""
x: float = 0 # pos x
y: float = 0 # pos y
z: float = 0 # pos z
model: str = "none" # which protein, e.g. 6hn0
chain: str = "none" # which sidechain, e.g. A
resn: str = "none" # name of residue, e.g. DIF
resi: str = "none" # identifier of residue, e.g. 607
name: str = "none" # name of atom, e.g. CL4
elem: str = "none"
@property
def element(self) -> str:
"""
Returns:
string: element with capital first letter as usual (e.g. CL -> Cl)
"""
return self.elem[0]+self.elem[1:].lower() # element, e.g. Cl
@property
def identifierString(self) -> str:
"""
Returns:
            string: identifierString to address a certain Atom in the pdb structure via PyMOL
"""
return f"{self.model}//{self.chain}/{self.resn}`{self.resi}/{self.name}"
@property
def pos(self) -> Tuple[float, float, float]:
"""
Returns:
triple: cartesian coordinates of the atom
"""
return (self.x, self.y, self.z)
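    # Example (illustrative, using the sample values from the docstring above):
    # an Atom with model "6hn0", chain "A", resn "DIF", resi "607", name "CL4"
    # yields identifierString "6hn0//A/DIF`607/CL4" and element "Cl".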
@dataclass
class Interaction:
"""
    class representing an Interaction between two Atoms
"""
atomA: Atom
atomB: Atom
dist: float
def calcDist(pos1: Tuple[float, float, float], pos2: Tuple[float, float, float]) -> float:
"""
calculates the 3D-distance of two given coordinates
"""
x1 = pos1[0]
y1 = pos1[1]
z1 = pos1[2]
x2 = pos2[0]
y2 = pos2[1]
z2 = pos2[2]
dist = math.sqrt((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)
return dist
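# Worked example (illustrative): calcDist((0, 0, 0), (1, 2, 2)) == 3.0,
# since sqrt(1^2 + 2^2 + 2^2) = sqrt(9) = 3.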
def calcCogFromStr(selection: str) -> Tuple[float, float, float]:
"""
calculates the center of geometry of a given PyMOL selection
"""
stored.cogX, stored.cogY, stored.cogZ = 0, 0, 0
    stored.i = 0  # atom counter; incremented once per atom in the iterate_state loop below
    # wrapped in a dummy if-block so several statements can run inside a single cmd.iterate_state expression (PyMOL quirk)
cmd.iterate_state(-1, selection, """\
if(True):
stored.cogX += x
stored.cogY += y
stored.cogZ += z
stored.i += 1
""")
return(stored.cogX/stored.i, stored.cogY/stored.i, stored.cogZ/stored.i)
def calcCogFromList(entries: List[Atom]) -> Tuple[float, float, float]:
"""
calculates the center of geometry of a given Array containing atoms
"""
sumX, sumY, sumZ = 0.0, 0.0, 0.0
for entry in entries:
sumX += entry.x
sumY += entry.y
sumZ += entry.z
avgX = sumX/len(entries)
avgY = sumY/len(entries)
avgZ = sumZ/len(entries)
return(avgX, avgY, avgZ)
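# Worked example (illustrative): three atoms at (0, 0, 0), (3, 0, 0) and
# (0, 3, 0) give a center of geometry of (1.0, 1.0, 0.0).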
def calcCog(argument: Union[str, list]) -> Tuple[float, float, float]:
"""
calculates the Center of Geometry of a given selection or list of atoms
Args:
argument (str or list): either a PyMOL-selection name or a List of atoms
Returns:
Tuple[float, float, float]: 3D-coords of CoG
"""
if isinstance(argument, str):
return calcCogFromStr(argument)
if isinstance(argument, list):
return calcCogFromList(argument)
exit("unable to calculate the CoG from the given argument")
return (0, 0, 0)
def analyzeInput(inputString: str) -> Tuple[List[str], List[str], List[str]]:
"""
splits the input string so it can be read
Args:
inputString (str): has to be like "elemA|elemB|... factor*vdw elemC|elemD|..."
Returns:
list: list of lists. Like [['C', 'N'], ['2','vdw'], ['C', 'O']]
"""
inputParts = inputString.split()
inputA = inputParts[0].split("|")
length = inputParts[1].split("*")
inputB = inputParts[2].split("|")
return (inputA, length, inputB)
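# Example (illustrative): analyzeInput("C|N 2*vdw O")
# returns (['C', 'N'], ['2', 'vdw'], ['O']).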
def getCutoff(array: Tuple[Atom, List[str], Atom]) -> Optional[float]:
"""
calculates cutoff via vdwRadii
Args:
array (list): like [Atom1, ['factor','vdw'], Atom2]
Returns:
float: max distance between the atoms to be evaluated as interaction
"""
elementA = array[0].element
elementB = array[2].element
if elementA not in vdwRadii:
print(f"{elementA} not found. Using default radius instead.")
if elementB not in vdwRadii:
print(f"{elementB} not found. Using default radius instead.")
radiusA = vdwRadii[elementA]
radiusB = vdwRadii[elementB]
if radiusA is None:
print(
f"Unable to evaluate vdwRadii for {elementA} since no default radius is given.")
return None
if radiusB is None:
print(
f"Unable to evaluate vdwRadii for {elementB} since no default radius is given.")
return None
factor = float(array[1][0])
return (radiusA + radiusB) * factor
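# Worked example (illustrative): for a carbon/oxygen pair with the factor
# entry ['1', 'vdw'], the cutoff is (1.70 + 1.52) * 1 = 3.22 Angstrom.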
def buildGraph(atomlist: List[Atom]) -> nx.Graph:
"""
turns the given molecule (list of atoms) into a network graph
Args:
atomlist (list of Atoms): all Atoms belonging to a molecule
Returns:
networkx.Graph
"""
visitedAtoms = []
queue = atomlist
graph = nx.Graph()
cmd.h_add()
while len(queue) != 0:
stored.currNeighbor = []
currentNode = queue.pop(-1)
cmd.select("neighborSelection",
f"neighbor {currentNode.identifierString}")
stored.currentResn = currentNode.resn
cmd.iterate_state(-1, "neighborSelection", """\
if resn == stored.currentResn:
stored.currNeighbor.append(Atom(x, y, z, model, chain, resn, resi, name, elem))
""")
graph.add_node(currentNode.identifierString,
element=currentNode.element, charge=0)
for atom in stored.currNeighbor:
graph.add_edge(currentNode.identifierString, atom.identifierString)
if atom.identifierString not in visitedAtoms:
visitedAtoms.append(atom.identifierString)
queue.append(atom)
ps.fill_valence(graph, respect_hcount=True, respect_bond_order=False)
cmd.remove("hydro")
return graph
# writes a .mrv-file (XML format) that can be opened with e.g. Marvinsketch
def writeXML(graph: nx.Graph, interactionList: List[Interaction], pdbCode: str, ligand: List[Atom]) -> None:
"""
writes a .mrv-file (XML format) that can be opened with e.g. Marvin Sketch
Args:
        graph (networkx.Graph): molecular graph whose nodes and edges are written out as atoms and bonds
"""
    # open the per-ligand output file (the Output folder itself is created in StructureAnalyzer)
ligandName = f"{ligand[0].resn}{ligand[0].resi}"
file = open(
(f"./Output/{pdbCode} {ligandName}.mrv"), "w", encoding="utf-8")
file.write("<MDocument>\n<MChemicalStruct>\n<molecule>\n")
dictionary = dict()
# all atoms
file.write("<atomArray>\n")
nodeID = 1
for node in list(graph.nodes(data=True)):
nodeIdentifier = node[0]
nodeDict = node[1]
if nodeDict["element"] != "H":
file.write("<atom id=\"a" + str(nodeID) +
"\" elementType=\"" + nodeDict["element"] + "\"/>" + "\n")
dictionary[nodeIdentifier] = nodeID
nodeID += 1
file.write("</atomArray>\n")
# all bonds
file.write("<bondArray>\n")
for edge in graph.edges.data():
startAtom = edge[0]
endAtom = edge[1]
bondOrder = edge[2]["order"]
if graph.nodes[endAtom]["element"] != "H" and graph.nodes[startAtom]["element"] != "H":
file.write("<bond atomRefs2=\"a" + str(dictionary[startAtom]) + " a" + str(
dictionary[endAtom]) + "\" order=\"" + str(bondOrder) + "\"/>\n")
file.write("</bondArray>\n</molecule>\n</MChemicalStruct>\n")
# interactions
interactionID = 0
for interactions in interactionList:
try:
atomA = interactions.atomA
atomB = interactions.atomB
file.write("<MPolyline id=\"line" + str(interactionID) +
"\" lineColor=\"#ff9933\" thickness=\"0.04\">\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomA.identifierString]) + "\"/>\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomB.identifierString]) + "\"/>\n")
file.write("</MPolyline>\n")
except:
print("Error writing interactions tags\n", interactions, ligandName)
file.close()
return
# distances
file.write("<MTextBox id=\"distBox" +
str(interactionID) + "\" autoSize=\"true\">\n")
file.write("<Field name=\"text\"><![CDATA[{D font=Arial,size=9}{fg=#000000}" + str(
round(interactions.dist, 3)) + " \u00c5]]></Field>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("</MTextBox>\n")
file.write("<MPolyline id=\"distLine" + str(interactionID) +
"\" lineColor=\"#000000\" thickness=\"0.01\">\n")
file.write("<MRectanglePoint pos=\"4\" rectRef=\"distBox" +
str(interactionID) + "\"/>\n")
file.write("<MMidPoint lineRef=\"line" + str(interactionID) + "\"/>\n")
file.write("</MPolyline>\n")
interactionID += 1
# name tags for interactions
nameID = 0
done = []
for interactions in interactionList:
try:
atomB = interactions.atomB
if (atomB.resn, atomB.resi) not in done and atomB.resn != "HOH": # no water tag
done.append((atomB.resn, atomB.resi))
file.write(
f"<MTextBox id=\"box{nameID}\" autoSize=\"true\">\n")
file.write("<Field name=\"text\"><![CDATA[{D font=Arial,size=11}{fg=#000000}" + atomB.resn[0] +
atomB.resn[1:].lower() + " " + atomB.resi + "]]></Field>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("</MTextBox>\n")
file.write("<MPolyline id=\"boxline" + str(nameID) +
"\" thickness=\"0.01\" lineColor=\"#0000ff\">\n")
file.write("<MRectanglePoint pos=\"4\" rectRef=\"box" +
str(nameID) + "\"/>\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomB.identifierString]) + "\"/>\n")
nameID += 1
file.write("</MPolyline>\n")
except:
print("Error writing name tags\n", interactions, ligandName)
file.close()
return
file.write("</MDocument>")
file.close()
def writeTable(file, interactionList: List[Interaction]) -> None:
"""
writes the interaction table to a markdown file
Args:
file (filehandle): the file to be written in
interactionList (list): list of Interaction objects
"""
AtomName = interactionList[0].atomA
file.write(f"\n # {AtomName.resn} {AtomName.resi} \n")
table = []
for interaction in interactionList:
AtomA = interaction.atomA
AtomB = interaction.atomB
dist = interaction.dist
table.append([f"{AtomA.resn} {AtomA.resi}/{AtomA.name}", dist,
f"{AtomB.resn} {AtomB.resi}/{AtomB.name}", f"{AtomB.element}"])
formatedTable = tabulate(table, headers=[
"atom ligand", "distance [A]", "atom pocket", "element"], tablefmt="github")
print(formatedTable)
file.write(formatedTable)
file.close()
def StructureAnalyzer(pdbCode: str = "6hn0", ligandCode: str = "DIF", inputString: str = "* 1*vdw *", ignoreH2O: bool = False, defaultRadius: Optional[float] = None, pocketSize: float = 8.0, writeMD: bool = True) -> None:
"""
Main-code. Calculates the distances between a selected ligand and all atoms within a given cutoff-restriction of a given .pdb-code.
Args:
pdbCode (str, optional): Determines the protein structure from pdb. Defaults to "6hn0".
ligandCode (str, optional): Determines the pdb code of the ligand. Defaults to "DIF".
inputString (str, optional): see readme. Defaults to "* 1*vdw *".
ignoreH2O (bool, optional): Determines if water should be ignored. Defaults to False.
defaultRadius (float, optional): Default atom radius if no radius is given for the element. Defaults to None.
pocketSize (float, optional): View distance of pocket and ligand in pyMOL. Defaults to 8.
        writeMD (bool, optional): Determines if a markdown file should be written. Defaults to True.
"""
try:
os.mkdir("Output")
except:
pass
if writeMD:
mdFile = open((f"./Output/{pdbCode}.md"), "w", encoding="utf-8")
mdFile.close()
defineDict(defaultRadius)
cmd.reinitialize()
condition = analyzeInput(inputString)
cmd.fetch(pdbCode) # downloads given .pdb-file
cmd.remove("hydro")
cmd.select("allLigands", "resn " + ligandCode)
stored.allLigandsAtoms = []
stored.oldResi = ""
# iterates all Atoms belonging to the given ligand code and splits them up so you have an array of atoms
cmd.iterate_state(-1, "allLigands", """\
if(resi == stored.oldResi):
stored.allLigandsAtoms[(len(stored.allLigandsAtoms)-1)].append(Atom(x, y, z, model, chain, resn, resi, name, elem))
else:
stored.oldResi = resi
stored.allLigandsAtoms.append([Atom(x, y, z, model, chain, resn, resi, name, elem)])
""")
    # iterate over every ligand occurrence matching the given ligand code
for ligands in stored.allLigandsAtoms:
ligandResName = ligands[0].resn # e.g. DIF
ligandResID = ligands[0].resi # e.g. 601
LigandName = ligandResName + str(ligandResID) # e.g. DIFxxx
print(f"Analyzing {LigandName}...")
# drawing pocket and ligand
cmd.hide('all')
cmd.select(LigandName, ligandResName +
"`" + str(ligandResID) + "/")
cmd.select('view', 'br. all within ' + str(pocketSize) +
' of ' + LigandName)
pocketLayerName = f"pocket_{LigandName}"
cmd.select(pocketLayerName, 'view and not ' + LigandName)
cmd.show('sticks', pocketLayerName)
cmd.show('sticks', LigandName)
cmd.show('nb_spheres', pocketLayerName)
cmd.show('nb_spheres', LigandName)
cmd.util.cbaw(pocketLayerName)
cmd.util.cbao(LigandName)
stored.atomsPocket = [] # all Atoms of the Pocket
# reads all informations belonging to the selected binding pocket
cmd.iterate_state(-1, pocketLayerName,
"stored.atomsPocket.append(Atom(x, y, z, model, chain, resn, resi, name, elem))")
interactionList = []
atomsForGraph = []
        # core loop: compute the distance from every ligand atom to every pocket atom; if it is below the cutoff, the interaction is drawn and recorded
for ligandAtoms in ligands:
atomsForGraph.append(ligandAtoms)
conditionElementsLigand = condition[0]
if not (ligandAtoms.element in conditionElementsLigand or "*" in conditionElementsLigand):
continue
for pocketAtoms in stored.atomsPocket:
if (pocketAtoms.resn == "HOH") and ignoreH2O:
continue
conditionElementsPocket = condition[2]
if not (pocketAtoms.element in conditionElementsPocket or "*" in conditionElementsPocket):
continue
conditionDistance = condition[1]
if "vdw" in conditionDistance:
cutoff = getCutoff(
(ligandAtoms, conditionDistance, pocketAtoms))
else:
cutoff = float(conditionDistance[0])
if cutoff is None:
continue
currDist = calcDist(ligandAtoms.pos, pocketAtoms.pos)
if currDist > cutoff:
continue
interactionLayerName = f"inter_{LigandName}"
cmd.distance(
interactionLayerName, ligandAtoms.identifierString, pocketAtoms.identifierString, cutoff+1)
cmd.color("cyan", interactionLayerName)
cmd.show("dashes", interactionLayerName)
interactionList.append(Interaction(
ligandAtoms, pocketAtoms, currDist))
atomsForGraph.append(pocketAtoms)
currGraph = buildGraph(atomsForGraph)
writeXML(currGraph, interactionList, pdbCode, ligands)
print(f"Analyzing {LigandName} finished")
if writeMD:
mdFile = open((f"./Output/{pdbCode}.md"), "a", encoding="utf-8")
writeTable(mdFile, interactionList)
print(f"Analyzing {pdbCode} finished")
def multipleAnalyzer(pdbArray: List[str], ligand: str = "DIF", inputString: str = "* 1*vdw *", ignoreH2O: bool = False, defaultRadius: Optional[float] = None) -> None:
"""
executes the StructureAnalyzer multiple times for a list of pdb-codes
Args:
pdbArray (List[str]): list containing the pdb-codes to be analyzed
ligand (str, optional): pdb-code of the ligand. Defaults to "DIF".
inputString (str, optional): String determining the cutoff criteria. Defaults to "* 1*vdw *".
ignoreH2O (bool, optional): Decides if water should be ignored evaluating the interactions. Defaults to False.
        defaultRadius (Optional[float], optional): Fallback radius if an atom radius is not in the list given by Truhlar et al. Defaults to None.
"""
for code in pdbArray:
cmd.reinitialize()
print(f"\n start {code}")
StructureAnalyzer(code, ligand, inputString, ignoreH2O, defaultRadius)
|
from api.views import ShopViewSet, ProductViewSet
from rest_framework import routers
router = routers.DefaultRouter()
router.register('shop', ShopViewSet, basename='Shop')
router.register('product', ProductViewSet, basename='Product')
|
"""steps v.2
Revision ID: d115acfbcb61
Revises: 33f050e2a226
Create Date: 2021-05-07 20:24:39.438596
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd115acfbcb61'
down_revision = '33f050e2a226'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'steps', ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'steps', type_='unique')
# ### end Alembic commands ###
|
CapnpLangToolchainInfo = provider(fields = {
"lang_shortname": "short name for the toolchain / language, e.g. 'cc', 'java', 'rust', etc.",
"plugin": "plugin target to pass to capnp_tool for this language",
"plugin_deps": "plugin depepencies to pass to capnp_tool invocation for this language",
"runtime": "language-dependent runtime target to e.g. link with compiled libraries",
})
def _capnp_lang_toolchain_impl(ctx):
return CapnpLangToolchainInfo(
lang_shortname = ctx.attr.lang_shortname,
plugin = ctx.attr.plugin,
plugin_deps = ctx.attr.plugin_deps,
runtime = ctx.attr.runtime,
)
capnp_lang_toolchain = rule(
implementation = _capnp_lang_toolchain_impl,
attrs = {
"lang_shortname": attr.string(),
"plugin": attr.label(
allow_single_file = True,
cfg = "exec",
executable = True,
),
"plugin_deps": attr.label_list(
allow_files = True,
cfg = "exec",
),
"runtime": attr.label(
cfg = "target",
),
},
)
|
class TreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def create_binary_tree(input_list=None):
if input_list is None or len(input_list) == 0:
return None
data = input_list.pop(0)
if data is None:
return None
node = TreeNode(data)
node.left = create_binary_tree(input_list)
node.right = create_binary_tree(input_list)
return node
def pre_order_traversal_with_stack(node):
stack = []
while node is not None or len(stack) > 0:
while node is not None:
print(node.data)
stack.append(node)
node = node.left
if len(stack) > 0:
node = stack.pop()
node = node.right
my_input_list = list([3, 2, 9, None, None, 10, None, None, 8, None, 4])
root = create_binary_tree(my_input_list)
print("前序遍历:")
pre_order_traversal_with_stack(root)
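# For the sample list above the constructed tree is 3 -> (2 -> (9, 10), 8 -> (None, 4)),
# so the stack-based pre-order traversal prints: 3 2 9 10 8 4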
|
import torch.nn as nn
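# Note: the helper below is written to be bound as a method of an nn.Module
# subclass (it iterates self.modules()); MeanSpectralNormConv2d is assumed to
# be a Conv2d wrapper defined elsewhere in the surrounding project and is not
# imported here.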
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, MeanSpectralNormConv2d):
nn.init.kaiming_normal_(m.conv.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if m.conv.bias is not None:
nn.init.constant_(m.conv.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from uuid import UUID
import azure.functions as func
from onefuzztypes.enums import ErrorCode, NodeState
from onefuzztypes.models import Error
from onefuzztypes.requests import AgentRegistrationGet, AgentRegistrationPost
from onefuzztypes.responses import AgentRegistration
from ..onefuzzlib.agent_authorization import verify_token
from ..onefuzzlib.azure.creds import get_fuzz_storage, get_instance_url
from ..onefuzzlib.azure.queue import get_queue_sas
from ..onefuzzlib.pools import Node, NodeMessage, Pool
from ..onefuzzlib.request import not_ok, ok, parse_uri
def create_registration_response(machine_id: UUID, pool: Pool) -> func.HttpResponse:
base_address = get_instance_url()
events_url = "%s/api/agents/events" % base_address
commands_url = "%s/api/agents/commands" % base_address
work_queue = get_queue_sas(
pool.get_pool_queue(),
account_id=get_fuzz_storage(),
read=True,
update=True,
process=True,
)
return ok(
AgentRegistration(
events_url=events_url,
commands_url=commands_url,
work_queue=work_queue,
)
)
def get(req: func.HttpRequest) -> func.HttpResponse:
get_registration = parse_uri(AgentRegistrationGet, req)
if isinstance(get_registration, Error):
return not_ok(get_registration, context="agent registration")
agent_node = Node.get_by_machine_id(get_registration.machine_id)
if agent_node is None:
return not_ok(
Error(
code=ErrorCode.INVALID_REQUEST,
errors=[
"unable to find a registration associated with machine_id '%s'"
% get_registration.machine_id
],
),
context="agent registration",
status_code=404,
)
else:
pool = Pool.get_by_name(agent_node.pool_name)
if isinstance(pool, Error):
return not_ok(
Error(
code=ErrorCode.INVALID_REQUEST,
errors=[
"unable to find a pool associated with the provided machine_id"
],
),
context="agent registration",
)
return create_registration_response(agent_node.machine_id, pool)
def post(req: func.HttpRequest) -> func.HttpResponse:
registration_request = parse_uri(AgentRegistrationPost, req)
logging.info("Registration request: %s", (registration_request))
if isinstance(registration_request, Error):
return not_ok(registration_request, context="agent registration")
pool = Pool.get_by_name(registration_request.pool_name)
if isinstance(pool, Error):
return not_ok(
Error(
code=ErrorCode.INVALID_REQUEST,
errors=["unable to find pool '%s'" % registration_request.pool_name],
),
context="agent registration",
)
node = Node.get_by_machine_id(registration_request.machine_id)
if node:
if node.version != registration_request.version:
NodeMessage.clear_messages(node.machine_id)
node.version = registration_request.version
node.reimage_requested = False
node.state = NodeState.init
node.reimage_queued = False
else:
node = Node(
pool_name=registration_request.pool_name,
machine_id=registration_request.machine_id,
scaleset_id=registration_request.scaleset_id,
version=registration_request.version,
)
node.save()
# if any tasks were running during an earlier instance of this node, clear them out
node.mark_tasks_stopped_early()
return create_registration_response(node.machine_id, pool)
def main(req: func.HttpRequest) -> func.HttpResponse:
if req.method == "POST":
m = post
elif req.method == "GET":
m = get
else:
raise Exception("invalid method")
return verify_token(req, m)
|
from __future__ import annotations
from racketinterpreter.classes import errors as err
from racketinterpreter.classes import tokens as t
class ParenthesesAnalyzer:
PAREN_MAP = {
'(': ')',
'[': ']',
'{': '}'
}
def __init__(self) -> None:
self.paren_stack = []
def received_paren(self, token: t.Token) -> None:
if token.type is t.TokenType.LPAREN:
self.paren_stack.append(token)
elif token.type is t.TokenType.RPAREN:
if len(self.paren_stack) == 0:
raise err.ParenError(
error_code=err.ErrorCode.RS_UNEXPECTED_RIGHT_PARENTHESIS,
token=token
)
else:
left_paren = self.paren_stack[-1].value
if self.PAREN_MAP[left_paren] != token.value:
raise err.ParenError(
error_code=err.ErrorCode.RS_INCORRECT_RIGHT_PARENTHESIS,
token=token,
left_paren=left_paren,
correct_right_paren=self.PAREN_MAP[left_paren],
incorrect_right_paren=token.value
)
self.paren_stack.pop()
def reached_eof(self, token: t.Token) -> None:
if len(self.paren_stack) != 0:
left_paren = self.paren_stack[-1].value
raise err.ParenError(
error_code=err.ErrorCode.RS_EXPECTED_RIGHT_PARENTHESIS,
token=token,
left_paren=left_paren,
right_paren=self.PAREN_MAP[left_paren]
)
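# Illustrative usage (token objects are assumed to carry .type and .value):
#   analyzer = ParenthesesAnalyzer()
#   analyzer.received_paren(lparen_token)   # pushes an opening paren
#   analyzer.received_paren(rparen_token)   # pops it if the bracket kinds match
#   analyzer.reached_eof(eof_token)         # raises ParenError if anything is still open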
|
"""
This module provides classes for testing the RanklistRow object
"""
import unittest
from codeforces import RanklistRow, Party, ProblemResult
class RanklistRowTests(unittest.TestCase):
def setUp(self):
self.row = RanklistRow()
    def test_load_from_dict(self):
d = {
"party": {
"contestId": 374,
"members": [{"handle": "Deception"}],
"participantType": "CONTESTANT",
"ghost": False,
"room": 46,
"startTimeSeconds": 1387380600
},
"rank": 1,
"points": 4902.0,
"penalty": 0,
"successfulHackCount": 11,
"unsuccessfulHackCount": 1,
"problemResults": [
{
"points": 312.0,
"rejectedAttemptCount": 1,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4174
}, {
"points": 596.0,
"rejectedAttemptCount": 2,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4583
}, {
"points": 1128.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 3751
}, {
"points": 1816.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 1430
}, {
"points": 0.0,
"rejectedAttemptCount": 0,
"type": "FINAL"
}
],
"lastSubmissionTimeSeconds": 424242
}
self.row.load_from_dict(d)
self.assertEqual(Party(d['party']), self.row.party)
self.assertEqual(1, self.row.rank)
self.assertEqual(4902.0, self.row.points)
self.assertEqual(0, self.row.penalty)
self.assertEqual(11, self.row.successful_hack_count)
self.assertEqual(1, self.row.unsuccessful_hack_count)
self.assertEqual(list(map(ProblemResult, d['problemResults'])), self.row.problem_results)
self.assertEqual(424242, self.row.last_submission_time)
    def test_load_only_required_from_dict(self):
"""
Required fields are:
party
rank
points
penalty
successfulHackCount
unsuccessfulHackCount
problemResults
"""
d = {
"party": {
"contestId": 374,
"members": [{"handle": "Deception"}],
"participantType": "CONTESTANT",
"ghost": False,
"room": 46,
"startTimeSeconds": 1387380600
},
"rank": 1,
"points": 4902.0,
"penalty": 0,
"successfulHackCount": 11,
"unsuccessfulHackCount": 1,
"problemResults": [
{
"points": 312.0,
"rejectedAttemptCount": 1,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4174
}, {
"points": 596.0,
"rejectedAttemptCount": 2,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4583
}, {
"points": 1128.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 3751
}, {
"points": 1816.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 1430
}, {
"points": 0.0,
"rejectedAttemptCount": 0,
"type": "FINAL"
}
]
}
self.row.load_from_dict(d)
self.assertEqual(Party(d['party']), self.row.party)
self.assertEqual(1, self.row.rank)
self.assertEqual(4902.0, self.row.points)
self.assertEqual(0, self.row.penalty)
self.assertEqual(11, self.row.successful_hack_count)
self.assertEqual(1, self.row.unsuccessful_hack_count)
self.assertEqual(list(map(ProblemResult, d['problemResults'])), self.row.problem_results)
self.assertIsNone(self.row.last_submission_time)
def test_load_from_json(self):
d = {
"party": {
"contestId": 374,
"members": [{"handle": "Deception"}],
"participantType": "CONTESTANT",
"ghost": False,
"room": 46,
"startTimeSeconds": 1387380600
},
"rank": 1,
"points": 4902.0,
"penalty": 0,
"successfulHackCount": 11,
"unsuccessfulHackCount": 1,
"problemResults": [
{
"points": 312.0,
"rejectedAttemptCount": 1,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4174
}, {
"points": 596.0,
"rejectedAttemptCount": 2,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4583
}, {
"points": 1128.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 3751
}, {
"points": 1816.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 1430
}, {
"points": 0.0,
"rejectedAttemptCount": 0,
"type": "FINAL"
}
],
"lastSubmissionTimeSeconds": 424242
}
json = str(d).replace('False', 'false').replace("'", '"')
self.row.load_from_json(json)
self.assertEqual(Party(d['party']), self.row.party)
self.assertEqual(1, self.row.rank)
self.assertEqual(4902.0, self.row.points)
self.assertEqual(0, self.row.penalty)
self.assertEqual(11, self.row.successful_hack_count)
self.assertEqual(1, self.row.unsuccessful_hack_count)
self.assertEqual(list(map(ProblemResult, d['problemResults'])), self.row.problem_results)
self.assertEqual(424242, self.row.last_submission_time)
def test_load_only_required_from_json(self):
"""
Required fields are:
party
rank
points
penalty
successfulHackCount
unsuccessfulHackCount
problemResults
"""
d = {
"party": {
"contestId": 374,
"members": [{"handle": "Deception"}],
"participantType": "CONTESTANT",
"ghost": False,
"room": 46,
"startTimeSeconds": 1387380600
},
"rank": 1,
"points": 4902.0,
"penalty": 0,
"successfulHackCount": 11,
"unsuccessfulHackCount": 1,
"problemResults": [
{
"points": 312.0,
"rejectedAttemptCount": 1,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4174
}, {
"points": 596.0,
"rejectedAttemptCount": 2,
"type": "FINAL",
"bestSubmissionTimeSeconds": 4583
}, {
"points": 1128.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 3751
}, {
"points": 1816.0,
"rejectedAttemptCount": 0,
"type": "FINAL",
"bestSubmissionTimeSeconds": 1430
}, {
"points": 0.0,
"rejectedAttemptCount": 0,
"type": "FINAL"
}
]
}
json = str(d).replace('False', 'false').replace("'", '"')
self.row.load_from_json(json)
self.assertEqual(Party(d['party']), self.row.party)
self.assertEqual(1, self.row.rank)
self.assertEqual(4902.0, self.row.points)
self.assertEqual(0, self.row.penalty)
self.assertEqual(11, self.row.successful_hack_count)
self.assertEqual(1, self.row.unsuccessful_hack_count)
self.assertEqual(list(map(ProblemResult, d['problemResults'])), self.row.problem_results)
self.assertIsNone(self.row.last_submission_time)
if __name__ == '__main__':
unittest.main()
|
# pylint:disable=ungrouped-imports
from unittest.mock import patch
import pytest
import activitylogs
import auditor
import tracker
from event_manager.events import permission as permission_events
from tests.utils import BaseTest
@pytest.mark.auditor_mark
class AuditorPermissionTest(BaseTest):
"""Testing subscribed events"""
def setUp(self):
auditor.validate()
auditor.setup()
tracker.validate()
tracker.setup()
activitylogs.validate()
activitylogs.setup()
super().setUp()
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_project_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_PROJECT_DENIED,
id=1,
user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_repo_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_REPO_DENIED,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_experiment_group_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_EXPERIMENT_GROUP_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_experiment_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_EXPERIMENT_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_tensorboard_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_TENSORBOARD_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_notebook_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_NOTEBOOK_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_build_job_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_BUILD_JOB_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_experiment_job_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_EXPERIMENT_JOB_DENIED,
id=1,
user_id=2,
project_id=1,
project_user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_cluster_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_CLUSTER_DENIED,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_permission_user_role_denied(self, activitylogs_record, tracker_record):
auditor.record(event_type=permission_events.PERMISSION_USER_ROLE_DENIED,
user_id=2,
actor_id=1,
actor_name='foo',
event='some.event')
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
|
# encoding: UTF-8
from __future__ import print_function
from .order_type import *
class OrderManager(object):
'''
Manage/track all the orders
'''
def __init__(self):
        self._internal_order_id = 0  # next unique internal order id to assign
        self._order_dict = {}  # maps internal_order_id -> order object
def place_order(self, o):
if o.internal_order_id < 0: # internal_order_id not yet assigned
o.internal_order_id = self._internal_order_id
self._internal_order_id = self._internal_order_id + 1
self._order_dict[o.internal_order_id] = o
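# Illustrative usage sketch (hypothetical; assumes an order object from
# order_type that exposes an ``internal_order_id`` attribute initialized to -1,
# which is what ``place_order`` relies on):
#
#     om = OrderManager()
#     o = Order(...)            # any object with internal_order_id == -1
#     om.place_order(o)         # assigns id 0 and tracks it in om._order_dict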
|
import logging
import typing
import random
import heapq
import time
import asyncio
import threading
from .actor import Actor
from .message import ActorMessage
from .state import ActorState, OUTBOX, EXPORT, ERROR, ERROR_NOTRY
from .storage import ActorLocalStorage
from .registery import ActorRegistery
from .builtin_actors.name import (
ACTOR_SYSTEM,
ACTOR_MESSAGE_FETCHER,
ACTOR_MESSAGE_ACKER,
ACTOR_MESSAGE_NOTIFY_SENDER,
ACTOR_STORAGE_COMPACTOR,
)
from .prometheus import metric_queue_op, ACTOR_QUEUE_INBOX_SIZE, ACTOR_QUEUE_OUTBOX_SIZE
LOG = logging.getLogger(__name__)
class ActorStorageState:
def __init__(self, storage, state):
self._storage = storage
self._state = state
def __getattr__(self, *args, **kwargs):
return getattr(self._state, *args, **kwargs)
def apply(self, type, **kwargs):
self._state.apply(type, **kwargs)
self._storage.append(type, **kwargs)
def apply_notify(self, **kwargs):
self.apply('notify', **kwargs)
def apply_inbox(self, **kwargs):
self.apply('inbox', **kwargs)
def apply_execute(self, **kwargs):
self.apply('execute', **kwargs)
def apply_outbox(self, **kwargs):
self.apply('outbox', **kwargs)
def apply_done(self, **kwargs):
self.apply('done', **kwargs)
def apply_complete(self, **kwargs):
self.apply('complete', **kwargs)
def apply_export(self, **kwargs):
self.apply('export', **kwargs)
def apply_acked(self, **kwargs):
self.apply('acked', **kwargs)
def apply_retry(self, **kwargs):
self.apply('retry', **kwargs)
def apply_restart(self, **kwargs):
self.apply('restart', **kwargs)
class ActorQueue:
def __init__(
self,
actor_name: str,
registery: ActorRegistery,
state: ActorState,
schedule_fetcher,
concurrency: int = 100,
max_retry_count: int = 1,
max_retry_time: int = 10 * 60,
fetcher_concurrency: int = 3,
):
self.actor_name = actor_name
self.registery = registery
self.state = state
self.schedule_fetcher = schedule_fetcher
self.inbox_lowsize = max(1, concurrency // 10)
self.inbox_highsize = max(3, concurrency // 3)
self.outbox_lowsize = max(10, concurrency)
self.outbox_highsize = max(30, concurrency * 3)
self.max_retry_count = max_retry_count
self.max_retry_time = max_retry_time
self.fetcher_concurrency = fetcher_concurrency
self.inbox = [] # [(priority, message)]
self.dst_outbox = {} # dst -> [(priority, message)]
self.dst_node_outbox = {} # dst_node -> dst -> [(priority, message)]
self.is_fetching = False
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, self.actor_name)
def stats(self):
dst_stats = []
for dst, v in self.dst_outbox.items():
if v:
dst_stats.append(dict(dst=dst, size=len(v)))
dst_stats = list(sorted(dst_stats, key=lambda x: x['size']))
dst_node_stats = []
for dst_node, d in self.dst_node_outbox.items():
for dst, v in d.items():
if v:
dst_node_stats.append(dict(dst_node=dst_node, dst=dst, size=len(v)))
        dst_node_stats = list(sorted(dst_node_stats, key=lambda x: x['size']))
return dict(
name=self.actor_name,
inbox_lowsize=self.inbox_lowsize,
inbox_highsize=self.inbox_highsize,
outbox_lowsize=self.outbox_lowsize,
outbox_highsize=self.outbox_highsize,
inbox_size=self.inbox_size(),
outbox_size=self.outbox_size(),
is_fetching=self.is_fetching,
dst_outbox=dst_stats,
dst_node_outbox=dst_node_stats,
)
def inbox_size(self):
return len(self.inbox)
def outbox_size(self):
n = sum(len(x) for x in self.dst_outbox.values())
for box in self.dst_node_outbox.values():
n += sum(len(x) for x in box.values())
for message_ids in self.state.done_message_ids[self.actor_name].values():
n += len(message_ids)
return n
def is_inbox_empty(self):
return self.inbox_size() <= 0
def is_outbox_full(self):
return self.outbox_size() >= self.outbox_highsize
def execute_priority(self):
priority, message = self.inbox[0]
if priority is None:
priority = 100
return priority * (self.outbox_size() / self.outbox_highsize)
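    # Worked example: with message priority 50 and the outbox half full
    # (outbox_size == outbox_highsize / 2), execute_priority() returns 25.
    # Since the scheduler picks the smallest value, queues with emptier
    # outboxes are executed first.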
def op_notify(self, dst: str, src_node: str, available: bool):
self.state.apply_notify(dst=dst, src_node=src_node, available=available)
self.auto_schedule_fetcher()
def op_inbox(self, message: ActorMessage):
if message.is_expired():
LOG.warning(f'expired message {message}')
return
self.state.apply_inbox(message=message)
self.push_inbox(message)
def op_execute(self) -> ActorMessage:
while self.inbox:
priority, message = heapq.heappop(self.inbox)
if message.is_expired():
LOG.warning(f'expired message {message}')
self.state.apply_complete(message_id=message.id, status=ERROR_NOTRY)
continue
self.state.apply_execute(message_id=message.id)
self.auto_schedule_fetcher()
return message
return None
    def op_outbox(self, message_id: str, outbox_messages: typing.List[ActorMessage]):
self.state.apply_outbox(message_id=message_id, outbox_messages=outbox_messages)
for x in outbox_messages:
self.push_outbox(x)
self.auto_schedule_fetcher()
def _export_box(self, result, box, retry_base_at):
priority, outbox_message = heapq.heappop(box)
if outbox_message.is_expired():
LOG.warning(f'expired outbox_message {outbox_message}')
self.state.apply_acked(outbox_message_id=outbox_message.id, status=ERROR_NOTRY)
else:
outbox_state = self.state.get_outbox_state(outbox_message.id)
if not outbox_state:
LOG.warning(f'outbox_message {outbox_message} not in state!')
return
executed_count = outbox_state['executed_count']
retry_at = retry_base_at + self.backoff_delay(executed_count)
self.state.apply_export(outbox_message_id=outbox_message.id, retry_at=retry_at)
result.append(outbox_message)
    def op_export(self, dst, dst_node, maxsize) -> typing.List[ActorMessage]:
ret = []
retry_base_at = time.time() + self.max_retry_time
dst_box = self.dst_node_outbox.get(dst_node)
box = dst_box.get(dst) if dst_box else None
while len(ret) < maxsize and box:
self._export_box(ret, box, retry_base_at)
box = self.dst_outbox.get(dst)
while len(ret) < maxsize and box:
self._export_box(ret, box, retry_base_at)
self.auto_schedule_fetcher()
return ret
def op_done(self, message_id: str, status: str):
self.state.apply_done(message_id=message_id, status=status)
self.auto_schedule_fetcher()
def on_fetcher_done(self):
self.is_fetching = False
self.auto_schedule_fetcher()
def op_acked(self, outbox_message_id: str, status: str):
self.state.apply_acked(outbox_message_id=outbox_message_id, status=status)
self.auto_schedule_fetcher()
def push_inbox(self, message: ActorMessage):
heapq.heappush(self.inbox, (message.priority, message))
def push_outbox(self, outbox_message):
if outbox_message.dst_node:
outbox = self.dst_node_outbox.setdefault(outbox_message.dst_node, {})
outbox = outbox.setdefault(outbox_message.dst, [])
else:
outbox = self.dst_outbox.setdefault(outbox_message.dst, [])
heapq.heappush(outbox, (outbox_message.priority, outbox_message))
def outbox_info(self):
dst_info = []
dst_node_info = []
for dst, box in self.dst_outbox.items():
if box:
dst_info.append(dst)
for dst_node, dst_box in self.dst_node_outbox.items():
for dst, box in dst_box.items():
if box:
dst_node_info.append((dst_node, dst))
return dst_info, dst_node_info
def choice_available_upstream_list(self):
nodes = self.state.upstream.get(self.actor_name, set())
if len(nodes) <= self.fetcher_concurrency:
return nodes
return random.sample(nodes, self.fetcher_concurrency)
def auto_schedule_fetcher(self):
if self.is_fetching:
return
if self.outbox_size() > self.outbox_highsize:
return
if self.inbox_size() > self.inbox_lowsize:
return
upstream_list = self.choice_available_upstream_list()
if not upstream_list:
return
maxsize = self.inbox_highsize - self.inbox_size()
message = self.registery.create_message(
priority=0,
src=self.actor_name,
dst=ACTOR_MESSAGE_FETCHER,
dst_node=self.registery.current_node_name,
content=dict(
actor_name=self.actor_name,
upstream_list=list(upstream_list),
maxsize=maxsize,
),
)
self.schedule_fetcher(message)
self.is_fetching = True
def backoff_delay(self, executed_count):
# 8s, 64s, 8m, ...
random_seconds = random.randint(0, 8 * 1000) / 1000
return min(((8**executed_count) + random_seconds), self.max_retry_time)
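    # Worked example: executed_count=1 gives ~8s, 2 gives ~64s, 3 gives ~512s,
    # each plus up to 8s of random jitter, capped at max_retry_time
    # (600s with the default settings).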
def check_timeout_and_retry(self, now):
# TODO: check outbox message expired
retry_outbox_message_ids = []
error_notry_outbox_message_ids = []
for state in self.state.state.values():
if state['status'] != OUTBOX:
continue
for outbox_message_id, outbox_state in state['outbox_states'].items():
outbox_status = outbox_state['status']
retry_at = outbox_state.get('retry_at')
executed_count = outbox_state.get('executed_count')
if outbox_status == ERROR:
if retry_at and now > retry_at:
retry_outbox_message_ids.append(outbox_message_id)
elif outbox_status == EXPORT:
if now > retry_at:
outbox_message = self.state.get_outbox_message(outbox_message_id)
if executed_count > outbox_message.max_retry:
error_notry_outbox_message_ids.append(outbox_message_id)
else:
retry_outbox_message_ids.append(outbox_message_id)
for outbox_message_id in error_notry_outbox_message_ids:
self.op_acked(outbox_message_id, ERROR_NOTRY)
for outbox_message_id in retry_outbox_message_ids:
outbox_message = self.state.get_outbox_message(outbox_message_id)
if outbox_message.is_expired():
LOG.warning(f'expired outbox_message {outbox_message}')
self.state.apply_acked(outbox_message_id=outbox_message.id, status=ERROR_NOTRY)
else:
self.state.apply_retry(outbox_message_id=outbox_message.id)
self.push_outbox(outbox_message)
return len(error_notry_outbox_message_ids)
class ActorMessageQueue:
def __init__(
self,
registery: ActorRegistery,
actors: typing.Dict[str, Actor],
storage: ActorLocalStorage = None,
concurrency: int = 100,
max_retry_count: int = 1,
max_retry_time: int = 10 * 60,
max_complete_size: int = 128,
):
self.registery = registery
self.actors = actors
self.concurrency = concurrency
self.max_retry_count = max_retry_count
self.max_retry_time = max_retry_time
self.max_complete_size = max_complete_size
state = ActorState(max_complete_size=max_complete_size)
self.raw_state = state
if storage:
state = ActorStorageState(storage, state)
self.state = state
self.storage = storage
self.thread_actor_queues = {}
self.async_actor_queues = {}
self.lock = threading.Lock()
self.execute_condition = threading.Condition(self.lock)
self.is_notifing = False
self.is_compacting = False
def actor_queue(self, actor_name: str):
if actor_name not in self.actors:
raise ValueError(f'actor {actor_name} not exists')
actor = self.actors[actor_name]
if actor.is_async:
q = self.async_actor_queues.get(actor_name)
else:
q = self.thread_actor_queues.get(actor_name)
if q is None:
concurrency = self.concurrency
if actor.is_async:
concurrency *= 3
q = ActorQueue(
actor_name=actor_name,
registery=self.registery,
state=self.state,
schedule_fetcher=self._op_inbox,
concurrency=concurrency,
max_retry_count=self.max_retry_count,
max_retry_time=self.max_retry_time,
)
if actor.is_async:
self.async_actor_queues[actor_name] = q
else:
self.thread_actor_queues[actor_name] = q
return q
def all_actor_queues(self) -> typing.List[ActorQueue]:
for actor_queues in [self.async_actor_queues, self.thread_actor_queues]:
yield from actor_queues.values()
def inbox_size(self):
return sum(x.inbox_size() for x in self.all_actor_queues())
def outbox_size(self):
return sum(x.outbox_size() for x in self.all_actor_queues())
def qsize(self):
return self.inbox_size() + self.outbox_size()
def stats(self):
with self.lock:
actor_stats = []
for actor in self.all_actor_queues():
if actor.inbox_size() or actor.outbox_size():
actor_stats.append(actor.stats())
actor_stats = list(sorted(actor_stats, key=lambda x: x['name']))
return dict(
is_compacting=self.is_compacting,
is_notifing=self.is_notifing,
inbox_size=self.inbox_size(),
outbox_size=self.outbox_size(),
concurrency=self.concurrency,
max_retry_count=self.max_retry_count,
max_retry_time=self.max_retry_time,
state=self.state.stats(),
actors=actor_stats,
)
def op_execute(self) -> ActorMessage:
"""
For executors
"""
with self.execute_condition:
while True:
msg = self._op_execute(self.thread_actor_queues)
if msg is not None:
metric_queue_op('execute', msg)
return msg
self.execute_condition.wait()
async def async_op_execute(self) -> ActorMessage:
while True:
with self.lock:
msg = self._op_execute(self.async_actor_queues)
if msg is not None:
metric_queue_op('execute', msg)
return msg
await asyncio.sleep(0.1)
    def op_outbox(self, message_id: str, outbox_messages: typing.List[ActorMessage]):
"""
For executors
"""
with self.lock:
message = self.state.get_message(message_id)
if not message:
LOG.warning(f'message {message_id} not exists')
return
self.actor_queue(message.dst).op_outbox(message_id, outbox_messages=outbox_messages)
for x in outbox_messages:
metric_queue_op('outbox', x)
def op_done(self, message_id: str, status: str):
"""
For executors
"""
with self.lock:
message = self.state.get_message(message_id)
self.actor_queue(message.dst).op_done(message_id, status=status)
if message.dst == ACTOR_MESSAGE_FETCHER:
self.actor_queue(message.src).on_fetcher_done()
if message.dst == ACTOR_MESSAGE_NOTIFY_SENDER:
self.is_notifing = False
if message.dst == ACTOR_STORAGE_COMPACTOR:
self.is_compacting = False
self.execute_condition.notify()
metric_queue_op('done', message)
def op_export(self, dst: str, dst_node: str, maxsize: int):
"""
For receiver (message exporter)
"""
with self.lock:
if dst == ACTOR_MESSAGE_ACKER:
ret = list(self._export_ack(dst_node, maxsize))
else:
ret = []
for actor_queue in self.all_actor_queues():
ret.extend(actor_queue.op_export(dst, dst_node, maxsize))
maxsize -= len(ret)
if maxsize <= 0:
break
self.execute_condition.notify(len(ret))
for x in ret:
metric_queue_op('export', x)
return ret
def op_notify(self, src_node: str, dst: str, available: bool):
"""
For upstream notify or message fetcher
"""
with self.lock:
self.actor_queue(dst).op_notify(dst=dst, src_node=src_node, available=available)
self.execute_condition.notify()
def op_inbox(self, message: ActorMessage):
"""
For message fetcher or receiver
"""
with self.lock:
self._op_inbox(message)
metric_queue_op('inbox', message)
    def op_acked(self, outbox_message_id: str, status: str):
"""
For message fetcher
"""
with self.lock:
outbox_message = self.state.get_outbox_message(outbox_message_id)
message = self.state.get_message(outbox_message.parent_id)
self.actor_queue(message.dst).op_acked(outbox_message_id, status=status)
self.execute_condition.notify()
metric_queue_op('acked', outbox_message)
def op_tick(self, now: int):
"""
For message monitor
"""
with self.lock:
self._auto_schedule_notifier()
self._auto_schedule_compactor()
for actor_queue in self.all_actor_queues():
num_error_notry = actor_queue.check_timeout_and_retry(now)
if num_error_notry > 0:
self.execute_condition.notify(num_error_notry)
# TODO: fix fetcher not auto scheduled in actor queue
actor_queue.auto_schedule_fetcher()
ACTOR_QUEUE_INBOX_SIZE.labels(dst=actor_queue.actor_name)\
.set(actor_queue.inbox_size())
ACTOR_QUEUE_OUTBOX_SIZE.labels(dst=actor_queue.actor_name)\
.set(actor_queue.outbox_size())
def op_restart(self):
"""
For application
"""
with self.lock:
if self.storage:
self.storage.load(self.raw_state)
self.state.apply_restart()
for message in self.state.get_inbox_messages():
self.actor_queue(message.dst).push_inbox(message)
if message.dst == ACTOR_MESSAGE_NOTIFY_SENDER:
self.is_notifing = True
if message.dst == ACTOR_STORAGE_COMPACTOR:
self.is_compacting = True
if message.dst == ACTOR_MESSAGE_FETCHER:
self.actor_queue(message.dst).is_fetching = True
for message, outbox_messages in self.state.get_outbox_messages():
for outbox_message in outbox_messages:
self.actor_queue(message.dst).push_outbox(outbox_message)
def _op_inbox(self, message):
self.actor_queue(message.dst).op_inbox(message)
self.execute_condition.notify()
def _ack_of(self, message, status):
return self.registery.create_message(
id=message.id,
priority=0,
src=message.dst,
dst=ACTOR_MESSAGE_ACKER,
dst_node=message.src_node,
content=dict(status=status),
)
def _export_ack(self, src_node, maxsize):
message_and_status = []
        for dst, data in self.state.done_message_ids.items():
            if maxsize <= 0:
                break
            for message_id in data.get(src_node, []):
                status = self.state.state[message_id]['status']
                message = self.state.get_message(message_id)
                message_and_status.append((message, status))
                maxsize -= 1
                if maxsize <= 0:
                    break
for message, status in message_and_status:
self.state.apply_complete(message_id=message.id)
yield self._ack_of(message, status)
def _auto_schedule_notifier(self):
if self.is_notifing:
return
dst_info = set()
dst_node_info = set()
for actor_queue in self.all_actor_queues():
dst_s, dst_node_s = actor_queue.outbox_info()
dst_info.update(dst_s)
dst_node_info.update(dst_node_s)
for dst, dst_node_data in self.state.done_message_ids.items():
for dst_node, items in dst_node_data.items():
if items:
dst_node_info.add((dst_node, ACTOR_MESSAGE_ACKER))
if not dst_info and not dst_node_info:
return
dst_info = [dict(dst=dst) for dst in dst_info]
dst_node_info = [dict(dst=dst, dst_node=dst_node) for dst_node, dst in dst_node_info]
message_notifier = self.registery.create_message(
priority=0,
src=ACTOR_SYSTEM,
dst=ACTOR_MESSAGE_NOTIFY_SENDER,
dst_node=self.registery.current_node_name,
content=dict(dst_info=dst_info, dst_node_info=dst_node_info),
)
self.actor_queue(message_notifier.dst).op_inbox(message_notifier)
self.is_notifing = True
def _auto_schedule_compactor(self):
if self.storage is None:
return
if self.is_compacting:
return
if not self.storage.should_compact(self.raw_state):
return
message_compactor = self.registery.create_message(
priority=0,
src=ACTOR_SYSTEM,
dst=ACTOR_STORAGE_COMPACTOR,
dst_node=self.registery.current_node_name,
)
self.actor_queue(message_compactor.dst).op_inbox(message_compactor)
self.is_compacting = True
def prepare_compact(self):
with self.lock:
return self.storage.prepare_compact(self.raw_state)
def _op_execute(self, actor_queues):
min_priority, min_actor = None, None
for actor in actor_queues.values():
if actor.is_inbox_empty() or actor.is_outbox_full():
continue
priority = actor.execute_priority()
if min_priority is None or priority < min_priority:
min_priority = priority
min_actor = actor
if min_actor is not None:
return min_actor.op_execute()
return None
|
# -*- coding: utf-8 -*-
"""
Produces fake instrument data for testing.
"""
from __future__ import print_function
from __future__ import absolute_import
import functools
import os
import numpy as np
import pandas as pds
import pysat
from pysat.instruments.methods import testing as test
# pysat required parameters
platform = 'pysat'
name = 'testing'
# dictionary of data 'tags' and corresponding description
# tags are used to choose the behaviour of dummy1
tags = {'': 'Regular testing data set',
'ascend': 'Ascending Integers from 0 testing data set',
'descend': 'Descending Integers from 0 testing data set',
'plus10': 'Ascending Integers from 10 testing data set',
'fives': 'All 5s testing data set',
'mlt_offset': 'dummy1 is offset by five from regular testing set'}
# dictionary of satellite IDs, list of corresponding tags
# a numeric string can be used in sat_id to change the number of points per day
sat_ids = {'': ['', 'ascend', 'descend', 'plus10', 'fives', 'mlt_offset']}
_test_dates = {'': {'': pysat.datetime(2009, 1, 1)}}
meta = pysat.Meta()
meta['uts'] = {'units': 's',
'long_name': 'Universal Time',
'custom': False}
meta['Epoch'] = {'units': 'Milliseconds since 1970-1-1',
'Bin_Location': 0.5,
'notes': 'UTC time at middle of geophysical measurement.',
'desc': 'UTC seconds', }
meta['mlt'] = {'units': 'hours',
'long_name': 'Magnetic Local Time',
'label': 'MLT',
'axis': 'MLT',
'desc': 'Magnetic Local Time',
'value_min': 0.0,
'value_max': 24.0,
'notes': ('Magnetic Local Time is the solar local time of the '
'field line at the location where the field crosses '
'the magnetic equator. In this case we just simulate '
                         '0-24 with a consistent orbital period and an offset '
'with SLT.'),
'fill': np.nan,
'scale': 'linear'}
meta['slt'] = {'units': 'hours',
'long_name': 'Solar Local Time',
'label': 'SLT',
'axis': 'SLT',
'desc': 'Solar Local Time',
'value_min': 0.0,
'value_max': 24.0,
               'notes': ('Solar Local Time is the local time (zenith angle of '
                         'sun) of the given location. Overhead noon is 12 SLT; '
                         '+/- 90 degrees is 6 or 18 SLT.'),
'fill': np.nan,
'scale': 'linear'}
meta['orbit_num'] = {'units': '',
'long_name': 'Orbit Number',
'label': 'Orbit Number',
'axis': 'Orbit Number',
'desc': 'Orbit Number',
'value_min': 0.0,
'value_max': 25000.0,
'notes': ('Number of orbits since the start of the '
'mission. For this simulation we use the '
'number of 5820 second periods since the '
'start, 2008-01-01.'),
'fill': np.nan,
'scale': 'linear'}
meta['longitude'] = {'units': 'degrees', 'long_name': 'Longitude'}
meta['latitude'] = {'units': 'degrees', 'long_name': 'Latitude'}
meta['dummy1'] = {'units': '', 'long_name': 'dummy1'}
meta['dummy2'] = {'units': '', 'long_name': 'dummy2'}
meta['dummy3'] = {'units': '', 'long_name': 'dummy3'}
meta['dummy4'] = {'units': '', 'long_name': 'dummy4'}
meta['string_dummy'] = {'units': '', 'long_name': 'string_dummy'}
meta['unicode_dummy'] = {'units': '', 'long_name': 'unicode_dummy'}
meta['int8_dummy'] = {'units': '', 'long_name': 'int8_dummy'}
meta['int16_dummy'] = {'units': '', 'long_name': 'int16_dummy'}
meta['int32_dummy'] = {'units': '', 'long_name': 'int32_dummy'}
meta['int64_dummy'] = {'units': '', 'long_name': 'int64_dummy'}
def init(inst):
""" Initialization function
Shifts time index of files by 5-minutes if mangle_file_dates
set to True at pysat.Instrument instantiation.
Creates a file list for a given range if the file_date_range
keyword is set at instantiation.
Parameters
----------
file_date_range : (pds.date_range)
Optional keyword argument that specifies the range of dates for which
test files will be created
mangle_file_dates : bool
If True, the loaded file list time index is shifted by 5-minutes.
"""
inst.new_thing = True
# work on file index if keyword present
if 'file_date_range' in inst.kwargs:
# set list files routine to desired date range
# attach to the instrument object
fdr = inst.kwargs['file_date_range']
inst._list_rtn = functools.partial(list_files, file_date_range=fdr)
inst.files.refresh()
# mess with file dates if kwarg option present
if 'mangle_file_dates' in inst.kwargs:
if inst.kwargs['mangle_file_dates']:
inst.files.files.index = \
inst.files.files.index + pds.DateOffset(minutes=5)
def default(inst):
"""The default function is applied first to data as it is loaded.
"""
pass
def load(fnames, tag=None, sat_id=None, sim_multi_file_right=False,
sim_multi_file_left=False, root_date=None, file_date_range=None,
malformed_index=False, **kwargs):
""" Loads the test files
Parameters
----------
fnames : (list)
List of filenames
    tag : (str or NoneType)
        Instrument tag (accepts '' or any of the strings in the module-level
        `tags` dictionary)
    sat_id : (str or NoneType)
        Instrument satellite ID (accepts '' or a numeric string, e.g. '10',
        which specifies the number of data points per day in the test
        instrument)
sim_multi_file_right : (boolean)
Adjusts date range to be 12 hours in the future or twelve hours beyond
root_date (default=False)
sim_multi_file_left : (boolean)
Adjusts date range to be 12 hours in the past or twelve hours before
root_date (default=False)
root_date : (NoneType)
Optional central date, uses _test_dates if not specified.
(default=None)
file_date_range : (pds.date_range or NoneType)
        Range of dates for files or None, if this optional argument is not
used
(default=None)
malformed_index : bool (default=False)
If True, time index for simulation will be non-unique and non-monotonic
**kwargs : Additional keywords
        Additional keyword arguments supplied at pysat.Instrument instantiation
are passed here
Returns
-------
data : (pds.DataFrame)
Testing data
meta : (pysat.Meta)
        Metadata
"""
    # create an artificial satellite data set
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
# Specify the date tag locally and determine the desired date range
date = pysat.datetime(yr, month, day)
pds_offset = pds.DateOffset(hours=12)
if sim_multi_file_right:
root_date = root_date or _test_dates[''][''] + pds_offset
data_date = date + pds_offset
elif sim_multi_file_left:
root_date = root_date or _test_dates[''][''] - pds_offset
data_date = date - pds_offset
else:
root_date = root_date or _test_dates['']['']
data_date = date
# The sat_id can be used to specify the number of indexes to load for
# any of the testing objects
num = 86400 if sat_id == '' else int(sat_id)
num_array = np.arange(num)
uts = num_array
data = pysat.DataFrame(uts, columns=['uts'])
# need to create simple orbits here. Have start of first orbit default
# to 1 Jan 2009, 00:00 UT. 14.84 orbits per day
time_delta = date - root_date
data['mlt'] = test.generate_fake_data(time_delta.total_seconds(),
num_array, period=5820,
data_range=[0.0, 24.0])
# do slt, 20 second offset from mlt
data['slt'] = test.generate_fake_data(time_delta.total_seconds()+20,
num_array, period=5820,
data_range=[0.0, 24.0])
# create a fake longitude, resets every 6240 seconds
# sat moves at 360/5820 deg/s, Earth rotates at 360/86400, takes extra time
# to go around full longitude
data['longitude'] = test.generate_fake_data(time_delta.total_seconds(),
num_array, period=6240,
data_range=[0.0, 360.0])
# create latitude area for testing polar orbits
angle = test.generate_fake_data(time_delta.total_seconds(),
num_array, period=5820,
data_range=[0.0, 2.0*np.pi])
data['latitude'] = 90.0 * np.cos(angle)
# fake orbit number
fake_delta = date - (_test_dates[''][''] - pds.DateOffset(years=1))
data['orbit_num'] = test.generate_fake_data(fake_delta.total_seconds(),
num_array, period=5820,
cyclic=False)
# create some fake data to support testing of averaging routines
mlt_int = data['mlt'].astype(int)
long_int = (data['longitude'] / 15.0).astype(int)
if tag == 'ascend':
data['dummy1'] = [i for i in range(len(data['mlt']))]
elif tag == 'descend':
data['dummy1'] = [-i for i in range(len(data['mlt']))]
elif tag == 'plus10':
data['dummy1'] = [i + 10 for i in range(len(data['mlt']))]
elif tag == 'fives':
data['dummy1'] = [5 for i in range(len(data['mlt']))]
elif tag == 'mlt_offset':
data['dummy1'] = mlt_int + 5
else:
data['dummy1'] = mlt_int
data['dummy2'] = long_int
data['dummy3'] = mlt_int + long_int * 1000.0
data['dummy4'] = num_array
data['string_dummy'] = ['test'] * len(data)
data['unicode_dummy'] = [u'test'] * len(data)
data['int8_dummy'] = np.ones(len(data), dtype=np.int8)
data['int16_dummy'] = np.ones(len(data), dtype=np.int16)
data['int32_dummy'] = np.ones(len(data), dtype=np.int32)
data['int64_dummy'] = np.ones(len(data), dtype=np.int64)
index = pds.date_range(data_date,
data_date+pds.DateOffset(seconds=num-1),
freq='S')
if malformed_index:
index = index[0:num].tolist()
# nonmonotonic
index[0:3], index[3:6] = index[3:6], index[0:3]
# non unique
index[6:9] = [index[6]]*3
data.index = index[0:num]
data.index.name = 'Epoch'
return data, meta.copy()
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
file_date_range=None):
"""Produce a fake list of files spanning a year
Parameters
----------
tag : (str)
pysat instrument tag (default=None)
sat_id : (str)
pysat satellite ID tag (default=None)
data_path : (str)
pysat data path (default=None)
format_str : (str)
file format string (default=None)
file_date_range : (pds.date_range)
File date range (default=None)
Returns
-------
Series of filenames indexed by file time
"""
# Determine the appropriate date range for the fake files
if file_date_range is None:
start = _test_dates[''][''] - pds.DateOffset(years=1)
stop = _test_dates[''][''] + pds.DateOffset(years=2) \
- pds.DateOffset(days=1)
file_date_range = pds.date_range(start, stop)
index = file_date_range
# Create the list of fake filenames
names = [data_path + date.strftime('%Y-%m-%d') + '.nofile'
for date in index]
return pysat.Series(names, index=index)
def download(date_array, tag, sat_id, data_path=None,
user=None, password=None):
""" Download routine, not used since files are created locally"""
pass
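# Illustrative usage sketch (a minimal example, assuming a standard pysat setup
# where this module is registered as the 'pysat'/'testing' instrument declared
# by ``platform`` and ``name`` above):
#
#     inst = pysat.Instrument(platform='pysat', name='testing', sat_id='100')
#     inst.load(2009, 1)   # one day of fake data with 100 samples (see sat_id)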
|
# -*- coding: utf-8 -*-
from scake import Scake
import logging
_logger = logging.getLogger(__name__)
class Foo():
def __init__(self, x=1, y=2):
self.x = x
self.y = y
def __call__(self):
x = self.x() if isinstance(self.x, Foo) else self.x
y = self.y() if isinstance(self.y, Foo) else self.y
return x + y
class Bar():
def __init__(self, x, y):
self.x = x
self.y = y
def get_x(self):
return self.x
def get_y(self):
return self.y
def test_ref_dict_multi_level():
config = {
"config": {
"myvar_one": {
"x": 10,
"y": 20,
},
"myvar_two": {
"x": 100,
"y": 200,
},
},
"myvar": {
"one": {
"$Foo": "=/config/myvar_one"
},
"two": {
"$Foo": "=/config/myvar_two"
},
},
"mybar": {
"$Bar": {
"x": "=/myvar",
"y": 1,
},
},
}
s = Scake(config, class_mapping=globals())
s.run(debug=True)
mybar = s['/mybar']
assert isinstance(mybar, Bar)
assert mybar.get_y() == 1
assert "one" in mybar.get_x()
assert "two" in mybar.get_x()
_logger.warning(mybar.get_x())
_logger.warning(s["/myvar/one"])
_logger.warning(s["/myvar/two"])
assert isinstance(mybar.get_x()["one"], Foo)
assert isinstance(mybar.get_x()["two"], Foo)
assert mybar.get_x()["one"]() == 30
assert mybar.get_x()["two"]() == 300
|
"""OpenAPI core contrib requests requests module"""
from __future__ import absolute_import
from werkzeug.datastructures import ImmutableMultiDict
from requests import Request
from six.moves.urllib.parse import urlparse, parse_qs
from openapi_core.validation.request.datatypes import (
RequestParameters, OpenAPIRequest,
)
class RequestsOpenAPIRequestFactory(object):
@classmethod
def create(cls, request):
"""
Converts a requests request to an OpenAPI one
Internally converts to a `PreparedRequest` first to parse the exact
payload being sent
"""
if isinstance(request, Request):
request = request.prepare()
# Method
method = request.method.lower()
# Cookies
cookie = {}
if request._cookies is not None:
# cookies are stored in a cookiejar object
cookie = request._cookies.get_dict()
# Preparing a request formats the URL with params, strip them out again
o = urlparse(request.url)
params = parse_qs(o.query)
# extract the URL without query parameters
url = o._replace(query=None).geturl()
# gets deduced by path finder against spec
path = {}
# Order matters because all python requests issued from a session
# include Accept */* which does not necessarily match the content type
mimetype = request.headers.get('Content-Type') or \
request.headers.get('Accept')
# Headers - request.headers is not an instance of dict
# which is expected
header = dict(request.headers)
# Body
# TODO: figure out if request._body_position is relevant
body = request.body
parameters = RequestParameters(
query=ImmutableMultiDict(params),
header=header,
cookie=cookie,
path=path,
)
return OpenAPIRequest(
full_url_pattern=url,
method=method,
parameters=parameters,
body=body,
mimetype=mimetype,
)
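# Illustrative usage sketch (hypothetical URL):
#
#     req = Request('GET', 'http://example.com/pets?limit=10')
#     openapi_request = RequestsOpenAPIRequestFactory.create(req)
#     # openapi_request.method == 'get' and the 'limit' query parameter is
#     # available in openapi_request.parameters.query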
|
# Copyright (c) 2019 Erwin de Haan. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the FletcherFiltering project
import pyarrow as pa
import typed_ast.ast3 as ast
class ArrowTypeResolver(object):
@staticmethod
def make_method_name(arrow_type: pa.DataType) -> str:
return 'type_' + str(arrow_type).replace("[", "_").replace("]", "_")
def resolve(self, arrow_type: pa.DataType, as_stream: bool = False, as_nullable: bool = False, as_pointer: bool = False,
as_const: bool = False):
"""Dispatch method"""
method_name = self.make_method_name(arrow_type)
# Get the method from 'self'. Default to a lambda.
method = getattr(self, method_name, self.unknown_type)
# Call the method as we return it
if as_stream:
return self.wrap_in_stream(method(arrow_type, as_nullable, as_pointer), as_const)
else:
return method(arrow_type, as_nullable, as_pointer, as_const)
def wrap_in_stream(self, type_ast, as_const: bool = False):
return ast.Subscript(
value=ast.Name(
id=("const " if as_const else "") + 'hls::stream',
ctx=ast.Load()),
slice=type_ast
)
def full_type_name(self, name, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
type_ast = ast.Name(id=("const " if as_const else "") + name + ("*" if as_pointer else ''), ctx=ast.Load())
if as_nullable:
type_ast = ast.Subscript(
value=ast.Name(
id='nullable',
ctx=ast.Load()),
slice=type_ast
)
return type_ast
def type_bool(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_bool", as_nullable, as_pointer, as_const)
def type_int8(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_int8", as_nullable, as_pointer, as_const)
def type_uint8(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint8", as_nullable, as_pointer, as_const)
def type_int16(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_int16", as_nullable, as_pointer, as_const)
def type_uint16(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint16", as_nullable, as_pointer, as_const)
def type_int32(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_int32", as_nullable, as_pointer, as_const)
def type_uint32(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint32", as_nullable, as_pointer, as_const)
def type_int64(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_int64", as_nullable, as_pointer, as_const)
def type_uint64(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint64", as_nullable, as_pointer, as_const)
def type_date32(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_date32", as_nullable, as_pointer, as_const)
def type_date64(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_date64", as_nullable, as_pointer, as_const)
def type_timestamp_s_(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint64", as_nullable, as_pointer, as_const)
def type_timestamp_ms_(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint64", as_nullable, as_pointer, as_const)
def type_timestamp_us_(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint64", as_nullable, as_pointer, as_const)
def type_timestamp_ns_(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint64", as_nullable, as_pointer, as_const)
def type_string(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_uint8", as_nullable, as_pointer, as_const)
def type_halffloat(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_float16", as_nullable, as_pointer, as_const)
def type_float(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_float32", as_nullable, as_pointer, as_const)
def type_double(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
return self.full_type_name("f_float64", as_nullable, as_pointer, as_const)
def unknown_type(self, arrow_type, as_nullable: bool = False, as_pointer: bool = False, as_const: bool = False):
raise NotImplementedError('{}{}{} cannot be processed'.format(arrow_type, ' as pointer' if as_pointer else '',
' as constant' if as_const else ''))
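# Illustrative sketch: resolve(pa.int32()) dispatches to type_int32 and returns
# an ast.Name node for "f_int32"; with as_stream=True the same node is wrapped
# in an 'hls::stream' ast.Subscript via wrap_in_stream.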
|
import numpy
import ilcs_parser
from parseratorvariable import ParseratorType
CITATION = (
('chapter', ('Chapter',)),
('act prefix', ('ActPrefix',)),
('section', ('Section',)),
('subsection', ('SubSection',)),
)
class ILCSType(ParseratorType):
type = 'ILCS'
def tagger(self, field):
return self.tag(field)
def __init__(self, definition):
self.components = (('Citation', self.compareFields, CITATION),)
block_parts = ('Citation',)
super().__init__(definition, ilcs_parser.tag, block_parts)
# Add exact match and attempted indicators to the distance vector
self.num_additional_indicators = 2
self.expanded_size += self.num_additional_indicators
def fields(self, field):
"""
Override the parent method to append an exact match field.
"""
fields = super().fields(field)
fields += [('attempted match', 'Dummy'), ('exact match', 'Exact')]
return fields
def comparator(self, field_1, field_2):
"""
Override the parent method to append an exact match field.
"""
# Temporarily subtract the exact and attempted match indicators from
# expanded_size, since the parent method assumes that the last element
# of the distance vector is the full-string comparison
self.expanded_size -= self.num_additional_indicators
distances = super().comparator(field_1, field_2)
self.expanded_size += self.num_additional_indicators
# Set the attempted match indicator variable
try:
parsed_variable_1 = self.tagger(field_1)
parsed_variable_2 = self.tagger(field_2)
except TypeError:
attempted = 0
else:
variable_type_1, variable_type_2 = parsed_variable_1[1], parsed_variable_2[1]
if 'Ambiguous' in (variable_type_1, variable_type_2):
attempted = 0
else:
attempted = int(parsed_variable_1.is_attempted == parsed_variable_2.is_attempted)
distances = numpy.append(distances, attempted)
# Set the exact match indicator variable
exact_match = 1 if field_1 == field_2 else 0
distances = numpy.append(distances, exact_match)
return distances
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2018 by Neil Jarvis
:licence: MIT, see LICENCE for more details
"""
from __future__ import absolute_import, unicode_literals, print_function
def nth(number):
    """Return the ordinal form of a number string, e.g. '2' -> '2nd'."""
    if str(number)[-2:] in ('11', '12', '13'):
        return number + 'th'
    elif str(number)[-1] == '1':
        return number + 'st'
    elif str(number)[-1] == '2':
        return number + 'nd'
    elif str(number)[-1] == '3':
        return number + 'rd'
    else:
        return number + 'th'
|
from contextlib import contextmanager
@contextmanager
def assert_raises(expected_exception):
'''
unittest.TestCase Python 2.6 compatible assert_raises context manager
'''
context = Context()
try:
yield context
except expected_exception as e:
context.exception = e
except Exception as e:
raise Exception('Unexpected exception thrown:', e)
else:
raise Exception('{} not thrown'.format(expected_exception))
class Context(object):
def __init__(self):
self.exception = None
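# Illustrative usage sketch:
#
#     with assert_raises(ValueError) as ctx:
#         int('not a number')
#     # ctx.exception now holds the raised ValueError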
|
from collections import Counter
def counter_arithmetic():
sales_day1 = Counter(apple=4, orange=9, banana=10)
sales_day2 = Counter({'apple': 10, 'orange': 8, 'banana': 2})
print("sales_day1", sales_day1)
print("sales_day2", sales_day2)
print("sales_day1 + sales_day2", sales_day1 + sales_day2)
print("sales_day1 - sales_day2", sales_day1 - sales_day2)
print("sales_day1 & sales_day2", sales_day1 & sales_day2)
print("sales_day1 | sales_day2", sales_day1 | sales_day2)
def counter_unary_operation():
counter = Counter(a=2, b=-4, c=0)
print("+counter", +counter)
print("-counter", -counter)
def counter_funcs():
counter = Counter({2: 2, 3: 3, 17: 1})
print("elements(): ", list(counter.elements()))
if __name__ == "__main__":
counter_arithmetic()
counter_unary_operation()
counter_funcs()
|
"""
Status: The algorithm works and an example of using the algorithm is finished,
so I am done working on this module.
A module that implements the Knuth-Plass text formatting algorithm in Python.
"""
from typing import List, Callable, Union, Dict, Generator, Tuple
from collections import namedtuple
class JUSTIFY:
LEFT = "LEFT"
RIGHT = "RIGHT"
CENTER = "CENTER"
FULL = "FULL"
WHITESPACE_CHARS = ' \t\r\n\f\v'
WHITESPACE = set(ch for ch in WHITESPACE_CHARS)
Num = Union[int, float]
INF = 10000
GLUE, BOX, PENALTY = "Glue", "Box", "Penalty"
# =============================================================================
# Specifications (Glue, Box, Penalty)
# -----------------------------------------------------------------------------
class Spec:
def __init__(self):
super().__init__()
def is_glue(self): return False
def is_box(self): return False
def is_penalty(self): return False
def is_forced_break(self): return False
class Glue(Spec):
"""
Glue refers to blank space that can vary its width in specified ways; it is
an elastic mortar used between boxes in a typeset line.
"""
__slots__ = ['width', 'stretch', 'shrink']
t = GLUE
def __init__(self, shrink:Num, width:Num, stretch:Num):
"""
Init for a Glue Object. You can think of shrink, width, and stretch as
shrink: the max you can lessen the width by
width: ideal width
stretch: the max amount of space you can add to the width
In other words: this glue has minimum width (`width` - `shrink`) and
maximum width (`width` + `stretch`)
NOTE: in the paper, a Glue is specified with order
"width, stretch, shrink". That makes absolutely no sense so I've
changed the parameters to be in order "shrink, width, stretch"
instead.
"""
self.shrink: Num = shrink
self.width: Num = width
self.stretch: Num = stretch
def r_width(self, r):
"""
Returns the width of this glue for the given ratio r.
"""
if r < 0: return self.width + (r * self.shrink)
else: return self.width + (r * self.stretch)
def is_glue(self): return True
def copy(self):
return Glue(self.shrink, self.width, self.stretch)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.stretch == self.stretch and o.shrink == self.shrink
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, stretch={self.stretch}, shrink={self.shrink})>'
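# Worked example: Glue(shrink=1, width=2, stretch=1) has minimum width 1 and
# maximum width 3; r_width(-1.0) == 1.0 (fully shrunk) and r_width(0.5) == 2.5.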
class Box(Spec):
"""
A box refers to something that is to be typeset: either a character from
some font of type, or a black rectangle such as a horizontal or
vertical rule, or something built up from several characters such as an
accented letter or a mathematical formula. The contents of a box may be
extremely complicated, or they may be extremely simple; the
line-breaking algorithm does not peek inside a box to see what it
contains, so we may consider the boxes to be sealed and locked.
"""
__slots__ = ['width', 'value']
t = BOX
def __init__(self, width:Num, value:Num):
self.width: Num = width # The fixed width of the box (so width of what is in the box)
self.value: Num = value # Value is something like a glyph/character. Algorithm does not use this, only width param so value can be whatever you want, as long as the width reflects its width.
def is_box(self): return True
def copy(self):
return Box(self.width, self.value)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.value == self.value
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, value={self.value})>'
class Penalty(Spec):
"""
Penalty specifications refer to potential places to end one line of a
paragraph and begin another (AKA, a linebreak), with a certain
‘aesthetic cost’ indicating how desirable or undesirable such a
breakpoint would be. The width of a penalty is how much typset material
needs to be added if you break here AKA 0 if nothing and the width of
a hyphen if you want to add a hyphen here because you are breaking off
a word.
"""
__slots__ = ['width', 'penalty', 'flagged']
t = PENALTY
def __init__(self, width:Num, penalty:Num, flagged:bool):
self.width: Num = width # Width of extra typeset material (width of the hyphen)
self.penalty: Num = penalty # The penalty to breaking here
        self.flagged: bool = flagged # Whether there is a hyphen at this break (a flagged penalty)
def is_penalty(self): return True
def is_forced_break(self): return (self.penalty == -INF)
def copy(self):
return Penalty(self.width, self.penalty, self.flagged)
def __eq__(self, o:object):
if isinstance(o, self.__class__):
return o.width == self.width and o.penalty == self.penalty and o.flagged == self.flagged
return False
def __repr__(self):
return f'<{self.__class__.__name__}(width={self.width}, penalty={self.penalty}, flagged={self.flagged})>'
Spec = Union[Glue, Box, Penalty]
# =============================================================================
# Parsing Text into List of Specs
# -----------------------------------------------------------------------------
def make_paragraph(text):
"""
An example function that takes in text and returns a paragraph from it that
can be used in the Knuth-Plass Algorithm.
"""
# Turn chunk of text into a paragraph
L = []
for ch in text:
if ch in ' \n':
# Add the space between words
# it's 2 units +/- 1 so can be 1, 2, or 3 units long
L.append(Glue(1, 2, 1))
elif ch == '@':
# Append forced break
            L.append(Penalty(0, -INF, 0))
elif ch == '~':
# Append no-break so cannot break here under any circumstances
            L.append(Penalty(0, INF, 0))
else:
# All characters are 1 unit wide
L.append(Box(1, ch))
# Append closing penalty and glue
L.extend(std_paragraph_end())
return L
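# Illustrative sketch: make_paragraph("to be") yields
#   [Box(1, 't'), Box(1, 'o'), Glue(1, 2, 1), Box(1, 'b'), Box(1, 'e')]
# followed by the standard paragraph end (Penalty, Glue, Penalty) appended by
# std_paragraph_end().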
def std_paragraph_end():
"""
Returns the standard closing penalty for a paragraph as a list of Penalty,
Glue, and Penalty Objects. Just extend your List[Spec] by it and it
should end properly.
"""
return [Penalty(0, INF, 0), # Forced non-break (must not break here, otherwise a Box coming before the Glue after this would allow a break to be here)
Glue( 0, 0, INF), # Glue that fills the rest of the last line (even if that fill is 0 width)
Penalty(0, -INF, 1)] # Forced break (Ends last line)
# =============================================================================
# The Actual Knuth-Plass Algorithm
# -----------------------------------------------------------------------------
class Break:
"""
A class representing a break in the text as calculated by the Knuth-Plass
algorithm.
"""
__slots__ = ["position", "line", "fitness_class", "total_width", "total_stretch", "total_shrink", "demerits", "previous", "previous_break", "next_break"]
def __init__(self, position, line, fitness_class, total_width, total_stretch, total_shrink, demerits, previous=None, previous_break=None, next_break=None):
self.position = position
self.line = line
self.fitness_class = fitness_class
self.total_width = total_width
self.total_stretch = total_stretch
self.total_shrink = total_shrink
self.demerits = demerits
self.previous = previous # Used by algorithm
# Used by linked list
self.previous_break = previous_break
self.next_break = next_break
def iterate_forward(self, start_with_self=True):
"""
Since Breaks are in a linked list, this function lets you iterate
forward in the list.
The iteration starts at the next Break node forward.
"""
curr = self if start_with_self else self.next_break
while curr is not None:
yield curr
curr = curr.next_break
def iterate_backward(self, start_with_self=True):
"""
Since Breaks are in a linked list, this function lets you iterate
backwards in the list.
The iteration starts at the previous Break node backwards.
"""
curr = self if start_with_self else self.previous_break
while curr is not None:
yield curr
curr = curr.previous_break
def iterate_front_to_back(self):
"""
        Guarantees that the iteration is happening from the first node to the
last node (unless a link's previous_break or next_break has been
messed up manually)
"""
first_node = None
for node in self.iterate_backward(True):
first_node = node
for node in first_node.iterate_forward(True):
yield node
def insert(self, break_obj):
"""
Inserts a break object into this Break object's position in the linked list.
"""
break_obj.remove_from_linked_list()
# Connect previous break with break_obj
if self.previous_break is not None:
self.previous_break.next_break = break_obj
break_obj.previous_break = self.previous_break
# connect break_obj with self
break_obj.next_break = self
self.previous_break = break_obj
def insert_after(self, break_obj):
"""
Inserts the given Break object directly after this object.
"""
if self.next_break is None:
self.append(break_obj)
else:
self.next_break.insert(break_obj)
def append(self, break_obj):
# Remove from current list, if in one
break_obj.remove_from_linked_list()
# Find last node (could be this node)
last_node = self
for node in self.iterate_forward(False):
last_node = node
# Connect last node to the new one
last_node.next_break = break_obj
break_obj.previous_break = last_node
def remove_from_linked_list(self):
"""
Removes this Break from the linked list it is in.
Returns the next_break in the list if possible or the previous_break if
next_break is None
"""
if self.previous_break is not None:
self.previous_break.next_break = self.next_break
if self.next_break is not None:
self.next_break.previous_break = self.previous_break
self.next_break = None
self.previous_break = None
def __len__(self):
length = None
for i, node in enumerate(self.iterate_front_to_back()):
length = i
return 0 if length is None else length + 1
def copy(self):
"""
Copies this Break object, not the linked list itself so not the previous_break
and next_break.
"""
return Break(self.position, self.line, self.fitness_class, self.total_width, self.total_stretch, self.total_shrink, self.demerits)
def __repr__(self):
return f"<{self.__class__.__name__}(pos={self.position}, line={self.line}, fitness_class={self.fitness_class}, total_width={self.total_width}, total_stretch={self.total_stretch}, total_shrink={self.total_shrink}, demerits={self.demerits})>"
def list_str(self):
"""
Returns the string representing the list that this Break is a part of.
"""
out = '['
first_node = None
for node in self.iterate_backward(True):
first_node = node
out += repr(first_node)
for node in first_node.iterate_forward(False):
out += ', ' + repr(node)
out += ']'
return out
# -- Give the Algorithm Function Itself
BreakpointInfo = namedtuple('BreakpointInfo', ['break_point_obj', 'line_info'])
LineInfo = namedtuple('LineInfo', ["total_num_lines", "ratio", "line_num", "line_length", "line_contents"])
def knuth_plass_breaks(paragraph:List[Spec],
line_lengths:Union[List[Num], Num, \
Generator[Num, None, None]], # l1, l2,... in the paper
looseness:int=0, # q in the paper
tolerance:int=1, # rho in the paper
fitness_demerit:Num=100, # gamma in the paper
flagged_demerit:Num=100, # alpha in the paper
ret_vals:bool=False
):
"""
Takes in a list of Glue, Box, and Penalty objects, runs the Knuth-Plass
algorithm, and yields the results.
IMPORTANT : If you are trying to break up text, then it is very important
that every single char in the text is represented by 1 box or glue
        because that is how the algorithm knows and returns the indexes of the
        text at which it is supposed to break.
paragraph : A list of Glue, Box, and Penalty items that you want the breaks
for.
line_lengths : a list of integers giving the lengths of each line. The
last element of the list is reused for subsequent lines after it.
looseness : An integer value. If it's positive, the paragraph will be set
to take that many lines more than the optimum value. If it's negative,
the paragraph is set as tightly as possible. Defaults to zero, meaning the
optimal length for the paragraph.
tolerance : the maximum adjustment ratio allowed for a line. Defaults to 1.
fitness_demerit : additional value added to the demerit score when two
consecutive lines are in different fitness classes.
flagged_demerit : additional value added to the demerit score when breaking
at the second of two flagged penalties.
ret_vals : If True, it will return the values, otherwise this
method returns the values as a generator. The generator implementation
        is the default and saves a lot of memory, but means that the output can
        only be iterated through once before you have to run this method again
        to get another generator.
return : the return value is a generator/list that returns BreakpointInfo
namedtuples. These have the following format:
BreakpointInfo(
break_point_obj: the actual breakpoint object generated
line_info: namedtuple (contains info for each line) LineInfo(
total_num_lines: int, the total number of lines generated
ratio: int, for each Glue object on this line, give this
ratio to the Glue object's `r_width()` method to have the
method return what this Glue's width should be if you want
to JUSTIFY.FULL your text
line_num: int, the 1-indexed number of the line you are
currently on. So the first line yielded by the generator
is line 1
line_length: int, how long this line is supposed to be,
according to what was given to the generator
line_contents :
the list/generator that yields Glue, Box, and Penalty
objects that specify what is supposed to be on this line
)
)
"""
def is_feasible_breakpoint(i):
"""Return true if position 'i' is a feasible breakpoint."""
spec = paragraph[i]
if spec.is_penalty() and spec.penalty < INF:
            # A penalty below +INF allows a break here (forced when penalty == -INF)
return True
elif i > 0 and paragraph[i-1].is_box() and spec.is_glue():
# Breakpoint when glue directly follows a box
return True
else:
return False
if isinstance(line_lengths, int) or isinstance(line_lengths, float):
line_lengths = [line_lengths]
#m = len(paragraph)
if len(paragraph) == 0: return [] # No text, so no breaks
# Precompute the running sums of width, stretch, and shrink (W,Y,Z in the
# original paper). These make it easy to measure the width/stretch/shrink
# between two indexes; just compute sum_*[pos2] - sum_*[pos1]. Note that
# sum_*[i] is the total up to but not including the box at position i.
sum_width = {}; sum_stretch = {}; sum_shrink = {}
width_sum = stretch_sum = shrink_sum = 0
for i, spec in enumerate(paragraph):
sum_width[i] = width_sum
sum_stretch[i] = stretch_sum
sum_shrink[i] = shrink_sum
width_sum += spec.width
if spec.is_glue():
stretch_sum = stretch_sum + spec.stretch
shrink_sum = shrink_sum + spec.shrink
def measure_width(pos1, pos2):
"""Add up the widths between positions 1 and 2"""
return sum_width[pos2] - sum_width[pos1]
def measure_stretch(pos1, pos2):
"""Add up the stretch between positions 1 and 2"""
return sum_stretch[pos2] - sum_stretch[pos1]
def measure_shrink(pos1, pos2):
"""Add up the shrink between positions 1 and 2"""
return sum_shrink[pos2] - sum_shrink[pos1]
def compute_adjustment_ratio(pos1, pos2, line, line_lengths):
"""Compute adjustment ratio for the line between pos1 and pos2"""
ideal_width = measure_width(pos1, pos2) # ideal width
if paragraph[pos2].is_penalty():
ideal_width += paragraph[pos2].width
# Get the length of the current line; if the line_lengths list
# is too short, the last value is always used for subsequent
# lines.
if line < len(line_lengths):
available_width = line_lengths[line]
else:
available_width = line_lengths[-1]
# Compute how much the contents of the line would have to be
# stretched or shrunk to fit into the available space.
if ideal_width < available_width:
# You would have to stretch this line if you want it to fit on the
# desired line
            # The total amount of stretch available for this line (in whatever
            # units the parts of the paragraph are measured in)
            y = measure_stretch(pos1, pos2)
            if y > 0:
                # Since it is possible to stretch the line, find out how much
                # to stretch it by so it takes up the full width of the line
r = (available_width - ideal_width) / float(y)
else:
r = INF
elif ideal_width > available_width:
# Must shrink the line by removing space from glue if you want it
# to fit on the line
            # The total amount of shrink available for this line, i.e. how much
            # it could possibly be shrunk by to make it fit on the desired line
            z = measure_shrink(pos1, pos2)
if z > 0:
# Since it is possible to shrink the line, find how much you
# should shrink it to fit it perfectly (width matches desired
# width) on the line
r = (available_width - ideal_width) / float(z)
else:
r = INF
else:
# Exactly the right length!
r = 0
return r
A = Break(position=0, line=0, fitness_class=1, total_width=0, total_stretch=0, total_shrink=0, demerits=0)
first_active_node = A # The first node in the active_nodes linked list. This node will never change
def add_active_node(first_active_node, node):
"""
Add a node to the active node list.
The node is added so that the list of active nodes is always
sorted by line number, and so that the set of (position, line,
fitness_class) tuples has no repeated values.
"""
# Find the first index at which the active node's line number is equal
# to or greater than the line for 'node'. This gives us the insertion
# point.
for curr_node in first_active_node.iterate_forward(True):
insertion_node = curr_node
if curr_node.line >= node.line:
break
# Check if there's a node with the same line number and position and
# fitness. This lets us ensure that the list of active nodes always has
# unique (line, position, fitness) values.
for curr_node in insertion_node.iterate_forward(True):
if curr_node.line != node.line:
break
if (curr_node.fitness_class == node.fitness_class \
and curr_node.position == node.position):
# A match, so just return without adding the node
return
# Insert the new node so that the line numbers are in order
if insertion_node.line < node.line:
insertion_node.insert_after(node)
else:
insertion_node.insert(node)
# -- End Function
max_len = 0
breaks_to_remove = []
for i, B in enumerate(paragraph):
max_len = max(max_len, len(first_active_node))
# Determine if this box is a feasible breakpoint and
# perform the main loop if it is.
if is_feasible_breakpoint(i):
            # Loop over the list of active nodes and compute the adjustment
            # ratio and demerits of the line formed by breaking at A and B.
            # The resulting feasible breaks are collected and added to the
            # active node list after the loop.
breaks = [] # List of feasible breaks
for A in first_active_node.iterate_forward(True):
r = compute_adjustment_ratio(A.position, i, A.line, line_lengths)
# 1. You cannot shrink the line more than the shrinkage
# available (but, notice that you can stretch the line more
# than specified)
                # 2. If B, the breakpoint we are currently looking at, is a
# forced breakpoint, then you have to take it instead of any
# previous breakpoint so remove the breakpoints that do not
# allow you to take this current breakpoint B
if (r < -1 or B.is_forced_break()):
                    # Mark node A for deactivation; the actual removal happens
                    # below, taking care not to empty the active list entirely
                    breaks_to_remove.append(A)
if -1 <= r <= tolerance:
                    # Compute demerits and fitness class. Per the Knuth-Plass
                    # paper the demerits are:
                    #   p >= 0            : (1 + 100|r|^3 + p)^2
                    #   p == -infinity    : (1 + 100|r|^3)^2      (forced break)
                    #   -infinity < p < 0 : (1 + 100|r|^3)^2 - p^2
                    p = B.penalty if B.is_penalty() else 0
                    if p >= 0:
                        demerits = (1 + 100 * abs(r)**3 + p) ** 2
                    elif B.is_forced_break():
                        demerits = (1 + 100 * abs(r)**3) ** 2
                    else:
                        demerits = (1 + 100 * abs(r)**3) ** 2 - p**2
                    # Add the flagged demerit when both this breakpoint and the
                    # previous one (at A.position) are flagged penalties
                    curr_f = 1 if B.is_penalty() and B.flagged else 0
                    prev_spec = paragraph[A.position]
                    prev_f = 1 if prev_spec.is_penalty() and prev_spec.flagged else 0
                    demerits += (flagged_demerit * curr_f * prev_f)
# Figure out the fitness class of this line (tight, loose,
# very tight, or very loose).
if r < -.5: fitness_class = 0
elif r <= .5: fitness_class = 1
elif r <= 1: fitness_class = 2
else: fitness_class = 3
# If two consecutive lines are in very different fitness
# classes, add to the demerit score for this break.
if abs(fitness_class - A.fitness_class) > 1:
demerits = demerits + fitness_demerit
# Record a feasible break from A to B
brk = Break(
position = i,
line = A.line + 1,
fitness_class = fitness_class,
total_width = sum_width[i],
total_stretch = sum_stretch[i],
total_shrink = sum_shrink[i],
demerits = demerits,
previous = A
)
breaks.append(brk)
# end for A in active_nodes
# Now remove all nodes that need to be removed from the
# active_nodes list
while breaks_to_remove:
brk = breaks_to_remove.pop()
if brk is first_active_node:
# Since brk is the first node in the linked list and we
# want to remove brk, we have to either update
# first_active_node before deleting it or just
# not delete it if it is the only node in the list
if first_active_node.next_break is not None:
first_active_node = first_active_node.next_break
brk.remove_from_linked_list()
else:
brk.remove_from_linked_list()
# Add in the new breaks
if breaks:
for brk in breaks:
add_active_node(first_active_node, brk)
# end if self.feasible_breakpoint()
# end for i in range(m)
# Find the active node with the lowest number of demerits.
# NOTE: this loop MUST use "<", not "<=" because "<=" leads to the lines
# with maximum allowable stretch to be used i.e. the most space possible
# will be added to each line
A = first_active_node
for node in first_active_node.iterate_forward(False):
if node.demerits < A.demerits:
A = node
if looseness != 0:
# The search for the appropriate active node is a bit more complicated;
# we look for a node with a paragraph length that's as close as
# possible to (A.line + looseness) with the minimum number of demerits.
best = 0
d = INF
for br in first_active_node.iterate_forward(True):
delta = br.line - A.line
# The two branches of this 'if' statement are for handling values
# of looseness that are either positive or negative.
            if ((looseness <= delta < best) or (best < delta <= looseness)):
                best = delta
                d = br.demerits
                b = br
elif delta == best and br.demerits < d:
# This break is of the same length, but has fewer demerits and
# hence is the one we should use.
d = br.demerits
b = br
A = b
# Generate the list of chosen break points
breaks = []
break_objs = []
while A is not None:
breaks.append(A.position)
break_objs.append(A)
A = A.previous
break_objs.reverse()
breaks.reverse()
# -- Now Actually Yield/Return the Results
assert breaks[0] == 0
def line_length_gen():
i = 0
while True:
if i < len(line_lengths):
yield line_lengths[i]
else:
yield line_lengths[-1]
i += 1
total_num_lines = (len(breaks) - 1) # How many lines the text was broken into
def ret_vals_gen():
line_start = 0
line_num = 0
for break_point, line_length in zip(breaks[1:], line_length_gen()):
ratio = compute_adjustment_ratio(line_start, break_point, line_num, line_lengths)
            # Bind the current values as default arguments so this inner
            # generator does not pick up later values of line_start and
            # break_point (Python closures bind late)
            def line_contents(start=line_start, stop=break_point):
                for i in range(start, stop):
                    yield paragraph[i]
# line_num + 1 because line_num is 0 indexed but line_num given should not be
yield BreakpointInfo(break_point, LineInfo(total_num_lines, ratio, line_num + 1, line_length, line_contents()))
line_num += 1
line_start = break_point + 1
if ret_vals:
# Return the values as lists rather than a generator
rets = []
for break_point, line_info in ret_vals_gen():
rets.append(BreakpointInfo(break_point, LineInfo(*line_info[:-1], tuple(spec.copy() for spec in line_info.line_contents))))
return rets
else:
# Return a generator that will yield the values without taking up more memory
return ret_vals_gen()
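# --- Illustrative usage -------------------------------------------------------
# A minimal sketch (not part of the original algorithm) of how the generator
# returned by knuth_plass_breaks() can be consumed. It assumes make_paragraph()
# (used in main() below) turns a plain string into the Glue/Box/Penalty specs
# the algorithm expects.
def example_consume_breaks(text, width=40):
    """Yield (line_number, line_text) pairs for `text` broken at `width`."""
    for break_point, line_info in knuth_plass_breaks(make_paragraph(text), width):
        chars = []
        for spec in line_info.line_contents:
            if spec.is_box():
                chars.append(spec.value)   # Boxes are assumed to hold characters
            elif spec.is_glue():
                chars.append(' ')          # Render each glue as a single space
        yield line_info.line_num, ''.join(chars).strip()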
def str_for_breaks(breaks, justify:str=JUSTIFY.LEFT, end_mark:str=''):
"""
Takes what is returned by the knuth_plass_breaks() function and turns it
into a string depending on the given justification.
    Note: This method assumes that every Box in the given breaks holds a
        character (string) and not some other kind of content such as an
        image.
"""
def insert_spaces(string, num_spaces):
"""
        Inserts the given number of spaces into the given string, trying to put
        them in between words, working from the left side to the right.
"""
while True:
out = ''
added_space = False
add_space = False
for ch in string:
                if num_spaces > 0 and add_space and ch in WHITESPACE:
out += ' '
num_spaces -= 1
added_space = True
add_space = False
else:
add_space = True
out += ch
            # If there was no opportunity to add a space, this is probably the
            # last line of a justified paragraph (which is left-justified
            # anyway), so just add a space to the end.
if not added_space and num_spaces > 0:
out += ' '
num_spaces -= 1
if num_spaces <= 0:
break
string = out
out = ''
return out
    justify = justify.upper() # The JUSTIFY constants are all upper-case, so normalize the given value to match
out = ''
curr_line = ''
for break_point_obj, line_info in breaks:
total_num_lines = line_info.total_num_lines
line_num = line_info.line_num
ratio = line_info.ratio
line_length = line_info.line_length
line_contents = line_info.line_contents
last_spec = None
# -- Build the current line
for spec in line_contents:
if spec.is_glue():
                if justify == JUSTIFY.FULL and line_num != total_num_lines:
                    # Need to add space in between words to fully justify the
                    # text on both the left and the right
width = int(spec.r_width(ratio))
else:
# Not Full justified, so no extra spaces between the words.
width = 1
curr_line += ' ' * width
elif spec.is_box():
curr_line += spec.value # This assumes that the value is a string character
# -- Justify The Built Line
if (justify == JUSTIFY.LEFT) or (justify == JUSTIFY.FULL and line_num == total_num_lines):
curr_line = curr_line.lstrip(WHITESPACE_CHARS)
out += curr_line + (' ' * (line_length - len(curr_line)))
elif justify == JUSTIFY.RIGHT:
curr_line = curr_line.rstrip(WHITESPACE_CHARS)
out += (' ' * (line_length - len(curr_line))) + curr_line
elif justify == JUSTIFY.CENTER:
curr_line = curr_line.strip(WHITESPACE_CHARS)
total_spaces_needed = line_length - len(curr_line)
            # NOTE: if this line cannot be centered exactly, it ends up
            # off-center by one space (the extra space goes on the left).
            # Floating point space widths would allow perfect centering, but
            # strings require whole spaces.
right_spaces = total_spaces_needed // 2
left_spaces = total_spaces_needed - right_spaces
out += (' ' * left_spaces) + curr_line + (' ' * right_spaces)
elif justify == JUSTIFY.FULL:
            # NOTE: The algorithm allows glues to have fractional widths, but
            # strings need whole spaces, so some width was lost when the glue
            # widths were truncated to integers above. `insert_spaces` adds
            # that lost space back so the line reaches its full length.
curr_line = insert_spaces(curr_line, line_length - len(curr_line))
out += curr_line
else:
raise Exception(f"Gave unknown justification specification: {justify}")
curr_line = ''
out += end_mark + "\n"
return out
# =============================================================================
# Main
# -----------------------------------------------------------------------------
def main():
short_text = """Among other public buildings in a certain town, which for many reasons it will be prudent to refrain from mentioning, and to which I will assign no fictitious name, there is one anciently common to most towns, great or small: to wit, a workhouse; and in this workhouse was born; on a day and date which I need not trouble myself to repeat, inasmuch as it can be of no possible consequence to the reader, in this stage of the business at all events; the item of mortality whose name is prefixed to the head of this chapter."""
medium_text = """For the next eight or ten months, Oliver was the victim of a systematic course of treachery and deception. He was brought up by hand. The hungry and destitute situation of the infant orphan was duly reported by the workhouse authorities to the parish authorities. The parish authorities inquired with dignity of the workhouse authorities, whether there was no female then domiciled in “the house” who was in a situation to impart to Oliver Twist, the consolation and nourishment of which he stood in need. The workhouse authorities replied with humility, that there was not. Upon this, the parish authorities magnanimously and humanely resolved, that Oliver should be “farmed,” or, in other words, that he should be dispatched to a branch-workhouse some three miles off, where twenty or thirty other juvenile offenders against the poor-laws, rolled about the floor all day, without the inconvenience of too much food or too much clothing, under the parental superintendence of an elderly female, who received the culprits at and for the consideration of sevenpence-halfpenny per small head per week. Sevenpence-halfpenny’s worth per week is a good round diet for a child; a great deal may be got for sevenpence-halfpenny, quite enough to overload its stomach, and make it uncomfortable. The elderly female was a woman of wisdom and experience; she knew what was good for children; and she had a very accurate perception of what was good for herself. So, she appropriated the greater part of the weekly stipend to her own use, and consigned the rising parochial generation to even a shorter allowance than was originally provided for them. Thereby finding in the lowest depth a deeper still; and proving herself a very great experimental philosopher."""
def print_out(*breaks_args, **kwargs):
kwargs["ret_vals"] = True
breaks = knuth_plass_breaks(*breaks_args, **kwargs)
print()
print("JUSTIFIED LEFT")
print("==============")
print(str_for_breaks(breaks, JUSTIFY.LEFT, '|'))
print()
print("JUSTIFIED RIGHT")
print("===============")
print(str_for_breaks(breaks, JUSTIFY.RIGHT, '|'))
print()
print("JUSTIFIED CENTER")
print("================")
print(str_for_breaks(breaks, JUSTIFY.CENTER, '|'))
print()
print("JUSTIFIED FULL")
print("==============")
print(str_for_breaks(breaks, JUSTIFY.FULL, '|'))
print("----------------------------------------")
print_out(make_paragraph(short_text), range(120, 20, -10), tolerance=1)
print_out(make_paragraph(short_text), 100, tolerance=1)
#print_out(make_paragraph(medium_text), 100, tolerance=1)
#print_out(make_paragraph(medium_long_text), 100, tolerance=1) # takes a few seconds
#print_out(make_paragraph(long_text), 100, tolerance=1) # takes a very long time
medium_long_text = \
"""Whether I shall turn out to be the hero of my own life, or whether that
station will be held by anybody else, these pages must show. To begin my
life with the beginning of my life, I record that I was born (as I have
been informed and believe) on a Friday, at twelve o’clock at night.
It was remarked that the clock began to strike, and I began to cry,
simultaneously.
In consideration of the day and hour of my birth, it was declared by
the nurse, and by some sage women in the neighbourhood who had taken a
lively interest in me several months before there was any possibility
of our becoming personally acquainted, first, that I was destined to be
unlucky in life; and secondly, that I was privileged to see ghosts and
spirits; both these gifts inevitably attaching, as they believed, to
all unlucky infants of either gender, born towards the small hours on a
Friday night.
I need say nothing here, on the first head, because nothing can show
better than my history whether that prediction was verified or falsified
by the result. On the second branch of the question, I will only remark,
that unless I ran through that part of my inheritance while I was still
a baby, I have not come into it yet. But I do not at all complain of
having been kept out of this property; and if anybody else should be in
the present enjoyment of it, he is heartily welcome to keep it.
I was born with a caul, which was advertised for sale, in the
newspapers, at the low price of fifteen guineas. Whether sea-going
people were short of money about that time, or were short of faith and
preferred cork jackets, I don’t know; all I know is, that there was but
one solitary bidding, and that was from an attorney connected with the
bill-broking business, who offered two pounds in cash, and the balance
in sherry, but declined to be guaranteed from drowning on any higher
bargain. Consequently the advertisement was withdrawn at a dead
loss--for as to sherry, my poor dear mother’s own sherry was in the
market then--and ten years afterwards, the caul was put up in a raffle
down in our part of the country, to fifty members at half-a-crown a
head, the winner to spend five shillings. I was present myself, and I
remember to have felt quite uncomfortable and confused, at a part of
myself being disposed of in that way. The caul was won, I recollect, by
an old lady with a hand-basket, who, very reluctantly, produced from it
the stipulated five shillings, all in halfpence, and twopence halfpenny
short--as it took an immense time and a great waste of arithmetic, to
endeavour without any effect to prove to her. It is a fact which will
be long remembered as remarkable down there, that she was never drowned,
but died triumphantly in bed, at ninety-two. I have understood that it
was, to the last, her proudest boast, that she never had been on the
water in her life, except upon a bridge; and that over her tea (to which
she was extremely partial) she, to the last, expressed her indignation
at the impiety of mariners and others, who had the presumption to go
‘meandering’ about the world. It was in vain to represent to her
that some conveniences, tea perhaps included, resulted from this
objectionable practice. She always returned, with greater emphasis and
with an instinctive knowledge of the strength of her objection, ‘Let us
have no meandering.’"""
long_text = \
"""Whether I shall turn out to be the hero of my own life, or whether that
station will be held by anybody else, these pages must show. To begin my
life with the beginning of my life, I record that I was born (as I have
been informed and believe) on a Friday, at twelve o’clock at night.
It was remarked that the clock began to strike, and I began to cry,
simultaneously.
In consideration of the day and hour of my birth, it was declared by
the nurse, and by some sage women in the neighbourhood who had taken a
lively interest in me several months before there was any possibility
of our becoming personally acquainted, first, that I was destined to be
unlucky in life; and secondly, that I was privileged to see ghosts and
spirits; both these gifts inevitably attaching, as they believed, to
all unlucky infants of either gender, born towards the small hours on a
Friday night.
I need say nothing here, on the first head, because nothing can show
better than my history whether that prediction was verified or falsified
by the result. On the second branch of the question, I will only remark,
that unless I ran through that part of my inheritance while I was still
a baby, I have not come into it yet. But I do not at all complain of
having been kept out of this property; and if anybody else should be in
the present enjoyment of it, he is heartily welcome to keep it.
I was born with a caul, which was advertised for sale, in the
newspapers, at the low price of fifteen guineas. Whether sea-going
people were short of money about that time, or were short of faith and
preferred cork jackets, I don’t know; all I know is, that there was but
one solitary bidding, and that was from an attorney connected with the
bill-broking business, who offered two pounds in cash, and the balance
in sherry, but declined to be guaranteed from drowning on any higher
bargain. Consequently the advertisement was withdrawn at a dead
loss--for as to sherry, my poor dear mother’s own sherry was in the
market then--and ten years afterwards, the caul was put up in a raffle
down in our part of the country, to fifty members at half-a-crown a
head, the winner to spend five shillings. I was present myself, and I
remember to have felt quite uncomfortable and confused, at a part of
myself being disposed of in that way. The caul was won, I recollect, by
an old lady with a hand-basket, who, very reluctantly, produced from it
the stipulated five shillings, all in halfpence, and twopence halfpenny
short--as it took an immense time and a great waste of arithmetic, to
endeavour without any effect to prove to her. It is a fact which will
be long remembered as remarkable down there, that she was never drowned,
but died triumphantly in bed, at ninety-two. I have understood that it
was, to the last, her proudest boast, that she never had been on the
water in her life, except upon a bridge; and that over her tea (to which
she was extremely partial) she, to the last, expressed her indignation
at the impiety of mariners and others, who had the presumption to go
‘meandering’ about the world. It was in vain to represent to her
that some conveniences, tea perhaps included, resulted from this
objectionable practice. She always returned, with greater emphasis and
with an instinctive knowledge of the strength of her objection, ‘Let us
have no meandering.’
Not to meander myself, at present, I will go back to my birth.
I was born at Blunderstone, in Suffolk, or ‘there by’, as they say in
Scotland. I was a posthumous child. My father’s eyes had closed upon
the light of this world six months, when mine opened on it. There is
something strange to me, even now, in the reflection that he never saw
me; and something stranger yet in the shadowy remembrance that I have
of my first childish associations with his white grave-stone in the
churchyard, and of the indefinable compassion I used to feel for it
lying out alone there in the dark night, when our little parlour
was warm and bright with fire and candle, and the doors of our house
were--almost cruelly, it seemed to me sometimes--bolted and locked
against it.
An aunt of my father’s, and consequently a great-aunt of mine, of whom
I shall have more to relate by and by, was the principal magnate of our
family. Miss Trotwood, or Miss Betsey, as my poor mother always called
her, when she sufficiently overcame her dread of this formidable
personage to mention her at all (which was seldom), had been married
to a husband younger than herself, who was very handsome, except in the
sense of the homely adage, ‘handsome is, that handsome does’--for he
was strongly suspected of having beaten Miss Betsey, and even of having
once, on a disputed question of supplies, made some hasty but determined
arrangements to throw her out of a two pair of stairs’ window. These
evidences of an incompatibility of temper induced Miss Betsey to pay him
off, and effect a separation by mutual consent. He went to India with
his capital, and there, according to a wild legend in our family, he was
once seen riding on an elephant, in company with a Baboon; but I think
it must have been a Baboo--or a Begum. Anyhow, from India tidings of his
death reached home, within ten years. How they affected my aunt, nobody
knew; for immediately upon the separation, she took her maiden name
again, bought a cottage in a hamlet on the sea-coast a long way off,
established herself there as a single woman with one servant, and
was understood to live secluded, ever afterwards, in an inflexible
retirement.
My father had once been a favourite of hers, I believe; but she was
mortally affronted by his marriage, on the ground that my mother was ‘a
wax doll’. She had never seen my mother, but she knew her to be not
yet twenty. My father and Miss Betsey never met again. He was double
my mother’s age when he married, and of but a delicate constitution. He
died a year afterwards, and, as I have said, six months before I came
into the world.
This was the state of matters, on the afternoon of, what I may be
excused for calling, that eventful and important Friday. I can make no
claim therefore to have known, at that time, how matters stood; or to
have any remembrance, founded on the evidence of my own senses, of what
follows.
My mother was sitting by the fire, but poorly in health, and very low in
spirits, looking at it through her tears, and desponding heavily about
herself and the fatherless little stranger, who was already welcomed by
some grosses of prophetic pins, in a drawer upstairs, to a world not at
all excited on the subject of his arrival; my mother, I say, was sitting
by the fire, that bright, windy March afternoon, very timid and sad, and
very doubtful of ever coming alive out of the trial that was before her,
when, lifting her eyes as she dried them, to the window opposite, she
saw a strange lady coming up the garden.
My mother had a sure foreboding at the second glance, that it was
Miss Betsey. The setting sun was glowing on the strange lady, over the
garden-fence, and she came walking up to the door with a fell rigidity
of figure and composure of countenance that could have belonged to
nobody else.
When she reached the house, she gave another proof of her identity.
My father had often hinted that she seldom conducted herself like any
ordinary Christian; and now, instead of ringing the bell, she came and
looked in at that identical window, pressing the end of her nose against
the glass to that extent, that my poor dear mother used to say it became
perfectly flat and white in a moment.
She gave my mother such a turn, that I have always been convinced I am
indebted to Miss Betsey for having been born on a Friday.
My mother had left her chair in her agitation, and gone behind it in
the corner. Miss Betsey, looking round the room, slowly and inquiringly,
began on the other side, and carried her eyes on, like a Saracen’s Head
in a Dutch clock, until they reached my mother. Then she made a frown
and a gesture to my mother, like one who was accustomed to be obeyed, to
come and open the door. My mother went.
‘Mrs. David Copperfield, I think,’ said Miss Betsey; the emphasis
referring, perhaps, to my mother’s mourning weeds, and her condition.
‘Yes,’ said my mother, faintly.
‘Miss Trotwood,’ said the visitor. ‘You have heard of her, I dare say?’
My mother answered she had had that pleasure. And she had a disagreeable
consciousness of not appearing to imply that it had been an overpowering
pleasure.
‘Now you see her,’ said Miss Betsey. My mother bent her head, and begged
her to walk in.
They went into the parlour my mother had come from, the fire in the best
room on the other side of the passage not being lighted--not having
been lighted, indeed, since my father’s funeral; and when they were both
seated, and Miss Betsey said nothing, my mother, after vainly trying to
restrain herself, began to cry. ‘Oh tut, tut, tut!’ said Miss Betsey, in
a hurry. ‘Don’t do that! Come, come!’
My mother couldn’t help it notwithstanding, so she cried until she had
had her cry out.
‘Take off your cap, child,’ said Miss Betsey, ‘and let me see you.’
My mother was too much afraid of her to refuse compliance with this odd
request, if she had any disposition to do so. Therefore she did as she
was told, and did it with such nervous hands that her hair (which was
luxuriant and beautiful) fell all about her face.
‘Why, bless my heart!’ exclaimed Miss Betsey. ‘You are a very Baby!’
My mother was, no doubt, unusually youthful in appearance even for her
years; she hung her head, as if it were her fault, poor thing, and said,
sobbing, that indeed she was afraid she was but a childish widow, and
would be but a childish mother if she lived. In a short pause which
ensued, she had a fancy that she felt Miss Betsey touch her hair, and
that with no ungentle hand; but, looking at her, in her timid hope, she
found that lady sitting with the skirt of her dress tucked up, her hands
folded on one knee, and her feet upon the fender, frowning at the fire.
‘In the name of Heaven,’ said Miss Betsey, suddenly, ‘why Rookery?’
‘Do you mean the house, ma’am?’ asked my mother.
‘Why Rookery?’ said Miss Betsey. ‘Cookery would have been more to the
purpose, if you had had any practical ideas of life, either of you.’
‘The name was Mr. Copperfield’s choice,’ returned my mother. ‘When he
bought the house, he liked to think that there were rooks about it.’
The evening wind made such a disturbance just now, among some tall old
elm-trees at the bottom of the garden, that neither my mother nor Miss
Betsey could forbear glancing that way. As the elms bent to one another,
like giants who were whispering secrets, and after a few seconds of such
repose, fell into a violent flurry, tossing their wild arms about, as if
their late confidences were really too wicked for their peace of mind,
some weatherbeaten ragged old rooks’-nests, burdening their higher
branches, swung like wrecks upon a stormy sea.
‘Where are the birds?’ asked Miss Betsey.
‘The--?’ My mother had been thinking of something else.
‘The rooks--what has become of them?’ asked Miss Betsey.
‘There have not been any since we have lived here,’ said my mother. ‘We
thought--Mr. Copperfield thought--it was quite a large rookery; but
the nests were very old ones, and the birds have deserted them a long
while.’
‘David Copperfield all over!’ cried Miss Betsey. ‘David Copperfield from
head to foot! Calls a house a rookery when there’s not a rook near it,
and takes the birds on trust, because he sees the nests!’
‘Mr. Copperfield,’ returned my mother, ‘is dead, and if you dare to
speak unkindly of him to me--’
My poor dear mother, I suppose, had some momentary intention of
committing an assault and battery upon my aunt, who could easily have
settled her with one hand, even if my mother had been in far better
training for such an encounter than she was that evening. But it passed
with the action of rising from her chair; and she sat down again very
meekly, and fainted.
When she came to herself, or when Miss Betsey had restored her,
whichever it was, she found the latter standing at the window. The
twilight was by this time shading down into darkness; and dimly as they
saw each other, they could not have done that without the aid of the
fire.
‘Well?’ said Miss Betsey, coming back to her chair, as if she had only
been taking a casual look at the prospect; ‘and when do you expect--’
‘I am all in a tremble,’ faltered my mother. ‘I don’t know what’s the
matter. I shall die, I am sure!’
‘No, no, no,’ said Miss Betsey. ‘Have some tea.’
‘Oh dear me, dear me, do you think it will do me any good?’ cried my
mother in a helpless manner.
‘Of course it will,’ said Miss Betsey. ‘It’s nothing but fancy. What do
you call your girl?’
‘I don’t know that it will be a girl, yet, ma’am,’ said my mother
innocently.
‘Bless the Baby!’ exclaimed Miss Betsey, unconsciously quoting the
second sentiment of the pincushion in the drawer upstairs, but
applying it to my mother instead of me, ‘I don’t mean that. I mean your
servant-girl.’
‘Peggotty,’ said my mother.
‘Peggotty!’ repeated Miss Betsey, with some indignation. ‘Do you mean to
say, child, that any human being has gone into a Christian church,
and got herself named Peggotty?’ ‘It’s her surname,’ said my mother,
faintly. ‘Mr. Copperfield called her by it, because her Christian name
was the same as mine.’
‘Here! Peggotty!’ cried Miss Betsey, opening the parlour door. ‘Tea.
Your mistress is a little unwell. Don’t dawdle.’
Having issued this mandate with as much potentiality as if she had been
a recognized authority in the house ever since it had been a house,
and having looked out to confront the amazed Peggotty coming along the
passage with a candle at the sound of a strange voice, Miss Betsey shut
the door again, and sat down as before: with her feet on the fender, the
skirt of her dress tucked up, and her hands folded on one knee.
‘You were speaking about its being a girl,’ said Miss Betsey. ‘I have no
doubt it will be a girl. I have a presentiment that it must be a girl.
Now child, from the moment of the birth of this girl--’
‘Perhaps boy,’ my mother took the liberty of putting in.
‘I tell you I have a presentiment that it must be a girl,’ returned Miss
Betsey. ‘Don’t contradict. From the moment of this girl’s birth, child,
I intend to be her friend. I intend to be her godmother, and I beg
you’ll call her Betsey Trotwood Copperfield. There must be no mistakes
in life with THIS Betsey Trotwood. There must be no trifling with HER
affections, poor dear. She must be well brought up, and well guarded
from reposing any foolish confidences where they are not deserved. I
must make that MY care.’
There was a twitch of Miss Betsey’s head, after each of these sentences,
as if her own old wrongs were working within her, and she repressed any
plainer reference to them by strong constraint. So my mother suspected,
at least, as she observed her by the low glimmer of the fire: too
much scared by Miss Betsey, too uneasy in herself, and too subdued and
bewildered altogether, to observe anything very clearly, or to know what
to say.
‘And was David good to you, child?’ asked Miss Betsey, when she had been
silent for a little while, and these motions of her head had gradually
ceased. ‘Were you comfortable together?’
‘We were very happy,’ said my mother. ‘Mr. Copperfield was only too good
to me.’
‘What, he spoilt you, I suppose?’ returned Miss Betsey.
‘For being quite alone and dependent on myself in this rough world
again, yes, I fear he did indeed,’ sobbed my mother.
‘Well! Don’t cry!’ said Miss Betsey. ‘You were not equally matched,
child--if any two people can be equally matched--and so I asked the
question. You were an orphan, weren’t you?’ ‘Yes.’
‘And a governess?’
‘I was nursery-governess in a family where Mr. Copperfield came to
visit. Mr. Copperfield was very kind to me, and took a great deal of
notice of me, and paid me a good deal of attention, and at last proposed
to me. And I accepted him. And so we were married,’ said my mother
simply.
‘Ha! Poor Baby!’ mused Miss Betsey, with her frown still bent upon the
fire. ‘Do you know anything?’
‘I beg your pardon, ma’am,’ faltered my mother.
‘About keeping house, for instance,’ said Miss Betsey.
‘Not much, I fear,’ returned my mother. ‘Not so much as I could wish.
But Mr. Copperfield was teaching me--’
[‘Much he knew about it himself!’) said Miss Betsey in a parenthesis.
--‘And I hope I should have improved, being very anxious to learn, and
he very patient to teach me, if the great misfortune of his death’--my
mother broke down again here, and could get no farther.
‘Well, well!’ said Miss Betsey. --‘I kept my housekeeping-book
regularly, and balanced it with Mr. Copperfield every night,’ cried my
mother in another burst of distress, and breaking down again.
‘Well, well!’ said Miss Betsey. ‘Don’t cry any more.’ --‘And I am
sure we never had a word of difference respecting it, except when Mr.
Copperfield objected to my threes and fives being too much like each
other, or to my putting curly tails to my sevens and nines,’ resumed my
mother in another burst, and breaking down again.
‘You’ll make yourself ill,’ said Miss Betsey, ‘and you know that will
not be good either for you or for my god-daughter. Come! You mustn’t do
it!’
This argument had some share in quieting my mother, though her
increasing indisposition had a larger one. There was an interval of
silence, only broken by Miss Betsey’s occasionally ejaculating ‘Ha!’ as
she sat with her feet upon the fender.
‘David had bought an annuity for himself with his money, I know,’ said
she, by and by. ‘What did he do for you?’
‘Mr. Copperfield,’ said my mother, answering with some difficulty, ‘was
so considerate and good as to secure the reversion of a part of it to
me.’
‘How much?’ asked Miss Betsey.
‘A hundred and five pounds a year,’ said my mother.
‘He might have done worse,’ said my aunt.
The word was appropriate to the moment. My mother was so much worse
that Peggotty, coming in with the teaboard and candles, and seeing at a
glance how ill she was,--as Miss Betsey might have done sooner if there
had been light enough,--conveyed her upstairs to her own room with all
speed; and immediately dispatched Ham Peggotty, her nephew, who had been
for some days past secreted in the house, unknown to my mother, as a
special messenger in case of emergency, to fetch the nurse and doctor.
Those allied powers were considerably astonished, when they arrived
within a few minutes of each other, to find an unknown lady of
portentous appearance, sitting before the fire, with her bonnet tied
over her left arm, stopping her ears with jewellers’ cotton. Peggotty
knowing nothing about her, and my mother saying nothing about her,
she was quite a mystery in the parlour; and the fact of her having a
magazine of jewellers’ cotton in her pocket, and sticking the article
in her ears in that way, did not detract from the solemnity of her
presence.
The doctor having been upstairs and come down again, and having
satisfied himself, I suppose, that there was a probability of this
unknown lady and himself having to sit there, face to face, for some
hours, laid himself out to be polite and social. He was the meekest of
his sex, the mildest of little men. He sidled in and out of a room, to
take up the less space. He walked as softly as the Ghost in Hamlet,
and more slowly. He carried his head on one side, partly in modest
depreciation of himself, partly in modest propitiation of everybody
else. It is nothing to say that he hadn’t a word to throw at a dog. He
couldn’t have thrown a word at a mad dog. He might have offered him one
gently, or half a one, or a fragment of one; for he spoke as slowly as
he walked; but he wouldn’t have been rude to him, and he couldn’t have
been quick with him, for any earthly consideration.
Mr. Chillip, looking mildly at my aunt with his head on one side, and
making her a little bow, said, in allusion to the jewellers’ cotton, as
he softly touched his left ear:
‘Some local irritation, ma’am?’
‘What!’ replied my aunt, pulling the cotton out of one ear like a cork.
Mr. Chillip was so alarmed by her abruptness--as he told my mother
afterwards--that it was a mercy he didn’t lose his presence of mind. But
he repeated sweetly:
‘Some local irritation, ma’am?’
‘Nonsense!’ replied my aunt, and corked herself again, at one blow.
Mr. Chillip could do nothing after this, but sit and look at her feebly,
as she sat and looked at the fire, until he was called upstairs again.
After some quarter of an hour’s absence, he returned.
‘Well?’ said my aunt, taking the cotton out of the ear nearest to him.
‘Well, ma’am,’ returned Mr. Chillip, ‘we are--we are progressing slowly,
ma’am.’
‘Ba--a--ah!’ said my aunt, with a perfect shake on the contemptuous
interjection. And corked herself as before.
Really--really--as Mr. Chillip told my mother, he was almost shocked;
speaking in a professional point of view alone, he was almost shocked.
But he sat and looked at her, notwithstanding, for nearly two hours,
as she sat looking at the fire, until he was again called out. After
another absence, he again returned.
‘Well?’ said my aunt, taking out the cotton on that side again.
‘Well, ma’am,’ returned Mr. Chillip, ‘we are--we are progressing slowly,
ma’am.’
‘Ya--a--ah!’ said my aunt. With such a snarl at him, that Mr. Chillip
absolutely could not bear it. It was really calculated to break his
spirit, he said afterwards. He preferred to go and sit upon the stairs,
in the dark and a strong draught, until he was again sent for.
Ham Peggotty, who went to the national school, and was a very dragon at
his catechism, and who may therefore be regarded as a credible witness,
reported next day, that happening to peep in at the parlour-door an hour
after this, he was instantly descried by Miss Betsey, then walking to
and fro in a state of agitation, and pounced upon before he could make
his escape. That there were now occasional sounds of feet and voices
overhead which he inferred the cotton did not exclude, from the
circumstance of his evidently being clutched by the lady as a victim on
whom to expend her superabundant agitation when the sounds were loudest.
That, marching him constantly up and down by the collar (as if he had
been taking too much laudanum), she, at those times, shook him, rumpled
his hair, made light of his linen, stopped his ears as if she confounded
them with her own, and otherwise tousled and maltreated him. This was
in part confirmed by his aunt, who saw him at half past twelve o’clock,
soon after his release, and affirmed that he was then as red as I was.
The mild Mr. Chillip could not possibly bear malice at such a time, if
at any time. He sidled into the parlour as soon as he was at liberty,
and said to my aunt in his meekest manner:
‘Well, ma’am, I am happy to congratulate you.’
‘What upon?’ said my aunt, sharply.
Mr. Chillip was fluttered again, by the extreme severity of my aunt’s
manner; so he made her a little bow and gave her a little smile, to
mollify her.
‘Mercy on the man, what’s he doing!’ cried my aunt, impatiently. ‘Can’t
he speak?’
‘Be calm, my dear ma’am,’ said Mr. Chillip, in his softest accents.
‘There is no longer any occasion for uneasiness, ma’am. Be calm.’
It has since been considered almost a miracle that my aunt didn’t shake
him, and shake what he had to say, out of him. She only shook her own
head at him, but in a way that made him quail.
‘Well, ma’am,’ resumed Mr. Chillip, as soon as he had courage, ‘I am
happy to congratulate you. All is now over, ma’am, and well over.’
During the five minutes or so that Mr. Chillip devoted to the delivery
of this oration, my aunt eyed him narrowly.
‘How is she?’ said my aunt, folding her arms with her bonnet still tied
on one of them.
‘Well, ma’am, she will soon be quite comfortable, I hope,’ returned Mr.
Chillip. ‘Quite as comfortable as we can expect a young mother to be,
under these melancholy domestic circumstances. There cannot be any
objection to your seeing her presently, ma’am. It may do her good.’
‘And SHE. How is SHE?’ said my aunt, sharply.
Mr. Chillip laid his head a little more on one side, and looked at my
aunt like an amiable bird.
‘The baby,’ said my aunt. ‘How is she?’
‘Ma’am,’ returned Mr. Chillip, ‘I apprehended you had known. It’s a
boy.’
My aunt said never a word, but took her bonnet by the strings, in the
manner of a sling, aimed a blow at Mr. Chillip’s head with it, put it on
bent, walked out, and never came back. She vanished like a discontented
fairy; or like one of those supernatural beings, whom it was popularly
supposed I was entitled to see; and never came back any more.
No. I lay in my basket, and my mother lay in her bed; but Betsey
Trotwood Copperfield was for ever in the land of dreams and shadows, the
tremendous region whence I had so lately travelled; and the light upon
the window of our room shone out upon the earthly bourne of all such
travellers, and the mound above the ashes and the dust that once was he,
without whom I had never been.
"""
if __name__ == "__main__":
main()
|
import os
import imageio
import numpy as np
from tqdm import tqdm
from math import exp
import matplotlib.pyplot as plt
class HHNeuron():
def __init__(self):
self.v = -65
self.vinit = -65
self.gnamax = 1.20
self.gkmax = 0.36
self.vk = -77
self.vna = 50
self.gl = 0.003
self.vl = -54.387
self.cm = 0.01
self.m = 0.0530
self.h = 0.5960
self.n = 0.3177
def change_params(self, params):
for key in params:
setattr(self, key, params[key])
def simulate(self, t, I, reinit=True):
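        # Integration note: each state variable x in (v, m, h, n) is advanced
        # with the exponential Euler update x <- x_inf + (x - x_inf)*exp(-dt/tau_x),
        # i.e. the exact solution of dx/dt = (x_inf - x)/tau_x over one time
        # step, with x_inf and tau_x recomputed from the rate constants at
        # every step.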
self.t = t
dt = t[1] - t[0]
niter = len(t)
if reinit:
self.v = self.vinit
self.vhist = []
self.mhist = []
self.hhist = []
self.nhist = []
        if type(I) not in [list, np.ndarray]:
            I = I*np.ones((len(t),))
        I = np.asarray(I)
        if I.size != len(t):
            raise ValueError("The size of the current vector doesn't match that of the time vector.")
self.I = I
for idx in range(niter):
gna = self.gnamax*self.m**3*self.h
gk = self.gkmax*self.n**4
gtot = gna + gk + self.gl
vinf = ((gna*self.vna + gk*self.vk + self.gl*self.vl) + I[idx])/gtot
tauv = self.cm/gtot
self.v = vinf + (self.v - vinf)*exp(-dt/tauv)
self.am = 0.1*(self.v + 40)/(1-exp(-(self.v + 40)/10))
self.bm = 4*exp(-0.0556*(self.v + 65))
self.an = 0.01*(self.v + 55)/(1-exp(-(self.v + 55)/10))
self.bn = 0.125*exp(-(self.v + 65)/80)
self.ah = 0.07*exp(-0.05*(self.v + 65))
self.bh = 1/(1+exp(-0.1*(self.v + 35)))
taum = 1/(self.am + self.bm)
tauh = 1/(self.ah + self.bh)
taun = 1/(self.an + self.bn)
minf = self.am*taum
hinf = self.ah*tauh
ninf = self.an*taun
self.m = minf + (self.m - minf)*exp(-dt/taum)
self.h = hinf + (self.h - hinf)*exp(-dt/tauh)
self.n = ninf + (self.n - ninf)*exp(-dt/taun)
self.vhist.append(self.v)
self.mhist.append(self.m)
self.hhist.append(self.h)
self.nhist.append(self.n)
def plot(self, ylim=None, save=False, name=None, show=True, image_directory="images"):
        if not hasattr(self, "vhist"):
            raise RuntimeError("The model should be simulated before plotting the results! Run model.simulate() first.")
        if name is None:
name = str(round(self.I[0],5))
if save:
            os.makedirs(image_directory, exist_ok=True)
plt.figure()
plt.plot(self.t, self.vhist)
plt.grid()
        if ylim is not None:
plt.ylim(ylim)
plt.xlabel("Time (ms)")
plt.ylabel("Voltage (mV)")
plt.title("Hodgkin & Huxley Neuron; Voltage across Time; $I=$"+name)
if save:
fig_name = image_directory+"/hh_"+name+"_v.png"
plt.savefig(fig_name)
plt.figure()
plt.plot(self.t, self.mhist)
plt.grid()
plt.xlabel("Time (ms)")
plt.ylabel("$m(t)$")
plt.title("Hodgkin & Huxley Neuron; $m$ across Time; $I=$"+name)
if save:
fig_name = image_directory+"/hh_"+name+"_m.png"
plt.savefig(fig_name)
plt.figure()
plt.plot(self.t, self.nhist)
plt.grid()
plt.xlabel("Time (ms)")
plt.ylabel("$n(t)$")
plt.title("Hodgkin & Huxley Neuron; $n$ across Time; $I=$"+name)
if save:
fig_name = image_directory+"/hh_"+name+"_n.png"
plt.savefig(fig_name)
plt.figure()
plt.plot(self.t, self.hhist)
plt.grid()
plt.xlabel("Time (ms)")
plt.ylabel("$h(t)$")
plt.title("Hodgkin & Huxley Neuron; $h$ across Time; $I=$"+name)
if save:
fig_name = image_directory+"/hh_"+name+"_h.png"
plt.savefig(fig_name)
if show:
plt.show()
plt.close("all")
def animate(self, t, current_list, ylim=None, name="1", image_directory="images"):
        os.makedirs(image_directory, exist_ok=True)
images_arr = []
for I in tqdm(current_list):
self.simulate(t, I)
self.plot(ylim=ylim, save=True, show=False)
fig_name = image_directory+"/hh_"+str(round(self.I[0],5))+"_v.png"
images_arr.append(imageio.imread(fig_name))
print("Generating the GIF and MOV files... This might take a while...")
imageio.mimsave(image_directory+"/hh_"+name+".gif", images_arr, duration=0.2)
imageio.mimsave(image_directory+"/hh_"+name+".mov", images_arr)
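# --- Illustrative usage -------------------------------------------------------
# A minimal sketch (not from the original file): simulate 50 ms at dt = 0.01 ms
# with a constant injected current and plot the traces. The current value 0.1
# is only an example and is assumed to be in the same (scaled) units as the
# conductances defined above.
if __name__ == "__main__":
    neuron = HHNeuron()
    t = np.arange(0, 50, 0.01)   # time axis in ms
    neuron.simulate(t, 0.1)      # constant current input
    neuron.plot(show=True, save=False)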
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import lale.operators
import lale.pretty_print
class TestToGraphviz(unittest.TestCase):
def test_with_operator_choice(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
Nystroem,
)
from lale.operators import make_choice
kernel_tfm_or_not = NoOp | Nystroem
tfm = PCA
clf = make_choice(LogisticRegression, KNeighborsClassifier)
clf.visualize(ipython_display=False)
optimizable = kernel_tfm_or_not >> tfm >> clf
optimizable.visualize(ipython_display=False)
def test_invalid_input(self):
from sklearn.linear_model import LogisticRegression as SklearnLR
scikit_lr = SklearnLR()
from lale.helpers import to_graphviz
with self.assertRaises(TypeError):
to_graphviz(scikit_lr)
class TestPrettyPrint(unittest.TestCase):
def _roundtrip(self, expected, printed):
self.maxDiff = None
        # sklearn_version_family changes based on the Python and sklearn
        # versions, so remove that hyperparameter (if present) before comparing.
import re
expected = re.sub(r"""sklearn_version_family=.\d*.,""", "", expected)
printed = re.sub(r"""sklearn_version_family=.\d*.,""", "", printed)
self.assertEqual(expected, printed)
globals2 = {}
locals2 = {}
try:
exec(printed, globals2, locals2)
except Exception as e:
import pprint
print("error during exec(printed, globals2, locals2) where:")
print(f'printed = """{printed}"""')
print(f"globals2 = {pprint.pformat(globals2)}")
print(f"locals2 = {pprint.pformat(locals2)}")
raise e
pipeline2 = locals2["pipeline"]
import sklearn.pipeline
self.assertIsInstance(
pipeline2, (lale.operators.PlannedOperator, sklearn.pipeline.Pipeline)
)
def test_distance_threshold_validation_error(self):
import jsonschema
from lale.lib.sklearn import FeatureAgglomeration, LogisticRegression
with self.assertRaises(jsonschema.ValidationError):
_ = (
FeatureAgglomeration(
distance_threshold=0.5, n_clusters=None, compute_full_tree=True
)
>> LogisticRegression()
)
def test_indiv_op_1(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression(solver=LogisticRegression.enum.solver.saga, C=0.9)
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression(solver="saga", C=0.9)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_indiv_op_2(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression()
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_reducible(self):
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
from lale.lib.xgboost import XGBClassifier as XGB
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression | XGB)
)
expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.lale import ConcatFeatures
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier as XGB
import lale
lale.wrap_imported_operators()
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression | XGB)
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_no_combinators(self):
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
(MinMaxScaler | NoOp)
>> (pca & Nystroem & NoOp)
>> ConcatFeatures
>> (KNeighborsClassifier | logistic_regression)
)
expected = """from sklearn.preprocessing import MinMaxScaler
from lale.lib.lale import NoOp
from lale.operators import make_choice
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.operators import make_union
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from lale.operators import make_pipeline
choice_0 = make_choice(MinMaxScaler, NoOp)
pca = PCA(copy=False)
union = make_union(pca, Nystroem, NoOp)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
choice_1 = make_choice(KNeighborsClassifier, logistic_regression)
pipeline = make_pipeline(choice_0, union, choice_1)"""
printed = lale.pretty_print.to_string(pipeline, combinators=False)
self._roundtrip(expected, printed)
def test_astype_sklearn(self):
from lale.lib.lale import ConcatFeatures
from lale.lib.sklearn import PCA, LogisticRegression, MinMaxScaler, Nystroem
pca = PCA(copy=False)
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = (
MinMaxScaler()
>> (pca & Nystroem())
>> ConcatFeatures
>> logistic_regression
)
expected = """from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_union
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
pca = PCA(copy=False)
union = make_union(pca, Nystroem())
logistic_regression = LogisticRegression(solver="saga", C=0.9)
pipeline = make_pipeline(MinMaxScaler(), union, logistic_regression)"""
printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
self._roundtrip(expected, printed)
def test_import_as_1(self):
from lale.lib.sklearn import LogisticRegression as LR
pipeline = LR(solver="saga", C=0.9)
expected = """from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pipeline = LR(solver="saga", C=0.9)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_import_as_2(self):
from lale.lib.lale import ConcatFeatures as Concat
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import MinMaxScaler as Scaler
from lale.lib.sklearn import Nystroem
pca = PCA(copy=False)
lr = LR(solver="saga", C=0.9)
pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)
expected = """from sklearn.preprocessing import MinMaxScaler as Scaler
from lale.lib.lale import NoOp
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from lale.lib.lale import ConcatFeatures as Concat
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pca = PCA(copy=False)
lr = LR(solver="saga", C=0.9)
pipeline = (Scaler | NoOp) >> (pca & Nystroem) >> Concat >> (KNN | lr)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_operator_choice(self):
from lale.lib.sklearn import PCA
from lale.lib.sklearn import MinMaxScaler as Scl
pipeline = PCA | Scl
expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as Scl
import lale
lale.wrap_imported_operators()
pipeline = PCA | Scl"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_higher_order(self):
from lale.lib.lale import Both
from lale.lib.sklearn import PCA, Nystroem
pipeline = Both(op1=PCA(n_components=2), op2=Nystroem)
expected = """from lale.lib.lale import Both
from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
import lale
lale.wrap_imported_operators()
pca = PCA(n_components=2)
pipeline = Both(op1=pca, op2=Nystroem)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_higher_order_2(self):
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import VotingClassifier as Vote
pipeline = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)
expected = """from sklearn.ensemble import VotingClassifier as Vote
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
pipeline = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_multimodal(self):
from lale.lib.lale import ConcatFeatures as Cat
from lale.lib.lale import Project
from lale.lib.sklearn import LinearSVC
from lale.lib.sklearn import Normalizer as Norm
from lale.lib.sklearn import OneHotEncoder as OneHot
project_0 = Project(columns={"type": "number"})
project_1 = Project(columns={"type": "string"})
linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
pipeline = (
((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
)
expected = """from lale.lib.lale import Project
from sklearn.preprocessing import Normalizer as Norm
from sklearn.preprocessing import OneHotEncoder as OneHot
from lale.lib.lale import ConcatFeatures as Cat
from sklearn.svm import LinearSVC
import lale
lale.wrap_imported_operators()
project_0 = Project(columns={"type": "number"})
project_1 = Project(columns={"type": "string"})
linear_svc = LinearSVC(C=29617.4, dual=False, tol=0.005266)
pipeline = (
((project_0 >> Norm()) & (project_1 >> OneHot())) >> Cat >> linear_svc
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_irreducible_1(self):
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
MinMaxScaler,
Nystroem,
)
from lale.operators import make_pipeline_graph
choice = PCA | Nystroem
pipeline = make_pipeline_graph(
steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
edges=[
(choice, LogisticRegression),
(MinMaxScaler, LogisticRegression),
(MinMaxScaler, KNeighborsClassifier),
],
)
expected = """from sklearn.decomposition import PCA
from sklearn.kernel_approximation import Nystroem
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from lale.operators import make_pipeline_graph
import lale
lale.wrap_imported_operators()
choice = PCA | Nystroem
pipeline = make_pipeline_graph(
steps=[choice, MinMaxScaler, LogisticRegression, KNeighborsClassifier],
edges=[
(choice, LogisticRegression),
(MinMaxScaler, LogisticRegression),
(MinMaxScaler, KNeighborsClassifier),
],
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_irreducible_2(self):
from lale.lib.lale import ConcatFeatures as HStack
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import MinMaxScaler as MMS
from lale.operators import make_pipeline_graph
pipeline_0 = HStack >> LR
pipeline = make_pipeline_graph(
steps=[PCA, MMS, KNN, pipeline_0],
edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
)
expected = """from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler as MMS
from sklearn.neighbors import KNeighborsClassifier as KNN
from lale.lib.lale import ConcatFeatures as HStack
from sklearn.linear_model import LogisticRegression as LR
from lale.operators import make_pipeline_graph
import lale
lale.wrap_imported_operators()
pipeline_0 = HStack >> LR
pipeline = make_pipeline_graph(
steps=[PCA, MMS, KNN, pipeline_0],
edges=[(PCA, KNN), (PCA, pipeline_0), (MMS, pipeline_0)],
)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_nested(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
lr_0 = LR(C=0.09)
lr_1 = LR(C=0.19)
pipeline = PCA >> (lr_0 | NoOp >> lr_1)
expected = """from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression as LR
from lale.lib.lale import NoOp
import lale
lale.wrap_imported_operators()
lr_0 = LR(C=0.09)
lr_1 = LR(C=0.19)
pipeline = PCA >> (lr_0 | NoOp >> lr_1)"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_cat_encoder(self):
import numpy as np
from autoai_libs.transformers.exportable import CatEncoder
from lale.lib.sklearn import LogisticRegression as LR
cat_encoder = CatEncoder(
encoding="ordinal",
categories="auto",
dtype=np.float64,
handle_unknown="error",
)
pipeline = cat_encoder >> LR()
expected = """from autoai_libs.transformers.exportable import CatEncoder
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
cat_encoder = CatEncoder(
encoding="ordinal",
categories="auto",
dtype=np.float64,
handle_unknown="error",
sklearn_version_family="23",
)
pipeline = cat_encoder >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_fs1(self):
from autoai_libs.cognito.transforms.transform_utils import FS1
from lale.lib.sklearn import LogisticRegression as LR
fs1 = FS1(
cols_ids_must_keep=range(0, 7),
additional_col_count_to_keep=8,
ptype="classification",
)
pipeline = fs1 >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import FS1
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
fs1 = FS1(
cols_ids_must_keep=range(0, 7),
additional_col_count_to_keep=8,
ptype="classification",
)
pipeline = fs1 >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_missing_values(self):
from autoai_libs.transformers.exportable import NumpyReplaceMissingValues
from lale.lib.sklearn import LogisticRegression as LR
numpy_replace_missing_values = NumpyReplaceMissingValues(
filling_values=float("nan"), missing_values=["?"]
)
pipeline = numpy_replace_missing_values >> LR()
expected = """from autoai_libs.transformers.exportable import NumpyReplaceMissingValues
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
numpy_replace_missing_values = NumpyReplaceMissingValues(
missing_values=["?"], filling_values=float("nan")
)
pipeline = numpy_replace_missing_values >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_unknown_values1(self):
from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from lale.lib.sklearn import LogisticRegression as LR
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()
expected = """from autoai_libs.transformers.exportable import NumpyReplaceUnknownValues
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
numpy_replace_unknown_values = NumpyReplaceUnknownValues(
filling_values=float("nan"),
filling_values_list=[float("nan")],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_numpy_replace_unknown_values2(self):
from lale.lib.autoai_libs import NumpyReplaceUnknownValues
from lale.lib.sklearn import LogisticRegression as LR
CustomOp = NumpyReplaceUnknownValues.customize_schema(
known_values_list={
"anyOf": [
{"type": "array", "items": {"laleType": "Any"}},
{"enum": [None]},
],
"default": None,
}
)
numpy_replace_unknown_values = CustomOp(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = numpy_replace_unknown_values >> LR()
expected = """from autoai_libs.transformers.exportable import (
NumpyReplaceUnknownValues as CustomOp,
)
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
custom_op = CustomOp(
filling_values=float("nan"),
filling_values_list=[float("nan")],
known_values_list=[[36, 45, 56, 67, 68, 75, 78, 89]],
missing_values_reference_list=["", "-", "?", float("nan")],
)
pipeline = custom_op >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_tam_1(self):
import autoai_libs.cognito.transforms.transform_extras
import numpy as np
from autoai_libs.cognito.transforms.transform_utils import TAM
from lale.lib.sklearn import LogisticRegression as LR
tam = TAM(
tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
name="isoforestanomaly",
col_names=["a", "b", "c"],
col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
)
pipeline = tam >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import autoai_libs.cognito.transforms.transform_extras
import numpy as np
from sklearn.linear_model import LogisticRegression as LR
from sklearn.pipeline import make_pipeline
tam = TAM(
tans_class=autoai_libs.cognito.transforms.transform_extras.IsolationForestAnomaly,
name="isoforestanomaly",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
pipeline = make_pipeline(tam, LR())"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, astype="sklearn")
)
def test_autoai_libs_tam_2(self):
import numpy as np
from lightgbm import LGBMClassifier
from sklearn.decomposition import PCA
from lale.lib.autoai_libs import TAM
from lale.operators import make_pipeline
pca = PCA(copy=False)
tam = TAM(
tans_class=pca,
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[np.dtype("float32"), np.dtype("float32"), np.dtype("float32")],
)
lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
pipeline = make_pipeline(tam, lgbm_classifier)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
import sklearn.decomposition
import numpy as np
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline
tam = TAM(
tans_class=sklearn.decomposition.PCA(copy=False),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
lgbm_classifier = LGBMClassifier(
class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(tam, lgbm_classifier)"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, combinators=False)
)
def test_autoai_libs_tam_3(self):
import autoai_libs.cognito.transforms.transform_utils
import numpy as np
import sklearn.cluster
import sklearn.linear_model
import sklearn.pipeline
import lale.helpers
import lale.operators
import lale.pretty_print
sklearn_pipeline = sklearn.pipeline.make_pipeline(
autoai_libs.cognito.transforms.transform_utils.TAM(
tans_class=sklearn.cluster.FeatureAgglomeration(
affinity="euclidean",
compute_full_tree="auto",
connectivity=None,
linkage="ward",
memory=None,
n_clusters=2,
pooling_func=np.mean,
),
name="featureagglomeration",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
),
sklearn.linear_model.LogisticRegression(
solver="liblinear", multi_class="ovr"
),
)
pipeline = lale.helpers.import_from_sklearn_pipeline(sklearn_pipeline)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.cluster import FeatureAgglomeration
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
tam = TAM(
tans_class=FeatureAgglomeration(),
name="featureagglomeration",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
logistic_regression = LogisticRegression(
multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_tam_4(self):
import autoai_libs.cognito.transforms.transform_utils
import numpy as np
import sklearn.decomposition
import sklearn.linear_model
import sklearn.pipeline
import lale.helpers
import lale.operators
import lale.pretty_print
sklearn_pipeline = sklearn.pipeline.make_pipeline(
autoai_libs.cognito.transforms.transform_utils.TAM(
tans_class=sklearn.decomposition.PCA(),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
),
sklearn.linear_model.LogisticRegression(
solver="liblinear", multi_class="ovr"
),
)
pipeline = lale.helpers.import_from_sklearn_pipeline(
sklearn_pipeline, fitted=False
)
assert isinstance(pipeline, lale.operators.TrainableOperator)
expected = """from autoai_libs.cognito.transforms.transform_utils import TAM
from sklearn.decomposition import PCA
import numpy as np
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
tam = TAM(
tans_class=PCA(),
name="pca",
col_names=["a", "b", "c"],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
],
)
logistic_regression = LogisticRegression(
multi_class="ovr", solver="liblinear"
)
pipeline = tam >> logistic_regression"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
import numpy as np
import pandas as pd
test = pd.DataFrame(
np.random.randint(0, 100, size=(15, 3)),
columns=["a", "b", "c"],
dtype=np.dtype("float32"),
)
trained = pipeline.fit(
test.to_numpy(), [0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]
)
trained.predict(test.to_numpy())
def test_autoai_libs_ta1(self):
import autoai_libs.utils.fc_methods
import numpy as np
from autoai_libs.cognito.transforms.transform_utils import TA1
from lale.lib.sklearn import LogisticRegression as LR
ta1 = TA1(
fun=np.rint,
name="round",
datatypes=["numeric"],
feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
col_names=[
"a____________",
"b____________",
"c____________",
"d____________",
"e____________",
],
col_dtypes=[
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
np.dtype("float32"),
],
)
pipeline = ta1 >> LR()
expected = """from autoai_libs.cognito.transforms.transform_utils import TA1
import numpy as np
import autoai_libs.utils.fc_methods
from sklearn.linear_model import LogisticRegression as LR
import lale
lale.wrap_imported_operators()
ta1 = TA1(
fun=np.rint,
name="round",
datatypes=["numeric"],
feat_constraints=[autoai_libs.utils.fc_methods.is_not_categorical],
col_names=[
"a____________", "b____________", "c____________", "d____________",
"e____________",
],
col_dtypes=[
np.dtype("float32"), np.dtype("float32"), np.dtype("float32"),
np.dtype("float32"), np.dtype("float32"),
],
)
pipeline = ta1 >> LR()"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_autoai_libs_t_no_op(self):
from lightgbm import LGBMClassifier
from lale.lib.autoai_libs import TNoOp
from lale.operators import make_pipeline
t_no_op = TNoOp(
fun="fun",
name="no_action",
datatypes="x",
feat_constraints=[],
tgraph="tgraph",
)
lgbm_classifier = LGBMClassifier(class_weight="balanced", learning_rate=0.18)
pipeline = make_pipeline(t_no_op, lgbm_classifier)
expected = """from autoai_libs.cognito.transforms.transform_utils import TNoOp
from lightgbm import LGBMClassifier
from lale.operators import make_pipeline
t_no_op = TNoOp(
fun="fun",
name="no_action",
datatypes="x",
feat_constraints=[],
tgraph="tgraph",
)
lgbm_classifier = LGBMClassifier(
class_weight="balanced", learning_rate=0.18, n_estimators=100
)
pipeline = make_pipeline(t_no_op, lgbm_classifier)"""
self._roundtrip(
expected, lale.pretty_print.to_string(pipeline, combinators=False)
)
def test_autoai_libs_two_ops_with_combinator(self):
from autoai_libs.transformers.exportable import (
CompressStrings,
NumpyColumnSelector,
)
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
compress_type="hash",
dtypes_list=["char_str", "char_str", "char_str", "char_str"],
misslist_list=[[], [], [], []],
)
pipeline = lale.operators.make_pipeline(numpy_column_selector, compress_strings)
expected = """from autoai_libs.transformers.exportable import NumpyColumnSelector
from autoai_libs.transformers.exportable import CompressStrings
import lale
lale.wrap_imported_operators()
numpy_column_selector = NumpyColumnSelector(columns=[0, 2, 3, 5])
compress_strings = CompressStrings(
compress_type="hash",
dtypes_list=["char_str", "char_str", "char_str", "char_str"],
missing_values_reference_list=["?", "", "-", float("nan")],
misslist_list=[[], [], [], []],
)
pipeline = numpy_column_selector >> compress_strings"""
printed = lale.pretty_print.to_string(pipeline, combinators=True)
self._roundtrip(expected, printed)
def test_expression(self):
from lale.expressions import it, mean
from lale.lib.lale import Join, Scan
from lale.lib.rasl import Aggregate
scan1 = Scan(table=it["table1.csv"])
scan2 = Scan(table=it["table2.csv"])
join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
pipeline = (scan1 & scan2) >> join >> aggregate
expected = """from lale.lib.lale import Scan
from lale.expressions import it
from lale.lib.lale import Join
from lale.lib.rasl import Aggregate
from lale.expressions import mean
import lale
lale.wrap_imported_operators()
scan_0 = Scan(table=it["table1.csv"])
scan_1 = Scan(table=it["table2.csv"])
join = Join(pred=[it["table1.csv"].k1 == it["table2.csv"].k2])
aggregate = Aggregate(columns={"talk_time|mean": mean(it.talk_time)})
pipeline = (scan_0 & scan_1) >> join >> aggregate"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline(self):
from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
self._roundtrip(expected, lale.pretty_print.to_string(pipeline))
def test_sklearn_pipeline_2(self):
from lale.lib.sklearn import PCA, LogisticRegression, Pipeline
pipeline = Pipeline(steps=[("pca", PCA), ("lr", LogisticRegression(C=0.1))])
expected = """from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
logistic_regression = LogisticRegression(C=0.1)
pipeline = Pipeline(steps=[("pca", PCA), ("lr", logistic_regression)])"""
printed = lale.pretty_print.to_string(pipeline, astype="sklearn")
self._roundtrip(expected, printed)
def test_customize_schema_enum_and_number(self):
from lale.lib.sklearn import LogisticRegression
pipeline = LogisticRegression.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
)(solver="lbfgs")
expected = """from sklearn.linear_model import LogisticRegression
import lale
lale.wrap_imported_operators()
pipeline = LogisticRegression.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 1e-05,
"maximum": 0.1,
"default": 0.0001,
},
)(solver="lbfgs")"""
self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_none_and_boolean(self):
from lale.lib.sklearn import RandomForestRegressor
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True},
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": 33,
},
)(n_estimators=50)
expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True},
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{"description": "RandomState used by np.random", "enum": [None]},
{"description": "Explicit seed.", "type": "integer"},
],
"default": 33,
},
)(n_estimators=50)"""
# this should not include "random_state=33" because that would be
# redundant with the schema, and would prevent automated search
self._roundtrip(expected, pipeline.pretty_print(customize_schema=True))
def test_customize_schema_print_defaults(self):
from lale.lib.sklearn import RandomForestRegressor
pipeline = RandomForestRegressor.customize_schema(
bootstrap={"type": "boolean", "default": True}, # default unchanged
random_state={
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
{"type": "integer"},
],
"default": 33, # default changed
},
)(n_estimators=50)
expected = """from sklearn.ensemble import RandomForestRegressor
import lale
lale.wrap_imported_operators()
pipeline = RandomForestRegressor(n_estimators=50, random_state=33)"""
# print exactly those defaults that changed
self._roundtrip(expected, pipeline.pretty_print(customize_schema=False))
def test_user_operator_in_toplevel_module(self):
import importlib
import os.path
import sys
import tempfile
with tempfile.NamedTemporaryFile(mode="w", suffix=".py") as tmp_py_file:
file_contents = """import numpy as np
import lale.operators
class _MockClassifierImpl:
def __init__(self, int_hp=0):
self.int_hp = int_hp
def fit(self, X, y):
self.some_y = list(y)[0]
def predict(self, X):
return self.some_y
MockClassifier = lale.operators.make_operator(_MockClassifierImpl)
"""
tmp_py_file.write(file_contents)
tmp_py_file.flush()
dir_name = os.path.dirname(tmp_py_file.name)
old_pythonpath = sys.path
try:
sys.path.append(dir_name)
module_name = os.path.basename(tmp_py_file.name)[: -len(".py")]
module = importlib.import_module(module_name)
MockClf = getattr(module, "MockClassifier")
self.assertIsInstance(MockClf, lale.operators.PlannedIndividualOp)
self.assertEqual(MockClf.name(), "MockClassifier")
pipeline = MockClf(int_hp=42)
expected = f"""from {module_name} import MockClassifier as MockClf
import lale
lale.wrap_imported_operators()
pipeline = MockClf(int_hp=42)"""
self._roundtrip(expected, pipeline.pretty_print())
finally:
sys.path = old_pythonpath
class TestToAndFromJSON(unittest.TestCase):
def test_trainable_individual_op(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import LogisticRegression as LR
operator = LR(LR.enum.solver.sag, C=0.1)
json_expected = {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.1, "solver": "sag"},
"is_frozen_trainable": False,
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json_2, json_expected)
def test_operator_choice(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import PCA
from lale.lib.sklearn import MinMaxScaler as Scl
operator = PCA | Scl
json_expected = {
"class": "lale.operators.OperatorChoice",
"operator": "OperatorChoice",
"state": "planned",
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"scl": {
"class": Scl.class_name(),
"state": "planned",
"operator": "MinMaxScaler",
"label": "Scl",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.min_max_scaler.html",
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json_2, json_expected)
def test_pipeline_1(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.lale import ConcatFeatures, NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
operator = (PCA & NoOp) >> ConcatFeatures >> LR
json_expected = {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [
["pca", "concat_features"],
["no_op", "concat_features"],
["concat_features", "lr"],
],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"no_op": {
"class": NoOp.class_name(),
"state": "trained",
"operator": "NoOp",
"label": "NoOp",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"concat_features": {
"class": ConcatFeatures.class_name(),
"state": "trained",
"operator": "ConcatFeatures",
"label": "ConcatFeatures",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.concat_features.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"lr": {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_pipeline_2(self):
from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.sklearn import (
PCA,
KNeighborsClassifier,
LogisticRegression,
Nystroem,
)
from lale.operators import make_choice, make_pipeline
kernel_tfm_or_not = make_choice(NoOp, Nystroem)
tfm = PCA
clf = make_choice(LogisticRegression, KNeighborsClassifier)
operator = make_pipeline(kernel_tfm_or_not, tfm, clf)
json = to_json(operator)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_higher_order_1(self):
from lale.json_operator import from_json
from lale.lib.lale import Both
from lale.lib.sklearn import PCA, Nystroem
operator = Both(op1=PCA(n_components=2), op2=Nystroem)
json_expected = {
"class": Both.class_name(),
"state": "trainable",
"operator": "Both",
"label": "Both",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.both.html",
"hyperparams": {
"op1": {"$ref": "../steps/pca"},
"op2": {"$ref": "../steps/nystroem"},
},
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "trainable",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
"hyperparams": {"n_components": 2},
"is_frozen_trainable": False,
},
"nystroem": {
"class": Nystroem.class_name(),
"state": "planned",
"operator": "Nystroem",
"label": "Nystroem",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.nystroem.html",
},
},
"is_frozen_trainable": False,
}
json = operator.to_json()
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = operator_2.to_json()
self.assertEqual(json, json_2)
def test_higher_order_2(self):
self.maxDiff = None
from lale.json_operator import from_json
from lale.lib.sklearn import PCA
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import VotingClassifier as Vote
operator = Vote(
estimators=[("knn", KNN), ("pipeline", PCA() >> LR)], voting="soft"
)
json_expected = {
"class": Vote.class_name(),
"state": "trainable",
"operator": "VotingClassifier",
"is_frozen_trainable": True,
"label": "Vote",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.voting_classifier.html",
"hyperparams": {
"estimators": [
("knn", {"$ref": "../steps/knn"}),
("pipeline", {"$ref": "../steps/pipeline"}),
],
"voting": "soft",
},
"steps": {
"knn": {
"class": KNN.class_name(),
"state": "planned",
"operator": "KNeighborsClassifier",
"label": "KNN",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.k_neighbors_classifier.html",
},
"pipeline": {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [["pca", "lr"]],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "trainable",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
"hyperparams": {},
"is_frozen_trainable": False,
},
"lr": {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
},
},
},
},
}
json = operator.to_json()
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = operator_2.to_json()
self.assertEqual(json, json_2)
def test_nested(self):
self.maxDiff = None
from lale.json_operator import from_json, to_json
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA
from lale.lib.sklearn import LogisticRegression as LR
operator = PCA >> (LR(C=0.09) | NoOp >> LR(C=0.19))
json_expected = {
"class": "lale.operators.PlannedPipeline",
"state": "planned",
"edges": [["pca", "choice"]],
"steps": {
"pca": {
"class": PCA.class_name(),
"state": "planned",
"operator": "PCA",
"label": "PCA",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.pca.html",
},
"choice": {
"class": "lale.operators.OperatorChoice",
"state": "planned",
"operator": "OperatorChoice",
"steps": {
"lr_0": {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.09},
"is_frozen_trainable": False,
},
"pipeline_1": {
"class": "lale.operators.TrainablePipeline",
"state": "trainable",
"edges": [["no_op", "lr_1"]],
"steps": {
"no_op": {
"class": NoOp.class_name(),
"state": "trained",
"operator": "NoOp",
"label": "NoOp",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.no_op.html",
"hyperparams": None,
"coefs": None,
"is_frozen_trainable": True,
"is_frozen_trained": True,
},
"lr_1": {
"class": LR.class_name(),
"state": "trainable",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"hyperparams": {"C": 0.19},
"is_frozen_trainable": False,
},
},
},
},
},
},
}
json = to_json(operator)
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
def test_customize_schema(self):
from lale.json_operator import from_json, to_json
from lale.lib.sklearn import LogisticRegression as LR
operator = LR.customize_schema(
solver={"enum": ["lbfgs", "liblinear"], "default": "liblinear"},
tol={
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
)
json_expected = {
"class": LR.class_name(),
"state": "planned",
"operator": "LogisticRegression",
"label": "LR",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.logistic_regression.html",
"customize_schema": {
"properties": {
"hyperparams": {
"allOf": [
{
"type": "object",
"properties": {
"solver": {
"default": "liblinear",
"enum": ["lbfgs", "liblinear"],
},
"tol": {
"type": "number",
"minimum": 0.00001,
"maximum": 0.1,
"default": 0.0001,
},
},
}
]
}
}
},
}
json = to_json(operator)
self.maxDiff = None
self.assertEqual(json, json_expected)
operator_2 = from_json(json)
json_2 = to_json(operator_2)
self.assertEqual(json, json_2)
class TestDiff(unittest.TestCase):
def test_single_op(self):
from lale.lib.sklearn import LogisticRegression
single_op = LogisticRegression()
single_op_param = LogisticRegression(solver="saga")
expected_diff = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
'+ pipeline = LogisticRegression(solver="saga")\n'
"? +++++++++++++\n"
)
diff_str = single_op.diff(single_op_param, ipython_display=False)
self.assertEqual(diff_str, expected_diff)
expected_diff_reverse = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
'- pipeline = LogisticRegression(solver="saga")\n'
"? -------------\n\n"
"+ pipeline = LogisticRegression()"
)
diff_str_reverse = single_op_param.diff(single_op, ipython_display=False)
self.assertEqual(diff_str_reverse, expected_diff_reverse)
def test_pipeline(self):
from lale.lib.lale import NoOp
from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
pipeline_simple = PCA >> SelectKBest >> LogisticRegression
pipeline_choice = (PCA | NoOp) >> SelectKBest >> LogisticRegression
expected_diff = (
" from sklearn.decomposition import PCA\n"
"+ from lale.lib.lale import NoOp\n"
" from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
"+ pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
"? + ++++++++\n"
)
diff_str = pipeline_simple.diff(pipeline_choice, ipython_display=False)
self.assertEqual(diff_str, expected_diff)
expected_diff_reverse = (
" from sklearn.decomposition import PCA\n"
"- from lale.lib.lale import NoOp\n"
" from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = (PCA | NoOp) >> SelectKBest >> LogisticRegression\n"
"? - --------\n\n"
"+ pipeline = PCA >> SelectKBest >> LogisticRegression"
)
diff_str_reverse = pipeline_choice.diff(pipeline_simple, ipython_display=False)
self.assertEqual(diff_str_reverse, expected_diff_reverse)
def test_single_op_pipeline(self):
from lale.lib.sklearn import PCA, LogisticRegression, SelectKBest
single_op = LogisticRegression()
pipeline = PCA >> SelectKBest >> LogisticRegression
expected_diff = (
"+ from sklearn.decomposition import PCA\n"
"+ from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
"+ pipeline = PCA >> SelectKBest >> LogisticRegression"
)
diff_str = single_op.diff(pipeline, ipython_display=False)
self.assertEqual(expected_diff, diff_str)
expected_diff_reverse = (
"- from sklearn.decomposition import PCA\n"
"- from sklearn.feature_selection import SelectKBest\n"
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = PCA >> SelectKBest >> LogisticRegression\n"
"+ pipeline = LogisticRegression()"
)
diff_str_reverse = pipeline.diff(single_op, ipython_display=False)
self.assertEqual(expected_diff_reverse, diff_str_reverse)
def test_options(self):
from lale.lib.sklearn import LogisticRegression
single_op = LogisticRegression()
single_op_schema = single_op.customize_schema(solver={"enum": ["saga"]})
expected_diff_no_imports = " pipeline = LogisticRegression()"
diff_str_no_imports = single_op.diff(
single_op_schema, show_imports=False, ipython_display=False
)
self.assertEqual(diff_str_no_imports, expected_diff_no_imports)
expected_diff_no_schema = (
" from sklearn.linear_model import LogisticRegression\n"
" import lale\n"
" \n"
" lale.wrap_imported_operators()\n"
"- pipeline = LogisticRegression()\n"
'+ pipeline = LogisticRegression.customize_schema(solver={"enum": ["saga"]})()'
)
diff_str_no_schema = single_op.diff(
single_op_schema, customize_schema=True, ipython_display=False
)
self.assertEqual(diff_str_no_schema, expected_diff_no_schema)
|
# Task 2. Processors
# Write a program that calculates the profit or loss realized by a company that manufactures AND processors.
# One processor takes 3 hours to make. The company has a given number of employees who work a given number of days.
# Assume that one employee works 8 hours per day. The company aims to produce a target number of processors.
# The planned number of processors, the number of employees, and the number of workdays are read from the console.
# The number of processors produced must be rounded down to the nearest whole number.
#
# Example: in 10 hours, 10/3 = 3.33 processors are made, i.e. 3 processors. One unit sells for 110.10 BGN.
# Depending on how many processors were produced, print to the console how much more or less money was earned
# than planned.
from math import floor
processors_needed = int(input())
count_workers = int(input())
count_workdays = int(input())
total_working_hours = count_workers * count_workdays * 8
processors_produced = floor(total_working_hours / 3)
if processors_produced >= processors_needed:
print(f"Profit: -> {(processors_produced - processors_needed) * 110.10:.2f} BGN")
else:
print(f"Losses: -> {(processors_needed - processors_produced) * 110.10:.2f} BGN")
|
#coding=utf-8
#author@alingse
#2016.08.19
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout, Activation, Flatten,RepeatVector
from keras.layers import Embedding
from keras.layers import LSTM
from keras.optimizers import SGD, Adadelta, Adagrad,RMSprop
from keras.utils import np_utils, generic_utils
import numpy as np
from random import shuffle
from random import choice
#0 --> 255
#0b0 --> 0b11111111
#seq
def load_XY(binlen = 8):
    maxnum = 2 ** binlen - 1  # largest binlen-bit value, e.g. 0b11111111 == 255 for binlen=8
numlen = len(str(maxnum))
count = maxnum + 1
X_train = np.zeros((count,numlen,1),dtype=np.float32)
Y_train = np.zeros((count,binlen),dtype=np.float32)
for i in range(count):
i_str = str(i).zfill(numlen)
        x_seq = np.array(list(map(int, i_str)))
        i_bin = bin(i)[2:].zfill(binlen)
        y_seq = np.array(list(map(int, i_bin)))
X_train[i,:,0] = x_seq[::-1]
Y_train[i] = y_seq[::-1]
X_train = np.repeat(X_train,20,axis=0)
Y_train = np.repeat(Y_train,20,axis=0)
index = list(range(X_train.shape[0]))
shuffle(index)
X_train = X_train[index]
Y_train = Y_train[index]
return X_train,Y_train
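# Sketch of the encoding produced by load_XY (assuming binlen=8, so maxnum=255
# and numlen=3): X holds the decimal digits of i, zero-padded and reversed,
# while Y holds the bits of i, zero-padded and reversed. For example, i=13
# gives i_str="013" -> X row [3, 1, 0], and i_bin="00001101" -> Y row
# [1, 0, 1, 1, 0, 0, 0, 0].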
def train(X_train,Y_train):
numlen = X_train.shape[1]
binlen = Y_train.shape[1]
print(numlen,binlen)
#copy from keras
model = Sequential()
model.add(LSTM(8, return_sequences=True,
input_shape=(numlen,1)))
model.add(LSTM(8, return_sequences=True))
model.add(LSTM(8))
model.add(Dense(binlen,activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print('start fit')
model.fit(X_train, Y_train,
batch_size=10, nb_epoch=100,
verbose=2,shuffle=True,
validation_split=0.2)
return model
def dump(model,save_name):
with open('{}.model.json'.format(save_name),'w') as f:
f.write(model.to_json())
    model.save_weights('{}.model.weights.h5'.format(save_name))
def main(name='test'):
X_train,Y_train = load_XY(binlen=4)
model = train(X_train,Y_train)
score = model.evaluate(X_train,Y_train,batch_size=20,verbose=2)
print(score)
x_test = np.array([3,1]).reshape(1,2,1)
y_seq = model.predict_classes(x_test)
print(y_seq)
print(bin(2))
dump(model,name)
if __name__ == '__main__':
name = 'bin-rnn'
main(name)
|
#-*-coding:utf-8-*-
# date:2019-05-20
# Author: X.L.Eric
# function: data iter
import glob
import math
import os
import random
import shutil
from pathlib import Path
from PIL import Image
from tqdm import tqdm
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class LoadImagesAndLabels(Dataset):
def __init__(self, path, img_size=(256,256),vis = False):
print('img_size (height,width) : ',img_size[0],img_size[1])
labels_ = []
files_ = []
for idx,doc in enumerate(sorted(os.listdir(path), key=lambda x:int(x.split('.')[0]), reverse=False)):
print(' %s label is %s \n'%(doc,idx))
for file in os.listdir(path+doc):
                if '.jpg' in file:  # also filters out the val dataset
labels_.append(idx)
files_.append(path+doc + '/' + file)
print()
print('\n')
        self.labels = labels_  # sample labels
        self.files = files_  # sample image paths
        self.img_size = img_size  # target image size (height, width)
        self.vis = vis  # visualization flag
def __len__(self):
        return len(self.files)  # number of samples in the dataset
def __getitem__(self, index):
        img_path = self.files[index]  # image path for this sample
        label_ = self.labels[index]  # label index for this sample
        img = cv2.imread(img_path)  # loaded in BGR format
        img_ = cv2.resize(img, (self.img_size[1],self.img_size[0]))  # resize to a uniform size
        if random.random()>0.5:  # data augmentation
            img_ = cv2.flip(img_, 1)  # horizontal flip
if self.vis:
cv2.putText(img_,str(label_),(3,img_.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.45,(15,125,255),2)
cv2.putText(img_,str(label_),(3,img_.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.45,(255,255,25),1)
cv2.namedWindow('image',0)
cv2.imshow('image',img_)
cv2.waitKey(1)
img_ = img_.astype(np.float32)
        img_ = (img_-128.)/256.  # preprocessing: normalize to roughly [-0.5, 0.5)
        img_ = img_.transpose(2, 0, 1)  # to PyTorch layout: (height, width, channel) -> (channel, height, width)
        return img_,label_  # return the preprocessed image and its label
if __name__ == "__main__":
train_path = './datasets/train_datasets/'
img_size = (224,224)
dataset = LoadImagesAndLabels(path = train_path,img_size = img_size,vis = True)
print('len train datasets : %s'%(dataset.__len__()))
# Dataloader
dataloader = DataLoader(dataset,
batch_size=1,
num_workers=1,
shuffle=True,
pin_memory=False,
drop_last = True)
for epoch in range(0, 1):
for i, (imgs_, labels_) in enumerate(dataloader):
print('imgs size {} , labels size {}'.format(imgs_.size(), labels_.size()))
|
# Generated by Django 3.1.2 on 2020-10-26 23:19
import common.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('push_notifications', '0006_auto_20200421_2050'),
]
operations = [
migrations.CreateModel(
name='PathwaysApiKey',
fields=[
('id', common.models.RequiredCharField(max_length=200, primary_key=True, serialize=False)),
],
),
]
|
# Generated by Django 3.2.2 on 2021-06-18 19:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedule', '0004_alter_post_pub_date'),
]
operations = [
migrations.AlterField(
model_name='post',
name='pub_date',
field=models.DateField(),
),
]
|
#!/usr/bin/env python
import argparse
import time
import numpy as np
import torch
from trainer import Trainer
from models import State
from logger import Logger
import utils
from envs.simulation.robot import SimRobot
from utils.config import Config
from tensorboardX import SummaryWriter
from scipy import ndimage
from itertools import count
from torch.autograd import Variable
class Solver():
def __init__(self, args):
np.random.seed(args.random_seed) # Set random seed
self.writer = SummaryWriter() # tensorboard
        # algorithm options
self.save_visualizations = args.save_visualizations
# Initialize pick-and-place system (camera and robot)
self.workspace_limits = args.workspace_limits
self.heightmap_resolution = args.heightmap_resolution
self.env = SimRobot(args.obj_mesh_dir, args.num_obj, args.workspace_limits, args.heightmap_resolution)
# Initialize trainer
self.snapshot_file = args.snapshot_file
self.trainer = Trainer(args.future_reward_discount, args.load_snapshot, self.snapshot_file)
self.env_step_args = (self.trainer.model.num_rotations, self.heightmap_resolution)
# Initialize data logger
self.logger = Logger(args.continue_logging, args.logging_directory)
self.logger.save_camera_info(self.env.cam_intrinsics, self.env.cam_pose, self.env.cam_depth_scale)
self.logger.save_heightmap_info(args.workspace_limits, self.heightmap_resolution)
if args.continue_logging:
self.trainer.preload(self.logger.transitions_directory)
def main(self):
# optim_thread = threading.Thread(target=self._optimize_model)
# optim_thread.daemon = True
# optim_thread.start()
for epoch in count(): # Start main training loop
self.env.reset()
# print('instruction: %s' % (self.env.instruction_str))
self.no_change_cnt = 0
color_map, depth_map = self._get_imgs()
state = State(self.env.instruction, color_map, depth_map)
# import pdb; pdb.set_trace()
for t in count():
time_0 = time.time()
choice, action, grasp_pred = self.trainer.select_action(state, self.env)
reward, done = self.env.step(action, depth_map, *self.env_step_args)
self._log_board_save(color_map, choice, action, grasp_pred, reward)
# observe new state
color_map, depth_map = self._get_imgs()
next_state = None if done else State(self.env.instruction, color_map, depth_map)
# store in replay buffer
self.trainer.memory.push(state, action, next_state, reward)
state = next_state
print('Iter: %d, %s, Reward = %d, Time: %.2f' % (
self.trainer.iteration, choice, reward, (time.time() - time_0)))
if done or self._check_stupid() or (not self.env.is_stable()): break
loss = self.trainer.optimize_model()
if loss: self.writer.add_scalar('VLG/loss', loss, self.trainer.iteration)
if epoch % 5 == 0:
self.trainer.target_net.load_state_dict(self.trainer.model.state_dict())
def _log_board_save(self, color_map, choice, action, grasp_pred, reward):
'''
$ tensorboard --host 0.0.0.0 --logdir runs
'''
self.trainer.action_log.append([1, action[0], action[1], action[2]])
self.logger.write_to_log('action', self.trainer.action_log)
self.trainer.reward_log.append([reward])
self.logger.write_to_log('reward', self.trainer.reward_log)
self.writer.add_scalar('VLG/reward', reward, self.trainer.iteration)
# import pdb; pdb.set_trace()
if choice == 'policy_network':
grasp_pred_vis = self.trainer.get_pred_vis(grasp_pred, color_map, action)
self.logger.save_visualizations(self.trainer.iteration, grasp_pred_vis, 'grasp')
# Save model snapshot
self.logger.save_backup_model(self.trainer.model, 'reinforcement')
if self.trainer.iteration % 50 == 0:
self.logger.save_model(self.trainer.iteration, self.trainer.model, 'reinforcement')
self.trainer.model = self.trainer.model.to(self.trainer.device)
def _optimize_model(self):
TARGET_UPDATE = 5
while True:
if self.trainer.iteration % (self.trainer.BATCH_SIZE / 2) == 0:
loss = self.trainer.optimize_model()
if loss == None: continue
self.writer.add_scalar('VLG/loss', loss, self.trainer.iteration)
if self.trainer.iteration % (self.trainer.BATCH_SIZE * TARGET_UPDATE) == 0:
self.trainer.target_net.load_state_dict(self.trainer.model.state_dict())
time.sleep(1)
def _get_imgs(self):
# Get latest RGB-D image
color_img, depth_img = self.env.get_camera_data()
depth_img = depth_img * self.env.cam_depth_scale
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
color_map, depth_map = utils.get_heightmap(
color_img, depth_img, self.env.cam_intrinsics,
self.env.cam_pose, self.workspace_limits, self.heightmap_resolution
)
# valid_depth_heightmap = depth_heightmap
depth_map[np.isnan(depth_map)] = 0
# Save RGB-D images and RGB-D heightmaps
self.logger.save_instruction(self.trainer.iteration, self.env.instruction_str, '0')
self.logger.save_images(self.trainer.iteration, color_img, depth_img, '0')
self.logger.save_heightmaps(self.trainer.iteration, color_map, depth_map, '0')
return color_map, depth_map
def _check_stupid(self):
if self.no_change_cnt >= 5:
self.no_change_cnt = 0
# print('no change for a long time, Reset.')
return True
self.no_change_cnt += 1
return False
def _detect_changes(self, next_depth_data, depth_data, reward):
# Detect changes
depth_diff = abs(next_depth_data - depth_data)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = depth_diff[depth_diff < 0.01] = 0
depth_diff[depth_diff > 0] = 1
change_threshold = 300 # TODO this value, 300
change_value = np.sum(depth_diff) # np.sum
change_detected = change_value > change_threshold or reward == 1 # State.SUCCESS
# print('Change detected: %r (value: %d)' % (change_detected, change_value))
if change_detected:
self.no_change_cnt = 0
else:
self.no_change_cnt += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Train robotic agents to learn visual language grasp.'
)
# Run main program with specified config file
parser.add_argument('-f', '--file', dest='file')
args = parser.parse_args()
solver = Solver(Config(args.file))
'''
angles = []
for i in range(8):
angle = np.deg2rad(i * (360.0 / 16))
tool_rotation_angle = (angle % np.pi) - np.pi / 2
angles.append(tool_rotation_angle)
solver.env.reset()
solver.env.random_grasp_action()
assert False
'''
solver.main()
|
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import bokeh.plotting as plt
import sys
def show(file):
plt.output_file('lifetime.html')
fig1 = plt.figure(width=1600, height=800)
with open(file, 'r') as f:
f.readline() # skip first line
while True:
items = f.readline().split(' ')
if len(items) != 5:
break
left = float(items[3])
right = float(items[4])
bottom = float(items[1])
top = float(items[2])
fig1.quad(top=top, bottom=bottom, left=left, right=right, line_color="black", line_width=2)
plt.show(fig1)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Show lifetime plot.')
parser.add_argument('file', type=str, help='lifetime file')
args = parser.parse_args()
show(args.file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import os
def pick(rel_path="human.txt"):
script_path = os.path.abspath(__file__)
script_dir = os.path.split(script_path)[0]
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, encoding='utf-8') as txt:
lines = txt.readlines()
start = random.randrange(0,len(lines))
start -= start % 4
result = "".join(lines[start:(start+4)])
print(result)
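# Minimal usage sketch (assumes human.txt sits next to this script):
if __name__ == "__main__":
    pick()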
|
#!/usr/bin/python3.6
import numpy as np
np.seterr(all="ignore")
def continuousScale(*args):
"""Creates a set of continuous scale data.
If provided a filename, returns the indecies for sorting, D0, and D1
If provided a number, just creates and returns the indecies alternating from D0 and D1 indecies
If provided 2 numbers, returns D0 and D1 interleaved evenly into arr, D0, and D1"""
if len(args) == 1:
if isinstance(args[0], str):
filename: str = args[0]
data: list = []
D0: list = []
D1: list = []
with open(filename) as f:
for i, line in enumerate(f):
if len(line) > 10:
line: list = line.strip().split(" ")
point: float = float(line[2])
data.append(point)
if line[1] == "1": # case should be positive
D1.append(i)
else: # case should be negative
D0.append(i)
newData: list = [-1 for i in range(len(data))]
#print(data)
for i, d in enumerate(sorted(data)):
newData[i]: int = data.index(d)
D0.sort()
D1.sort()
return newData, D0, D1
elif isinstance(args[0], int):
data: list = list()
for i in range(0, args[0] // 2):
data.append(i + args[0] // 2)
data.append(i)
return data
elif len(args) == 2:
D0: list = list(range(args[0]))
D1: list = list(range(args[0], args[0] + args[1]))
arr: list = list()
negI: int = 0
posI: int = 0
ratio: float = args[1] / args[0]
while negI < len(D0):
arr.append(D0[negI])
negI += 1
percent: float = negI * ratio
while posI < percent:
arr.append(D1[posI])
posI += 1
return arr, D0, D1
if __name__ == "__main__":
data: list = continuousScale(5, 4)
print(*data)
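    # Expected result of the call above, assuming the interleaving logic stays
    # as written: the D1 indices are woven evenly between the D0 indices, so
    # continuousScale(5, 4) returns
    #   arr = [0, 5, 1, 6, 2, 7, 3, 8, 4], D0 = [0, 1, 2, 3, 4], D1 = [5, 6, 7, 8]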
|
from pydantic import BaseModel
from songmam.models.webhook.events.base import BaseMessaging, WithTimestamp
class GamePlay(BaseModel):
game_id: str
player_id: str
context_type: str
context_id: str
score: int
payload: str
class GamePlayEntries(BaseMessaging, WithTimestamp):
game_play: GamePlay
# {
# "recipient": {
# "id": "<PSID>"
# },
# "recipient": {
# "id": "<PAGE_ID>"
# },
# "timestamp": 1469111400000,
# "game_play": {
# "game_id": "<GAME-APP-ID>",
# "player_id": "<PLAYER-ID>",
# "context_type": "<CONTEXT-TYPE:SOLO|THREAD>",
# "context_id": "<CONTEXT-ID>", # If a Messenger Thread context
# "score": <SCORE-NUM>, # If a classic score based game
# "payload": "<PAYLOAD>" # If a rich game
# }
# }
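# Minimal usage sketch (hypothetical values, mirroring the commented payload
# above); only the GamePlay fields declared in this module are assumed here.
if __name__ == "__main__":
    sample = GamePlay(
        game_id="<GAME-APP-ID>",
        player_id="<PLAYER-ID>",
        context_type="SOLO",
        context_id="<CONTEXT-ID>",
        score=42,
        payload="<PAYLOAD>",
    )
    print(sample)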
|
import datetime
import numpy as np
import pandas as pd
from google.oauth2 import service_account
from googleapiclient import discovery
SPREADSHEET_ID = "1otVI0JgfuBDJw8jlW_l8vHXyfo5ufJiXOqshDixazZA" # ALL-IN-ONE-LOG-2021
class Spreadsheet:
def __init__(self, spreadsheetId):
self.spreadsheetId = spreadsheetId
self.sheet = self.get_all_in_one_log()
def get_all_in_one_log(self):
SERVICE_ACCOUNT_FILE = "credentials.json"
SCOPES = ["https://www.googleapis.com/auth/spreadsheets"]
creds = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_FILE, scopes=SCOPES
)
service = discovery.build("sheets", "v4", credentials=creds)
sheet = service.spreadsheets()
return sheet
def getSheet(self):
return self.sheet
def list_last_five_trans():
result = (
sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
)
values = result.get("values", [])
df = pd.DataFrame(
values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
)
df["Description"] = df["Description"].str.slice(0, 20)
print(df.tail(5))
return
def list_last_30_trans():
result = (
sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
)
values = result.get("values", [])
df = pd.DataFrame(
values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
)
df["Description"] = df["Description"].str.slice(0, 20)
print(df.tail(30))
return
def check_balances():
res2 = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!B9:B19")
.execute()
)
acc = res2.get("values", [])
result = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!P9:P19")
.execute()
)
val = result.get("values", [])
balances = np.array(val)
balances = balances.flatten()
balances[balances == "#N/A"] = "0"
balances = list(map(float, balances))
(
C,
K,
Zer,
Zer_Comm,
Cams,
Samrudhi,
Citi,
K_Fixed,
Union,
Z_Hold,
Citi_Fixed,
) = balances
print(f"Cash Balance~~~~~~~~~~~~~~~~~~~~~:{C:.2f}")
print(
f"Saving A/c Balance~~~~~~~~~~~~~~~:{(K+Citi):.2f} with (Kotak-{K:.2f} and Citi-{Citi:.2f})"
)
print(
f"In FD (CB,Kotak,Union, Samruddhi):{(K_Fixed+Union+Citi_Fixed):.2f} with (K-{K_Fixed:.2f}, Citi-{Citi_Fixed:.2f})"
)
print(f"Unutilized in Shares~~~~~~~~~~~~~:{Zer:.2f}")
print(f"In CAMS MF~~~~~~~~~~~~~~~~~~~~~~~:{Cams:.2f}")
print(f"In shares~~~~~~~~~~~~~~~~~~~~~~~~:{Z_Hold:.2f}")
return
def check_expenses():
result = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!C46:C46")
.execute()
)
values = result.get("values", [])
print("Expenses for the year: " + values[0][0])
return
class Account:
def __init__(self, desc, amount, from_ac, to_ac):
self.desc = desc
self.amount = amount
self.from_ac = from_ac
self.to_ac = to_ac
self.catg = "Adjustment"
if self.from_ac == "C" or self.from_ac == "K":
self.catg = "Expense"
self.today = datetime.datetime.now()
self.period = datetime.date.strftime(self.today, "%Y-%m")
self.formatted_dt = datetime.date.strftime(self.today, "%m/%d/%Y")
self.new_trans = [
[
self.formatted_dt,
self.desc,
"",
self.amount,
self.from_ac,
self.to_ac,
"",
self.period,
self.catg,
]
]
def get_trans(self):
return self.new_trans
def add_new_record():
print("Adding new records, Enter description, amount, from a/c and to a/c")
desc = input("description is: ")
amount = input("trans amount: ")
from_ac = input(" from account: ")
to_ac = input(" to account: ")
account = Account(desc, amount, from_ac, to_ac)
print(" Transaction to be entered is: ", account.get_trans())
conf = 0
while conf != 1 and conf != 9:
conf = int(input(" Enter 1 to confirm, 9 to erase: "))
if conf == 9:
print("Exiting adding new record, please re-enter your choice: ")
return
request = sheet.values().append(
spreadsheetId=SPREADSHEET_ID,
range="Trans!B6:J",
valueInputOption="USER_ENTERED",
insertDataOption="INSERT_ROWS",
body={"values": account.get_trans()},
)
response = request.execute()
print("Added new record: ")
print(response)
return
class Choice:
switcher = {
1: add_new_record,
4: list_last_30_trans,
5: list_last_five_trans,
6: check_balances,
7: check_expenses,
}
def __init__(self, SpreadSheet):
self._choice = 0
self.exit = False
        self.Spreadsheet = SpreadSheet  # keep a reference to the provided spreadsheet wrapper
def is_exit(self):
return self.exit
def get_choice(self):
print("~~~~~~ MAIN MENU ~~~~~~~")
print("1:ADD, 4:LIST-30, 5:LIST-5, 6:CHECK-BALANCE, 7:.CHECK-EXPENSES 9: Quit")
self._choice = int(input("Enter your choice : "))
if self._choice == 9:
self.exit = True
return self._choice
def switch_choice(self):
func = self.switcher.get(self._choice, lambda: "Invalid choice")
func()
if __name__ == "__main__":
AccountSheet = Spreadsheet(SPREADSHEET_ID)
sheet = AccountSheet.getSheet()
# sheet = get_all_in_one_log()
# list_last_five_trans()
choice = Choice(AccountSheet)
while choice.is_exit() == False:
choice.get_choice()
choice.switch_choice()
print("Exiting out.. kind regards!")
|
# Copyright 2020 MONAI Consortium
import logging
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import monai
from monai.data import ImageDataset, decollate_batch
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, RandRotate90, RandSpatialCrop, ScaleIntensity, EnsureType
from monai.visualize import plot_2d_or_3d_image
from src.deepcolloid import DeepColloid
print(torch.cuda.is_available())
import napari
class ColloidsDatasetSimulated(torch.utils.data.Dataset):
"""
Torch Dataset for simulated colloids
transform is augmentation function
"""
def __init__(self, dataset_path:str, dataset_name:str, indices:list, transform=None, label_transform=None):
super().__init__()
self.dataset_path = dataset_path
self.dataset_name = dataset_name
self.indices = indices
self.transform = transform
self.label_transform = label_transform
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
dc = DeepColloid(self.dataset_path)
# Select sample
i = self.indices[index]
X, positions = dc.read_hdf5(self.dataset_name, i, return_positions=True)
# TODO make arg return_positions = True for use in DSNT
y = dc.read_hdf5(self.dataset_name+'_labels', i)
# dc.view(X)
# napari.run()
X = np.array(X/255, dtype=np.float32)
y = np.array(y , dtype=np.float32)
# print('x', np.min(X), np.max(X), X.shape)
# print('y', np.min(y), np.max(y), y.shape)
		# reshape: add a channel dimension so arrays become (C, D, H, W)
X = np.expand_dims(X, 0) # if numpy array
y = np.expand_dims(y, 0)
# tensor = tensor.unsqueeze(1) # if torch tensor
		if self.transform:
			X = self.transform(X)
		if self.label_transform:
			y = self.label_transform(y)
del dc
return X, y
def compute_max_depth(shape=1920, max_depth=10, print_out=True):
shapes = []
shapes.append(shape)
for level in range(1, max_depth):
if shape % 2 ** level == 0 and shape / 2 ** level > 1:
shapes.append(shape / 2 ** level)
if print_out:
print(f'Level {level}: {shape / 2 ** level}')
else:
if print_out:
print(f'Max-level: {level - 1}')
break
#out = compute_max_depth(shape, print_out=True, max_depth=10)
return shapes
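# A minimal sketch, not part of the original script: compute_max_depth reports how
# many times one dimension can be halved cleanly, which bounds the number of
# stride-2 downsampling stages a UNet can use. The helper below applies the same
# idea to a whole crop size; the defaults mirror roiSize and the four-stride UNet
# defined in main and are assumptions for illustration only.
def check_roi_divisibility(roi=(32, 128, 128), levels=4):
	"""Return True if every dimension of `roi` is divisible by 2**levels."""
	return all(d % (2 ** levels) == 0 for d in roi)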
if __name__ == '__main__':
# monai.config.print_config()
# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
dataset_path = '/home/ak18001/Data/HDD/Colloids'
# dataset_path = '/home/wahab/Data/HDD/Colloids'
# dataset_path = '/mnt/storage/home/ak18001/scratch/Colloids'
dc = DeepColloid(dataset_path)
# dc = DeepColloid(dataset_path)
roiSize = (32,128,128)
train_data = range(1,39)
val_data = range(39,51)
dataset_name = 'replicate'
batch_size = 2
num_workers = 2
epochs=10
n_classes=1
lr = 3e-5
# define transforms for image and segmentation
train_imtrans = Compose([
# AddChannel(),
ScaleIntensity(),
# RandRotate90(prob=0.5, spatial_axes=(0, 2)),
EnsureType(),
])
train_segtrans = Compose([
# AddChannel(),
# RandRotate90(prob=0.5, spatial_axes=(0, 2)),
EnsureType(),
])
val_imtrans = Compose([
ScaleIntensity(),
# AddChannel(),
EnsureType(),
])
val_segtrans = Compose([
# AddChannel(),
EnsureType(),
])
# define image dataset, data loader
check_ds = ColloidsDatasetSimulated(dataset_path, dataset_name, train_data, transform=train_imtrans, label_transform=train_segtrans)
check_loader = DataLoader(check_ds, batch_size=batch_size, num_workers=num_workers, pin_memory=torch.cuda.is_available())
im, seg = monai.utils.misc.first(check_loader)
print(im.shape, seg.shape)
# create a training data loader
	train_ds = ColloidsDatasetSimulated(dataset_path, dataset_name, train_data, transform=train_imtrans, label_transform=train_segtrans)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=torch.cuda.is_available())
# create a validation data loader
val_ds = ColloidsDatasetSimulated(dataset_path, dataset_name, val_data, transform=val_imtrans, label_transform=val_segtrans)
val_loader = DataLoader(val_ds, batch_size=batch_size, num_workers=num_workers, pin_memory=torch.cuda.is_available())
dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold_values=True)])
# create UNet, DiceLoss and Adam optimizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('-'*10)
print(f'training on {device}')
model = monai.networks.nets.UNet(
spatial_dims=3,
in_channels=1,
out_channels=n_classes,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
# act=torch.nn.activation.ReLU(),
).to(device)
# loss_function = monai.losses.DiceLoss(sigmoid=True)
loss_function = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr)
# start a typical PyTorch training
val_interval = 2
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = list()
metric_values = list()
writer = SummaryWriter()
for epoch in range(epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{5}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
# print('DataSet shape: ', inputs.shape, labels.shape)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
val_images = None
val_labels = None
val_outputs = None
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
sw_batch_size = 2
val_outputs = sliding_window_inference(val_images, roiSize, sw_batch_size, model)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
# aggregate the final mean dice result
metric = dice_metric.aggregate().item()
# reset the status for next validation round
dice_metric.reset()
metric_values.append(metric)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), "best_metric_model_segmentation3d_array.pth")
print("saved new best metric model")
print(
"current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
epoch + 1, metric, best_metric, best_metric_epoch
)
)
writer.add_scalar("val_mean_dice", metric, epoch + 1)
# plot the last model output as GIF image in TensorBoard with the corresponding image and label
plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
writer.close()
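	# Illustrative follow-up, not part of the original training run: reload the
	# best checkpoint saved above and score it once more on the validation loader.
	# The file name, roiSize, post_trans and dice_metric are the ones defined
	# earlier in this script.
	model.load_state_dict(torch.load("best_metric_model_segmentation3d_array.pth"))
	model.eval()
	with torch.no_grad():
		for val_data in val_loader:
			val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
			val_outputs = sliding_window_inference(val_images, roiSize, 2, model)
			val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
			dice_metric(y_pred=val_outputs, y=val_labels)
		print(f"reloaded-model mean dice: {dice_metric.aggregate().item():.4f}")
		dice_metric.reset()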
|
from unittest.mock import Mock, patch
import pytest
from firebase_admin import messaging
from backend.common.consts.fcm.platform_priority import PlatformPriority
from backend.common.models.fcm.platform_config import PlatformConfig
from backend.common.models.notifications.requests.fcm_request import (
FCMRequest,
MAXIMUM_TOKENS,
)
from backend.common.models.notifications.requests.request import Request
from backend.common.models.notifications.tests.mocks.notifications.mock_notification import (
MockNotification,
)
@pytest.fixture
def fcm_app():
return Mock()
def test_subclass(fcm_app):
request = FCMRequest(fcm_app, MockNotification(), tokens=["abcd"])
assert isinstance(request, Request)
def test_init_app(fcm_app):
FCMRequest(fcm_app, notification=MockNotification(), tokens=["abcd"])
def test_init_delivery_none(fcm_app):
with pytest.raises(TypeError):
FCMRequest(fcm_app, notification=MockNotification())
def test_init_delivery_too_many_tokens(fcm_app):
with pytest.raises(
ValueError,
match=f"FCMRequest tokens must contain less than {MAXIMUM_TOKENS} tokens",
):
FCMRequest(
fcm_app,
notification=MockNotification(),
tokens=["a" for i in range(MAXIMUM_TOKENS + 1)],
)
def test_str(fcm_app):
request = FCMRequest(fcm_app, notification=MockNotification(), tokens=["abc"])
assert "FCMRequest(tokens=['abc'], notification=MockNotification())" == str(request)
def test_send(fcm_app):
batch_response = messaging.BatchResponse(
[messaging.SendResponse({"name": "abc"}, None)]
)
request = FCMRequest(fcm_app, notification=MockNotification(), tokens=["abc"])
with patch.object(
messaging, "send_multicast", return_value=batch_response
) as mock_send, patch.object(request, "defer_track_notification") as mock_track:
response = request.send()
mock_send.assert_called_once()
mock_track.assert_called_once_with(1)
assert response == batch_response
def test_send_failed(fcm_app):
batch_response = messaging.BatchResponse([messaging.SendResponse(None, "a")])
request = FCMRequest(
fcm_app, notification=MockNotification(), tokens=["abc", "def"]
)
with patch.object(
messaging, "send_multicast", return_value=batch_response
) as mock_send, patch.object(request, "defer_track_notification") as mock_track:
response = request.send()
mock_send.assert_called_once()
mock_track.assert_not_called()
assert response == batch_response
def test_send_failed_partial(fcm_app):
batch_response = messaging.BatchResponse(
[
messaging.SendResponse({"name": "abc"}, None),
messaging.SendResponse(None, "a"),
]
)
request = FCMRequest(
fcm_app, notification=MockNotification(), tokens=["abc", "def"]
)
with patch.object(
messaging, "send_multicast", return_value=batch_response
) as mock_send, patch.object(request, "defer_track_notification") as mock_track:
response = request.send()
mock_send.assert_called_once()
mock_track.assert_called_once_with(1)
assert response == batch_response
def test_fcm_message_empty(fcm_app):
request = FCMRequest(fcm_app, notification=MockNotification(), tokens=["abc"])
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert message.notification is None
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_apns_sound(fcm_app):
request = FCMRequest(
fcm_app,
notification=MockNotification(
fcm_notification=messaging.Notification(
title="Title", body="Some body message"
)
),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert isinstance(message.notification, messaging.Notification)
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert isinstance(message.apns.payload, messaging.APNSPayload)
assert isinstance(message.apns.payload.aps, messaging.Aps)
assert message.apns.payload.aps.sound is not None
assert not message.apns.payload.aps.content_available
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_apns_content_available(fcm_app):
request = FCMRequest(fcm_app, notification=MockNotification(), tokens=["abc"])
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert message.notification is None
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert isinstance(message.apns.payload, messaging.APNSPayload)
assert isinstance(message.apns.payload.aps, messaging.Aps)
assert message.apns.payload.aps.sound is None
assert message.apns.payload.aps.content_available
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_platform_config(fcm_app):
platform_config = PlatformConfig(
priority=PlatformPriority.HIGH, collapse_key="collapse_key"
)
request = FCMRequest(
fcm_app,
notification=MockNotification(platform_config=platform_config),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert message.notification is None
assert isinstance(message.android, messaging.AndroidConfig)
assert isinstance(message.apns, messaging.APNSConfig)
assert isinstance(message.webpush, messaging.WebpushConfig)
assert message.tokens == ["abc"]
def test_fcm_message_platform_config_override(fcm_app):
platform_config = PlatformConfig(
priority=PlatformPriority.HIGH, collapse_key="collapse_key"
)
apns_config = messaging.APNSConfig(headers={"apns-collapse-id": "ios_collapse_key"})
request = FCMRequest(
fcm_app,
notification=MockNotification(
platform_config=platform_config, apns_config=apns_config
),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert message.notification is None
assert isinstance(message.android, messaging.AndroidConfig)
assert isinstance(message.apns, messaging.APNSConfig)
assert message.apns.headers == {"apns-collapse-id": "ios_collapse_key"}
assert isinstance(message.webpush, messaging.WebpushConfig)
assert message.webpush.headers == {"Topic": "collapse_key", "Urgency": "high"}
assert message.tokens == ["abc"]
def test_fcm_message_data_payload_default(fcm_app):
request = FCMRequest(fcm_app, notification=MockNotification(), tokens=["abc"])
message = request._fcm_message()
assert message is not None
assert message.data == {"notification_type": "verification"}
assert message.notification is None
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_data_payload(fcm_app):
request = FCMRequest(
fcm_app,
notification=MockNotification(data_payload={"some_data": "some test data"}),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data == {
"notification_type": "verification",
"some_data": "some test data",
}
assert message.notification is None
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_data_payload_none(fcm_app):
request = FCMRequest(
fcm_app,
notification=MockNotification(
data_payload={"some_data": "some test data", "some_none": None}
),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data == {
"notification_type": "verification",
"some_data": "some test data",
}
assert message.notification is None
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert message.webpush is None
assert message.tokens == ["abc"]
def test_fcm_message_notification(fcm_app):
request = FCMRequest(
fcm_app,
notification=MockNotification(
fcm_notification=messaging.Notification(
title="Title", body="Some body message"
)
),
tokens=["abc"],
)
message = request._fcm_message()
assert message is not None
assert message.data is not None
assert isinstance(message.notification, messaging.Notification)
assert message.android is None
assert isinstance(message.apns, messaging.APNSConfig)
assert message.webpush is None
assert message.tokens == ["abc"]
|
from graphviz import Graph
import os
class ExportReachabilityAnalysisService:
def get_height(self, node):
h = 0
while len(node.children) > 0:
h += 1
node = node.children[-1]
return h
def get_node_color(self, node):
return node.get_color().value
def get_node_text_color(self, node):
text_color = 'black'
node_color = self.get_node_color(node)
if node_color == 'black':
text_color = 'white'
return text_color
def get_node_id(self, node):
node_id = ""
parent = node.parent
while parent is not None:
node_id += parent.key
parent = parent.parent
node_id += node.key
return node_id
def extend_dot_with_node(self, dot, node):
node_id = self.get_node_id(node)
label = node.key
dot.node(
node_id,
label,
style='filled',
color=self.get_node_color(node),
fontcolor=self.get_node_text_color(node)
)
return dot
def extend_dot_with_edge(self, dot, node_a, node_b):
node_a_id = self.get_node_id(node_a)
node_b_id = self.get_node_id(node_b)
dot.edge(node_a_id, node_b_id, constraint='true')
return dot
def build_dot(self, node, depth, dot=None):
assert depth >= 0
if dot is None:
dot = Graph(format='svg')
self.extend_dot_with_node(dot, node)
if depth == 0:
return dot
children = node.get_children()
for i in range(len(children)):
child_node = children[i]
self.extend_dot_with_node(dot, child_node)
self.extend_dot_with_edge(dot, node, child_node)
if i == len(children) - 1:
depth -= 1
self.build_dot(child_node, depth, dot)
return dot
def for_node(self, node, path, depth=None):
max_depth = self.get_height(node)
if depth is None:
depth = max_depth
assert 0 <= depth <= max_depth
path = os.path.abspath(path)
dot = self.build_dot(node, depth)
dot.render(path, view=False)
return path
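# A minimal usage sketch (assumptions, not part of the service above): the only
# node interface the service relies on is `key`, `parent`, `children`,
# `get_children()` and `get_color()` returning an object whose `.value` is a
# graphviz colour name. Rendering also requires the graphviz `dot` binary.
from enum import Enum


class DemoColor(Enum):
    WHITE = 'white'
    BLACK = 'black'


class DemoNode:
    def __init__(self, key, color=DemoColor.WHITE, parent=None):
        self.key = key
        self.parent = parent
        self.children = []
        self._color = color

    def add_child(self, key, color=DemoColor.WHITE):
        child = DemoNode(key, color, parent=self)
        self.children.append(child)
        return child

    def get_children(self):
        return self.children

    def get_color(self):
        return self._color


if __name__ == '__main__':
    root = DemoNode('a')
    b = root.add_child('b')
    b.add_child('c', DemoColor.BLACK)
    service = ExportReachabilityAnalysisService()
    # Writes the DOT source to ./reachability_demo and the rendered SVG next to it.
    print(service.for_node(root, 'reachability_demo'))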
|
#!/usr/bin/env python3
# vim: textwidth=0 wrapmargin=0 tabstop=2 shiftwidth=2 softtabstop=2 smartindent smarttab
import argparse
import dateutil.parser
import dateutil.utils
import logging
import os
import pathlib
import pprint
import requests
import sys
import time
import yaml
pp = pprint.PrettyPrinter(indent=2, width=10000)
import org.miggy.edcapi
###########################################################################
# Logging
###########################################################################
os.environ['TZ'] = 'UTC'
time.tzset()
__default_loglevel = logging.INFO
__logger = logging.getLogger('fd-api')
__logger.setLevel(__default_loglevel)
__logger_ch = logging.StreamHandler()
__logger_ch.setLevel(__default_loglevel)
__logger_formatter = logging.Formatter('%(asctime)s;%(name)s;%(levelname)s;%(module)s.%(funcName)s: %(message)s')
__logger_formatter.default_time_format = '%Y-%m-%d %H:%M:%S'
__logger_formatter.default_msec_format = '%s.%03d'
__logger_ch.setFormatter(__logger_formatter)
__logger.addHandler(__logger_ch)
###########################################################################
###########################################################################
"""
" Configuration
"""
###########################################################################
__configfile_fd = os.open(pathlib.Path(sys.path[0]) / "fd-api-config.yaml", os.O_RDONLY)
__configfile = os.fdopen(__configfile_fd)
__config = yaml.safe_load(__configfile)
if __config.get('user_agent') is None:
__logger.error('You must set a "user_agent" in the config file')
exit(-1)
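# Illustrative only -- a minimal fd-api-config.yaml covering the keys this script
# itself reads ('user_agent' and 'capi_urls'); the org.miggy.edcapi module will
# almost certainly need further keys (e.g. for its token database) that are not
# shown here, and the URL values below are examples, not confirmed endpoints.
#
#   user_agent: "my-fd-api-client/0.1 (contact: you@example.com)"
#   capi_urls:
#     live: "https://companion.orerve.net"
#     pts: "https://pts-companion.orerve.net"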
###########################################################################
###########################################################################
# Command-Line Arguments
###########################################################################
__parser = argparse.ArgumentParser()
__parser.add_argument("--loglevel", help="set the log level to one of: DEBUG, INFO (default), WARNING, ERROR, CRITICAL")
__parser.add_argument("--rawoutput", action="store_true", help="Output raw returned data")
__parser.add_argument("--pts", action="store_true", help="Use PTS server, not live")
__parser.add_argument("--decode-access-token", action="store_true", help="Decode the currently stored Access Token for the requested Commander")
__parser_endpoints = __parser.add_mutually_exclusive_group(required=False)
__parser_endpoints.add_argument("--endpoints", action="store_true", help="Ask the CAPI server what the currently available endpoints are")
__parser_endpoints.add_argument("--profile", action="store_true", help="Request retrieval of Cmdr's profile")
__parser_endpoints.add_argument("--market", action="store_true", help="Request retrieval of market data")
__parser_endpoints.add_argument("--shipyard", action="store_true", help="Request retrieval of shipyard data")
__parser_endpoints.add_argument("--fleetcarrier", action="store_true", help="Request retrieval of fleetcarrier data")
__parser_endpoints.add_argument(
"--journal",
metavar="date",
nargs="?",
default=False,
help="Request retrieval of journal data. Defaults to 'today' if no 'date' is given, else the string is parsed per python dateutil.parser capabilities.",
)
__parser_endpoints.add_argument("--communitygoals", action="store_true", help="Request retrieval of Community Goals data")
__parser.add_argument("cmdrname", nargs=1, help="Specify the Cmdr Name for this Authorization")
__args = __parser.parse_args()
if __args.loglevel:
__level = getattr(logging, __args.loglevel.upper())
__logger.setLevel(__level)
__logger_ch.setLevel(__level)
__logger.debug('Args: {!r}'.format(__args))
cmdrname = __args.cmdrname[0]
###########################################################################
###########################################################################
# Load a relevant Auth State
###########################################################################
def loadAuthState(cmdr: str) -> int:
########################################
# Retrieve and test state
########################################
	db = org.miggy.edcapi.database(__logger, __config)
auth_state = db.getActiveTokenState()
if auth_state:
## Do we have an access_token, and does it work?
if auth_state['access_token']:
print("Found un-expired access_token, assuming it's good.")
return(0)
else:
print("Un-expired access_token, but no access_token? WTF!")
return(-1)
else:
print("No auth state with un-expired access_token found, continuing...")
########################################
###########################################################################
###########################################################################
# Main
###########################################################################
def main():
__logger.debug("Start")
# Set the required capi_url
if __args.pts:
__config['capi_url'] = __config['capi_urls']['pts']
else:
__config['capi_url'] = __config['capi_urls']['live']
capi = org.miggy.edcapi.edcapi(__logger, __config)
if __args.decode_access_token:
token_details = capi.decode(cmdrname)
print(f'{token_details}')
if __args.endpoints:
rawep, ep = capi.endpoints.get(cmdrname)
if __args.rawoutput:
print(f'{rawep}\n')
else:
print(pprint.pformat(ep, width=79))
if __args.profile:
(rawprofile, profile) = capi.profile.get(cmdrname)
if not profile:
return -1
if __args.rawoutput:
print(rawprofile)
print('')
else:
print(pp.pformat(profile))
if __args.market:
(rawmarket, market) = capi.market.get(cmdrname)
if not market:
return -1
if __args.rawoutput:
print(rawmarket)
print('')
else:
print(pp.pformat(market))
if __args.shipyard:
(rawshipyard, shipyard) = capi.shipyard.get(cmdrname)
if not shipyard:
return -1
if __args.rawoutput:
print(rawshipyard)
print('')
else:
print(pp.pformat(shipyard))
if __args.fleetcarrier:
(rawfleetcarrier, fleetcarrier) = capi.fleetcarrier.get(cmdrname)
if not fleetcarrier:
return -1
if __args.rawoutput:
print(rawfleetcarrier)
print('')
else:
print(pp.pformat(fleetcarrier))
# You get 'False' if not present at all, 'None' if no optional arg
if __args.journal != False:
# Validate the date format
if __args.journal:
try:
j_date = dateutil.parser.parse(__args.journal)
except Exception as e:
__logger.error("Could not parse the string '{date}' into a date".format(date=__args.journal))
return -1
else:
j_date = dateutil.utils.today()
datestr = j_date.strftime("%Y/%m/%d")
__logger.debug('Retrieving journals for date "{date}"'.format(date=datestr))
rawjournal = capi.journal.get(cmdrname, datestr)
if not rawjournal:
return -1
print('{journal}'.format(journal=rawjournal))
if __args.communitygoals:
rawcgs, cgs = capi.communitygoals.get(cmdrname)
if __args.rawoutput:
print(f'{rawcgs}\n')
else:
print(pprint.pformat(cgs, width=79))
###########################################################################
if __name__ == '__main__':
exit(main())
|
#
# PySNMP MIB module HP-ICF-RATE-LIMIT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-RATE-LIMIT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:35:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
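# Note (not part of the generated output): like all pysmi-generated modules, this
# file is meant to be executed by pysnmp's MibBuilder -- e.g.
# mibBuilder.loadModules('HP-ICF-RATE-LIMIT-MIB') -- which supplies the
# `mibBuilder` name used below; it is not importable as a standalone module.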
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
hpicfRateLimitTrapsPrefix, hpicfObjectModules = mibBuilder.importSymbols("HP-ICF-OID", "hpicfRateLimitTrapsPrefix", "hpicfObjectModules")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
TimeTicks, NotificationType, iso, Gauge32, Counter64, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, Unsigned32, Integer32, ModuleIdentity, MibIdentifier, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "NotificationType", "iso", "Gauge32", "Counter64", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "Unsigned32", "Integer32", "ModuleIdentity", "MibIdentifier", "ObjectIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
hpicfRateLimitMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14))
hpicfRateLimitMIB.setRevisions(('2017-10-13 00:00', '2016-08-03 00:00', '2015-09-04 00:00', '2014-11-17 10:00', '2014-11-18 00:00', '2013-07-11 00:00', '2013-03-12 15:10', '2012-10-05 19:30', '2012-03-12 12:30', '2010-09-27 11:30', '2010-07-14 16:10', '2007-12-04 12:30', '2007-08-29 11:20', '2007-07-27 19:20', '2007-06-01 11:46', '2007-05-30 16:10', '2006-07-07 18:33', '2005-04-20 11:30', '2004-08-22 10:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpicfRateLimitMIB.setRevisionsDescriptions(('The maximum value of hpUnknownUnicastLimitPortSingleControlKbps has been modified to 100,000,000.', 'The description of below MIBs are modified: hpEgressRateLimitPortControlMode, hpIngressRateLimitPortControlMode.', 'The hpICMPRateLimitPortKbps modified.', 'Added egress maximum rate limit objects.', 'Increased max range value from 10G to 100G for hpEgressRateLimitPortSingleControlKbps, hpIngressRateLimitPortSingleControlKbps, hpIngressBcastLimitPortSingleControlKbps and hpIngressMcastLimitPortSingleControlKbps.', 'Added hpicfIngressRateLimitVlanConfigTable, hpicfRateLimitCompliance3 and hpicfIngressRateLimitVlanGroup for limiting ingress rate on a VLAN.', 'Added kbps-mode rate-limit option for ingress unknown-unicast traffic. This is an enhancement OID for the unknown-unicast rate-limiting MIB', 'Updated the description of the hpBWMinEgressPortPrct object to remove queue sum restriction.', 'Added the Unknown Unicast Rate-Limiting MIB object, which is used by the switch to control the unknown unicast traffic.', 'Added IPv6 ICMP Rate-Limiting Enhancements through the inclusion of IP packet type MIB object.', 'Added kbps-mode limit option for ingress Broadcast and ingress Multicast rate limiting MIB objects.', 'Added ingress Broadcast and ingress Multicast rate limiting MIB objects.', 'Deprecated all bps objects for hpEgressRateLimitPortConfigTable and hpIngressRateLimitPortConfigTable, and updated compliance information (including correcting ICMP group to be hpICMPRateLimitPortConfigGroup2).', 'Modified enumerated type names for hpICMPRateLimitPortControlMode object.', 'Deprecated hpICMPRateLimitPortState in favor of new hpICMPRateLimitPortControlMode object.', 'Added Kbps configuration for ICMP, port-egress, and port-ingress rate-limiting definitions.', 'Added new egress rate limiting bps objects, and ingress rate-limiting definitions.', 'Added minimum bandwidth and egress rate limiting definitions.', 'Initial version.',))
if mibBuilder.loadTexts: hpicfRateLimitMIB.setLastUpdated('201710130000Z')
if mibBuilder.loadTexts: hpicfRateLimitMIB.setOrganization('HP Networking')
if mibBuilder.loadTexts: hpicfRateLimitMIB.setContactInfo('Hewlett Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfRateLimitMIB.setDescription('This MIB module describes HP rate limit information.')
hpicfRateLimitObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1))
hpicfICMPRateLimitObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1))
hpICMPRateLimitConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1))
hpICMPRateLimitPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1), )
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigTable.setDescription('A table that contains configuration objects on ICMP rate limit on a per interface basis.')
hpICMPRateLimitPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortConfigIndex"))
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigEntry.setDescription('Entry that contains configuration objects on ICMP rate limit on a per interface basis')
hpICMPRateLimitPortConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigIndex.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigIndex.setDescription('The interface index associated with this entry.')
hpICMPRateLimitPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortState.setStatus('deprecated')
if mibBuilder.loadTexts: hpICMPRateLimitPortState.setDescription('This object indicates whether ICMP rate limiting is enabled on the corresponding port. Because this is a State object it cannot distinguish between percent-based ICMP Rate-Limiting and the newer Kbps-based ICMP Rate-Limiting. Therefore, it has been deprecated in favor of hpICMPRateLimitPortControlMode.')
hpICMPRateLimitPortPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortPrct.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortPrct.setDescription('This indicates the percent of ICMP rate limit on the port. The value of this object must be interpreted under the context of hpICMPRateLimitPortState. A value of 0 is not the same as disabling, but rather all ICMP traffic must be dropped.')
hpICMPRateLimitPortAlarmFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("clear", 1), ("set", 2))).clone('clear')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortAlarmFlag.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortAlarmFlag.setDescription('This object indicates whether ICMP rate limiting alarm has been sent. When an alarm is sent, this object is set to set(2). Once a management station changes this object to clear(1), an alarm can be sent again for this port.')
hpICMPRateLimitPortKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortKbps.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortKbps.setDescription('The maximum Kilobits-per-second of ICMP traffic that may be received inbound on the port. The value of this object must be interpreted under the context of hpICMPRateLimitControlMode. A value of 0 is not the same as disabling, but means instead that all ICMP traffic must be dropped.')
hpICMPRateLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("portPrct", 2), ("portKbps", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortControlMode.setDescription('The mode by which inbound ICMP traffic on this port will Rate-Limited. If icmpRateLimitPerPortOnly is configured, there will be a single maximum percentage-based rate for the entire port. If icmpRateLimitPerPortOnlyKbpsMode is configured, there will be a single maximum kilobits-per- second rate for the entire port. When ICMP rate-limiting is disabled, there are no maximum controls on inbound ICMP traffic for this port.')
hpICMPRateLimitPortIpPacketType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ipv4PacketsOnly", 1), ("ipv6PacketsOnly", 2), ("ipv4AndIpv6Packets", 3))).clone('ipv4PacketsOnly')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpICMPRateLimitPortIpPacketType.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortIpPacketType.setDescription('This object represents the type(s) of IP packet to which the specified ICMP rate-limit will be applied. The default is IPv4.')
hpICMPRateLimitPortNotification = NotificationType((1, 3, 6, 1, 4, 1, 11, 2, 14, 12, 5, 0, 1)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitNotifyPortIndex"))
if mibBuilder.loadTexts: hpICMPRateLimitPortNotification.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortNotification.setDescription('This notification indicates limit has exceeded.')
hpICMPRateLimitNotifyPortIndex = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 1, 2), InterfaceIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hpICMPRateLimitNotifyPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitNotifyPortIndex.setDescription('The interface index associated with hpICMPRateLimitPortNotification event.')
hpicfBWMinEgressObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2))
hpBWMinEgressPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1))
hpBWMinEgressPortNumQueues = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpBWMinEgressPortNumQueues.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortNumQueues.setDescription('The number of bandwidth minimum egress queues supported on this system.')
hpBWMinEgressPortPrctTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1, 2), )
if mibBuilder.loadTexts: hpBWMinEgressPortPrctTable.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortPrctTable.setDescription("A table that contains information about the port's egress Guaranteed Minimum Bandwidth percent configurations on this switch. The number of entries in this table is determined by hpBWMinEgressPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpBWMinEgressPortNumQueues being the highest.")
hpBWMinEgressPortPrctEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortPrctQueue"))
if mibBuilder.loadTexts: hpBWMinEgressPortPrctEntry.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortPrctEntry.setDescription("The information associated with each port's egress Guaranteed Minimum Bandwidth percent configuration.")
hpBWMinEgressPortPrctQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9000)))
if mibBuilder.loadTexts: hpBWMinEgressPortPrctQueue.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortPrctQueue.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpBWMinEgressPortNumQueues being the highest.')
hpBWMinEgressPortPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 2, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpBWMinEgressPortPrct.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortPrct.setDescription('The percentage of Guaranteed Minimum bandwidth to be assigned to this egress queue for this port. Total values for all queues must not exceed 100 percent.')
hpicfBWMinIngressObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3))
hpBWMinIngressPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1))
hpBWMinIngressPortNumQueues = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpBWMinIngressPortNumQueues.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortNumQueues.setDescription('The number of bandwidth minimum ingress queues supported on this system.')
hpBWMinIngressPortPrctTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1, 2), )
if mibBuilder.loadTexts: hpBWMinIngressPortPrctTable.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortPrctTable.setDescription("A table that contains information about the port's ingress Guaranteed Minimum Bandwidth percent configurations on this switch. The number of entries in this table is determined by hpBWMinIngressPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpBWMinIngressPortNumQueues being the highest.")
hpBWMinIngressPortPrctEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortPrctQueue"))
if mibBuilder.loadTexts: hpBWMinIngressPortPrctEntry.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortPrctEntry.setDescription("The information associated with each port's ingress Guaranteed Minimum Bandwidth percent configuration.")
hpBWMinIngressPortPrctQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9000)))
if mibBuilder.loadTexts: hpBWMinIngressPortPrctQueue.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortPrctQueue.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpBWMinIngressPortNumQueues being the highest.')
hpBWMinIngressPortPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 3, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpBWMinIngressPortPrct.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortPrct.setDescription('The percentage of Guaranteed Minimum bandwidth to be assigned to this ingress queue for this port. Total values for all queues must not exceed 100 percent.')
hpicfRateLimitPortObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4))
hpRateLimitPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1))
hpEgressRateLimitPortNumQueues = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpEgressRateLimitPortNumQueues.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortNumQueues.setDescription('The number of egress rate-limiting queues supported on this system.')
hpEgressRateLimitPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2), )
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigTable.setDescription('A table that contains information about the port egress Rate-Limiting configurations on this switch.')
hpEgressRateLimitPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortIndex"))
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigEntry.setDescription("The information associated with each port's egress Rate-Limiting configuration.")
hpEgressRateLimitPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpEgressRateLimitPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortIndex.setDescription('The ifIndex value which uniquely identifies a row in the Interfaces Table.')
hpEgressRateLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("egressRateLimitPerPortOnly", 2), ("egressRateLimitPerQueue", 3), ("egressRateLimitPerPortOnlyBpsMode", 4), ("egressRateLimitPerQueueBpsMode", 5), ("egressRateLimitPerPortOnlyKbpsMode", 6))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortControlMode.setDescription('The mode by which this port will be Rate-Limited on egress. If egressRateLimitPerPortOnly is configured, there will be a single maximum percentage-based rate for the entire port. If egressRateLimitPerQueue is configured, the values for each of the queues indicate the maximum percentage of port traffic that may be transmitted by that queue. If egressRateLimitPerPortOnlyKbpsMode is configured, there will be a single maximum kilobits-per-second rate for the entire port. The queues are defined under hpEgressRateLimitPortPrctTable. When egress rate-limiting is disabled, there are no maximum controls on egress for this port. NOTE : Currently, egressRateLimitPerPortOnlyBpsMode and egressRateLimitPerQueueBpsMode are not supported.')
hpEgressRateLimitPortSingleControlPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlPrct.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlPrct.setDescription('When hpEgressRateLimitPortControlMode is configured for egressRateLimitPerPortOnly, this value is the maximum percentage of traffic that may be transmitted by this port on egress.')
hpEgressRateLimitPortSingleControlBps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlBps.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlBps.setDescription('When hpEgressRateLimitPortControlMode is configured for egressRateLimitPerPortOnlyBpsMode, this value is the maximum bits-per-second of traffic that may be transmitted by this port on egress.')
hpEgressRateLimitPortSingleControlKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlKbps.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortSingleControlKbps.setDescription('When hpEgressRateLimitPortControlMode is configured for egressRateLimitPerPortOnlyKbpsMode, this value is the maximum kilobits-per-second of traffic that may be transmitted by this port on egress.')
hpEgressRateLimitPortQueueControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("egressRateLimitQueuePrctMode", 2), ("egressRateLimitQueueKbpsMode", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueControlMode.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueControlMode.setDescription('The mode by which queues on this port will be rate-limited on egress. If egressRateLimitQueuePrctMode is configured, the values for each of the queues indicate the maximum percentage of port traffic that may be transmitted by that queue.If egressRateLimitQueueKbpsMode is configured, the maximum transmission-rate values for each of the queues will be expressed in kilobits-per-second. The queues are defined under hpEgressRateLimitPortQueueTable. When egress rate-limiting is disabled, there are no maximum controls on egress for this port.')
hpEgressRateLimitPortPrctTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 3), )
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctTable.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctTable.setDescription('A table that contains information about the port egress Rate-Limiting percent configurations on this switch. The number of entries in this table is determined by hpEgressRateLimitPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest priority and queue hpEgressRateLimitPortNumQueues being the highest priority.')
hpEgressRateLimitPortPrctEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 3, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortPrctQueue"))
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctEntry.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctEntry.setDescription("The information associated with each port's egress Rate-Limiting percent configuration.")
hpEgressRateLimitPortPrctQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9000)))
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctQueue.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortPrctQueue.setDescription('The queue associated with this entry. The priority o of the queues is in ascending order, starting with queue one being the lowest and queue hpEgressRateLimitgressPortNumQueues being the highest.')
hpEgressRateLimitPortPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortPrct.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortPrct.setDescription("The maximum percentage of traffic that may be transmitted by this port's queue on egress. hpEgressRateLimitPortControlMode must be configured to use egressRateLimitPerQueue for this to take effect. A value of 0-percent for any queue means that no traffic will ever be transmitted on this port for that egress queue.")
hpEgressRateLimitPortBpsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 4), )
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsTable.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsTable.setDescription('A table that contains information about the port egress Rate-Limiting bits-per-second configurations on this switch. The number of entries in this table is determined by hpEgressRateLimitPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest priority and queue hpEgressRateLimitPortNumQueues being the highest priority queue.')
hpEgressRateLimitPortBpsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 4, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortBpsQueue"))
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsEntry.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsEntry.setDescription("The information associated with each port's egress Rate-Limiting bits-per-second configuration.")
hpEgressRateLimitPortBpsQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64)))
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsQueue.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortBpsQueue.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpEgressRateLimitgressPortNumQueues being the highest.')
hpEgressRateLimitPortBps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortBps.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortBps.setDescription("The maximum bits-per-second of traffic that may be transmitted by this port's queue on egress. hpEgressRateLimitPortControlMode must be configured to use egressRateLimitPerQueue for this to take effect. A value of 0-percent for any queue means that no traffic will ever be transmitted on this port for that egress queue. The values for each queue must not exceed the bits-per-second capability of the current network link speed.")
hpEgressRateLimitPortQueueConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 5), )
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueConfigTable.setDescription('A table that contains information about the port egress- queue rate-limiting configurations on this switch.')
hpEgressRateLimitPortQueueConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 5, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortQueueIndex"))
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueConfigEntry.setDescription('The information associated with the egress rate-limiting configuration for the queues on each port.')
hpEgressRateLimitPortQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9000)))
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueIndex.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueIndex.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpEgressRateLimitEgressPortNumQueues being the highest.')
hpEgressRateLimitPortQueueMax = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 4, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueMax.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueMax.setDescription("The maximum amount of traffic that may be transmitted by this port's corresponding queue on egress. When the value of hpEgressRateLimitPortQueueControlMode is egressRateLimitQueuePrctMode, this maximum value is a percentage. A limit of 100% acts as no limit. A value of 0-percent for any queue means that no traffic will ever be transmitted on this port for that egress queue. When the value of hpEgressRateLimitPortQueueControlMode is egressRateLimitQueueKbpsMode, this maximum value is in kilobits-per-second. A limit of kbps which matches the port's maximum media speed acts as no limit.")
hpicfIngressRateLimitPortObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5))
hpRateLimitIngressPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1))
hpIngressRateLimitPortNumQueues = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpIngressRateLimitPortNumQueues.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortNumQueues.setDescription('The number of ingress rate-limiting queues supported on this system.')
hpIngressRateLimitPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2), )
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigTable.setDescription('A table that contains information about the port ingress Rate-Limiting configurations on this switch.')
hpIngressRateLimitPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortIndex"))
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigEntry.setDescription("The information associated with each port's ingress Rate-Limiting configuration.")
hpIngressRateLimitPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpIngressRateLimitPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortIndex.setDescription('The ifIndex value which uniquely identifies a row in the Interfaces Table.')
hpIngressRateLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("ingressRateLimitPerPortOnly", 2), ("ingressRateLimitPerQueue", 3), ("ingressRateLimitPerPortOnlyBpsMode", 4), ("ingressRateLimitPerQueueBpsMode", 5), ("ingressRateLimitPerPortOnlyKbpsMode", 6))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortControlMode.setDescription('The mode by which this port will be Rate-Limited on ingress. If ingressRateLimitPerPortOnly is configured, there will be a single maximum percentage-based rate for the entire port. If ingressRateLimitPerQueue is configured, the values for each of the queues indicate the maximum percentage of port traffic that may be transmitted by that queue. If ingressRateLimitPerPortOnlyKbpsMode is configured, there will be a single maximum kilobits-per-second rate for the entire port. The queues are defined under hpIngressRateLimitPortPrctTable. When ingress rate-limiting is disabled, there are no maximum controls on ingress for this port. NOTE : Currently, IngressRateLimitPerPortOnlyBpsMode and ingressRateLimitPerQueueBpsMode are not supported.')
hpIngressRateLimitPortSingleControlPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlPrct.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlPrct.setDescription('When hpIngressRateLimitPortControlMode is configured for ingressRateLimitPerPortOnly, this value is the maximum percentage of traffic that may be transmitted by this port on egress.')
hpIngressRateLimitPortSingleControlBps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlBps.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlBps.setDescription('When hpIngressRateLimitPortControlMode is configured for ingressRateLimitPerPortOnlyBpsMode, this value is the maximum bits-per-second of traffic that may be transmitted by this port on ingress.')
hpIngressRateLimitPortSingleControlKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlKbps.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortSingleControlKbps.setDescription('When hpIngressRateLimitPortControlMode is configured for ingressRateLimitPerPortOnlyKbpsMode, this value is the maximum kilobits-per-second of traffic that may be transmitted by this port on ingress.')
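# Hedged usage sketch (not part of the generated MIB module): how a manager application might
# set the per-port ingress kilobit rate limit defined above with pysnmp's high-level API. The
# agent address '192.0.2.1', community string 'private', ifIndex 1 and the 50000 kbps ceiling
# are illustrative assumptions; the compiled HP-ICF-RATE-LIMIT-MIB must be resolvable on the
# MIB search path for the symbolic ObjectIdentity lookups to succeed.
def _example_set_port_ingress_kbps_limit():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget, ContextData,
                              ObjectType, ObjectIdentity, Integer32, setCmd)
    error_indication, error_status, error_index, var_binds = next(
        setCmd(SnmpEngine(), CommunityData('private'),
               UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
               # Select ingressRateLimitPerPortOnlyKbpsMode(6) on ifIndex 1, then set the kbps ceiling.
               ObjectType(ObjectIdentity('HP-ICF-RATE-LIMIT-MIB',
                                         'hpIngressRateLimitPortControlMode', 1), Integer32(6)),
               ObjectType(ObjectIdentity('HP-ICF-RATE-LIMIT-MIB',
                                         'hpIngressRateLimitPortSingleControlKbps', 1), Integer32(50000))))
    return error_indication, error_status, error_index, var_binds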
hpIngressRateLimitPortPrctTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 3), )
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctTable.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctTable.setDescription('A table that contains information about the port ingress Rate-Limiting percent configurations on this switch. The number of entries in this table is determined by hpIngressRateLimitPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest priority and queue hpIngressRateLimitPortNumQueues being the highest priority.')
hpIngressRateLimitPortPrctEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 3, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortPrctQueue"))
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctEntry.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctEntry.setDescription("The information associated with each port's ingress Rate-Limiting percent configuration.")
hpIngressRateLimitPortPrctQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9000)))
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctQueue.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortPrctQueue.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpIngressRateLimitPortNumQueues being the highest.')
hpIngressRateLimitPortPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortPrct.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortPrct.setDescription("The maximum percentage of traffic that may be transmitted by this port's queue on ingress. hpIngressRateLimitPortControlMode must be configured to use ingressRateLimitPerQueue for this to take effect. A value of 0-percent for any queue means that no traffic will ever be transmitted on this port for that ingress queue. The values for each queue must not exceed 100 percent.")
hpIngressRateLimitPortBpsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 4), )
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsTable.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsTable.setDescription('A table that contains information about the port ingress Rate-Limiting bits-per-second configurations on this switch. The number of entries in this table is determined by hpIngressRateLimitPortNumQueues. The priority of the queues is in ascending order, starting with queue one being the lowest priority and queue hpIngressRateLimitPortNumQueues being the highest priority queue.')
hpIngressRateLimitPortBpsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 4, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortIndex"), (0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortBpsQueue"))
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsEntry.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsEntry.setDescription("The information associated with each port's ingress Rate-Limiting bits-per-second configuration.")
hpIngressRateLimitPortBpsQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64)))
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsQueue.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortBpsQueue.setDescription('The queue associated with this entry. The priority of the queues is in ascending order, starting with queue one being the lowest and queue hpIngressRateLimitPortNumQueues being the highest.')
hpIngressRateLimitPortBps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 5, 1, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressRateLimitPortBps.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortBps.setDescription("The maximum bits-per-second of traffic that may be transmitted by this port's queue on ingress. hpIngressRateLimitPortControlMode must be configured to use ingressRateLimitPerQueue for this to take effect. A value of 0 for any queue means that no traffic will ever be transmitted on this port for that ingress queue. The values for each queue must not exceed the bits-per-second capability of the current network link speed.")
hpicfIngressBcastLimitPortObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6))
hpBcastLimitIngressPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1))
hpIngressBcastLimitPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1), )
if mibBuilder.loadTexts: hpIngressBcastLimitPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortConfigTable.setDescription('A table that contains information about the port ingress Broadcast-Limiting configurations on this switch.')
hpIngressBcastLimitPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortIndex"))
if mibBuilder.loadTexts: hpIngressBcastLimitPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortConfigEntry.setDescription("The information associated with each port's ingress Broadcast-Limiting configuration.")
hpIngressBcastLimitPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpIngressBcastLimitPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortIndex.setDescription('The ifIndex value which uniquely identifies a row in the Interfaces Table.')
hpIngressBcastLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("ingressBcastLimitPerPortOnly", 2), ("ingressBcastLimitPerPortOnlyKbpsMode", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressBcastLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortControlMode.setDescription('The mode by which this port will be Broadcast-Limited on ingress. If ingressBcastLimitPerPortOnly is configured, there will be a single maximum percentage-based broadcast traffic limit for the entire port. If ingressBcastLimitPerPortOnlyKbpsMode is configured, there will be a single maximum kilobits-per-second broadcast traffic rate for the entire port. When ingress Broadcast-limiting is disabled, there are no maximum broadcast traffic controls on ingress for this port.')
hpIngressBcastLimitPortSingleControlPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressBcastLimitPortSingleControlPrct.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortSingleControlPrct.setDescription('When hpIngressBcastLimitPortControlMode is configured for ingressBcastLimitPerPortOnly, this value is the maximum percentage of broadcast traffic that may be transmitted by this port on ingress.')
hpIngressBcastLimitPortSingleControlKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 6, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressBcastLimitPortSingleControlKbps.setStatus('current')
if mibBuilder.loadTexts: hpIngressBcastLimitPortSingleControlKbps.setDescription('When hpIngressBcastLimitPortControlMode is configured for ingressBcastLimitPerPortOnlyKbpsMode, this value is the maximum kilobits-per-second of broadcast traffic that may be transmitted by this port on ingress.')
hpicfIngressMcastLimitPortObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7))
hpMcastLimitIngressPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1))
hpIngressMcastLimitPortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1), )
if mibBuilder.loadTexts: hpIngressMcastLimitPortConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortConfigTable.setDescription('A table that contains information about the port ingress Multicast-Limiting configurations on this switch.')
hpIngressMcastLimitPortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortIndex"))
if mibBuilder.loadTexts: hpIngressMcastLimitPortConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortConfigEntry.setDescription("The information associated with each port's ingress Multicast-Limiting configuration.")
hpIngressMcastLimitPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpIngressMcastLimitPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortIndex.setDescription('The ifIndex value which uniquely identifies a row in the Interfaces Table.')
hpIngressMcastLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("ingressMcastLimitPerPortOnly", 2), ("ingressMcastLimitPerPortOnlyKbpsMode", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressMcastLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortControlMode.setDescription('The mode by which this port will be Multicast-Limited on ingress. If ingressMcastLimitPerPortOnly is configured, there will be a single maximum percentage-based multicast traffic limit for the entire port. If ingressMcastLimitPerPortOnlyKbpsMode is configured, there will be a single maximum kilobits-per-second multicast traffic rate for the entire port. When ingress Multicast-limiting is disabled, there are no maximum multicast traffic controls on ingress for this port.')
hpIngressMcastLimitPortSingleControlPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressMcastLimitPortSingleControlPrct.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortSingleControlPrct.setDescription('When hpIngressMcastLimitPortControlMode is configured for ingressMcastLimitPerPortOnly, this value is the maximum percentage of multicast traffic that may be transmitted by this port on ingress.')
hpIngressMcastLimitPortSingleControlKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 7, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpIngressMcastLimitPortSingleControlKbps.setStatus('current')
if mibBuilder.loadTexts: hpIngressMcastLimitPortSingleControlKbps.setDescription('When hpIngressMcastLimitPortControlMode is configured for ingressMcastLimitPerPortOnlyKbpsMode, this value is the maximum kilobits-per-second of multicast traffic that may be transmitted by this port on ingress.')
hpicfUnknownUnicastLimitPortObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8))
hpUnknownUnicastLimitPortConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1))
hpUnknownUnicastLimitConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1), )
if mibBuilder.loadTexts: hpUnknownUnicastLimitConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitConfigTable.setDescription('A table that contains information about the unknown-unicast rate limiting configurations on this switch.')
hpUnknownUnicastLimitConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortIndex"))
if mibBuilder.loadTexts: hpUnknownUnicastLimitConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitConfigEntry.setDescription("The information associated with each port's unknown-unicast rate limiting configuration.")
hpUnknownUnicastLimitPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortIndex.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortIndex.setDescription('The ifIndex value which uniquely identifies a row in the interfaces table.')
hpUnknownUnicastLimitPortControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("unknownUnicastLimitPerPortOnly", 2), ("unknownUnicastLimitPerPortOnlyKbpsMode", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortControlMode.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortControlMode.setDescription('The mode by which unknown-unicast ingress traffic on this port will be rate limited. If unknownUnicastLimitPerPortOnly is configured, the limit will be a percentage of the current line rate of the port. The percentage is specified in hpUnknownUnicastLimitPortSingleControlPrct. If unknownUnicastLimitPerPortOnlyKbpsMode is configured, the limit will be an absolute value in kilobits-per-second. The kbps rate is specified in hpUnknownUnicastLimitPortSingleControlKbps. If disabled is configured, any unknown-unicast rate limit will be removed.')
hpUnknownUnicastLimitPortSingleControlPrct = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortSingleControlPrct.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortSingleControlPrct.setDescription('When hpUnknownUnicastLimitPortControlMode is unknownUnicastLimitPerPortOnly, this value is the maximum allowed ingress rate for unknown-unicast traffic on this port as a percentage of the current line rate.')
hpUnknownUnicastLimitPortSingleControlKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 8, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortSingleControlKbps.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUnicastLimitPortSingleControlKbps.setDescription('When hpUnknownUnicastLimitPortControlMode is unknownUnicastLimitPerPortOnlyKbpsMode, this value is the maximum allowed ingress rate for unknown-unicast traffic on this port in kilobits-per-second.')
hpicfIngressRateLimitVlanObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9))
hpicfIngressRateLimitVlanConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1), )
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanConfigTable.setDescription('A table of VLAN rate limits.')
hpicfIngressRateLimitVlanConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1, 1), ).setIndexNames((0, "HP-ICF-RATE-LIMIT-MIB", "hpicfIngressRateLimitVlanIndex"))
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanConfigEntry.setDescription('A set of objects used to configure rate limits on a VLAN.')
hpicfIngressRateLimitVlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanIndex.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanIndex.setDescription('The VLAN ifIndex value that uniquely identifies a row in the Interfaces Table and corresponds to the VLAN on which the rate limit is being set.')
hpicfIngressRateLimitVlanControlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("ingressVlanKbps", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanControlMode.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanControlMode.setDescription('The rate limit mode. A value of 1 indicates that there is no rate limit set. A value of 2 indicates that the rate limit on the VLAN will be set in Kilobits per second.')
hpicfIngressRateLimitVlanKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 260000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanKbps.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanKbps.setDescription("The maximum rate of inbound traffic permitted on the VLAN in kilobits per second. This limit is the total aggregate inbound traffic allowed across all ports in the VLAN. The value of this object only takes effect when hpicfIngressRateLimitVlanControlMode is set to 'ingressVlanKbps'. A limit of 0 will drop all traffic. The actual traffic limit applied in hardware may be rounded down to the nearest multiple of the platform-dependent rate limiting granularity.")
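# Hedged usage sketch (not part of the generated MIB module): walking the VLAN rate-limit table
# defined above to read each VLAN's configured kbps ceiling. The agent address and community
# string are placeholder assumptions; the compiled module must be importable by the engine so
# the symbolic lookup of hpicfIngressRateLimitVlanKbps resolves.
def _example_walk_vlan_rate_limits():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget, ContextData,
                              ObjectType, ObjectIdentity, nextCmd)
    for error_indication, error_status, error_index, var_binds in nextCmd(
            SnmpEngine(), CommunityData('public'),
            UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
            ObjectType(ObjectIdentity('HP-ICF-RATE-LIMIT-MIB', 'hpicfIngressRateLimitVlanKbps')),
            lexicographicMode=False):
        if error_indication or error_status:
            break
        for var_bind in var_binds:
            # Prints e.g. "HP-ICF-RATE-LIMIT-MIB::hpicfIngressRateLimitVlanKbps.<ifIndex> = <kbps>"
            print(' = '.join(x.prettyPrint() for x in var_bind))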
hpicfRateLimitConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2))
hpicfRateLimitGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1))
hpicfRateLimitCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2))
hpICMPRateLimitPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 1)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortState"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortAlarmFlag"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitNotifyPortIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpICMPRateLimitPortConfigGroup = hpICMPRateLimitPortConfigGroup.setStatus('deprecated')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigGroup.setDescription('A collection of objects providing configuration to ICMP rate limiting on an interface.')
hpICMPRateLimitPortNotifyGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 2)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpICMPRateLimitPortNotifyGroup = hpICMPRateLimitPortNotifyGroup.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortNotifyGroup.setDescription('A collection of notifications used to indicate ICMP rate limiting events on an interface.')
hpBWMinIngressPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 3)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortPrct"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpBWMinIngressPortConfigGroup = hpBWMinIngressPortConfigGroup.setStatus('current')
if mibBuilder.loadTexts: hpBWMinIngressPortConfigGroup.setDescription('A collection of objects providing configuration to Guaranteed Minimum ingress bandwidth on an interface.')
hpBWMinEgressPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 4)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortPrct"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpBWMinEgressPortConfigGroup = hpBWMinEgressPortConfigGroup.setStatus('current')
if mibBuilder.loadTexts: hpBWMinEgressPortConfigGroup.setDescription('A collection of objects providing configuration to Guaranteed Minimum egress bandwidth on an interface.')
hpEgressRateLimitPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 5)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlBps"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortBps"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpEgressRateLimitPortConfigGroup = hpEgressRateLimitPortConfigGroup.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigGroup.setDescription('A collection of objects providing configuration to Egress Rate-Limiting on an interface.')
hpIngressRateLimitPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 6)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortSingleControlBps"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortBps"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpIngressRateLimitPortConfigGroup = hpIngressRateLimitPortConfigGroup.setStatus('deprecated')
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigGroup.setDescription('A collection of objects providing configuration to Ingress Rate-Limiting on an interface.')
hpICMPRateLimitPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 7)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortAlarmFlag"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitNotifyPortIndex"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortKbps"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortControlMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpICMPRateLimitPortConfigGroup2 = hpICMPRateLimitPortConfigGroup2.setStatus('deprecated')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigGroup2.setDescription('This replaces the deprecated hpICMPRateLimitPortConfigGroup. A collection of objects providing configuration to ICMP rate limiting on an interface.')
hpEgressRateLimitPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 8)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpEgressRateLimitPortConfigGroup2 = hpEgressRateLimitPortConfigGroup2.setStatus('deprecated')
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigGroup2.setDescription('This replaces the deprecated hpEgressRateLimitPortConfigGroup. A collection of objects providing configuration to Egress Rate-Limiting on an interface.')
hpIngressRateLimitPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 9)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpIngressRateLimitPortConfigGroup2 = hpIngressRateLimitPortConfigGroup2.setStatus('current')
if mibBuilder.loadTexts: hpIngressRateLimitPortConfigGroup2.setDescription('This replaces the deprecated hpIngressRateLimitPortConfigGroup. A collection of objects providing configuration to Ingress Rate-Limiting on an interface.')
hpBcastLimitIngressPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 10)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortSingleControlPrct"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpBcastLimitIngressPortConfigGroup = hpBcastLimitIngressPortConfigGroup.setStatus('deprecated')
if mibBuilder.loadTexts: hpBcastLimitIngressPortConfigGroup.setDescription('A collection of objects providing configuration to Ingress Broadcast Limiting on an interface.')
hpMcastLimitIngressPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 11)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortSingleControlPrct"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpMcastLimitIngressPortConfigGroup = hpMcastLimitIngressPortConfigGroup.setStatus('current')
if mibBuilder.loadTexts: hpMcastLimitIngressPortConfigGroup.setDescription('A collection of objects providing configuration to Ingress Multicast Limiting on an interface.')
hpBcastLimitIngressPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 12)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressBcastLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpBcastLimitIngressPortConfigGroup2 = hpBcastLimitIngressPortConfigGroup2.setStatus('current')
if mibBuilder.loadTexts: hpBcastLimitIngressPortConfigGroup2.setDescription('This replaces the deprecated hpBcastLimitIngressPortConfigGroup. A collection of objects providing configuration of Ingress Broadcast Limiting on an interface.')
hpMcastLimitIngressPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 13)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressMcastLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpMcastLimitIngressPortConfigGroup2 = hpMcastLimitIngressPortConfigGroup2.setStatus('current')
if mibBuilder.loadTexts: hpMcastLimitIngressPortConfigGroup2.setDescription('This replaces the deprecated hpMcastLimitIngressPortConfigGroup. A collection of objects providing configuration of Ingress Multicast Limiting on an interface.')
hpICMPRateLimitPortConfigGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 14)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortAlarmFlag"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitNotifyPortIndex"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortKbps"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortIpPacketType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpICMPRateLimitPortConfigGroup3 = hpICMPRateLimitPortConfigGroup3.setStatus('current')
if mibBuilder.loadTexts: hpICMPRateLimitPortConfigGroup3.setDescription('This replaces the deprecated hpICMPRateLimitPortConfigGroup2. A collection of objects providing configuration for ICMP rate limiting on an interface.')
hpUnknownUcastLimitIngressPortConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 15)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortSingleControlPrct"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpUnknownUcastLimitIngressPortConfigGroup = hpUnknownUcastLimitIngressPortConfigGroup.setStatus('deprecated')
if mibBuilder.loadTexts: hpUnknownUcastLimitIngressPortConfigGroup.setDescription('A collection of objects providing configuration to Ingress Unknown Unicast traffic Limiting on an interface.')
hpicfIngressRateLimitVlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 16)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpicfIngressRateLimitVlanControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpicfIngressRateLimitVlanKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIngressRateLimitVlanGroup = hpicfIngressRateLimitVlanGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIngressRateLimitVlanGroup.setDescription('A collection of HP proprietary objects to support configuration of VLAN-based rate limits on HP Networking devices.')
hpUnknownUcastLimitIngressPortConfigGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 17)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUnicastLimitPortSingleControlKbps"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpUnknownUcastLimitIngressPortConfigGroup2 = hpUnknownUcastLimitIngressPortConfigGroup2.setStatus('current')
if mibBuilder.loadTexts: hpUnknownUcastLimitIngressPortConfigGroup2.setDescription('A collection of objects providing configuration to Ingress Unknown Unicast traffic Limiting on an interface.')
hpEgressRateLimitPortConfigGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 18)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortNumQueues"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortControlMode"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortPrct"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortSingleControlKbps"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortQueueControlMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpEgressRateLimitPortConfigGroup3 = hpEgressRateLimitPortConfigGroup3.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortConfigGroup3.setDescription('This replaces the deprecated hpEgressRateLimitPortConfigGroup2. A collection of objects providing configuration to Egress Rate-Limiting on an interface.')
hpEgressRateLimitPortQueueConfigEntryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 1, 19)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortQueueMax"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpEgressRateLimitPortQueueConfigEntryGroup = hpEgressRateLimitPortQueueConfigEntryGroup.setStatus('current')
if mibBuilder.loadTexts: hpEgressRateLimitPortQueueConfigEntryGroup.setDescription("A collection of objects providing configuration of egress rate limits on an interface's queues.")
hpicfRateLimitCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2, 1)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortNotifyGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfRateLimitCompliance = hpicfRateLimitCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: hpicfRateLimitCompliance.setDescription('The compliance statement for device support of HP-ICF-RATE-LIMIT MIB.')
hpicfRateLimitCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2, 2)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortNotifyGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpBcastLimitIngressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpMcastLimitIngressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUcastLimitIngressPortConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfRateLimitCompliance1 = hpicfRateLimitCompliance1.setStatus('deprecated')
if mibBuilder.loadTexts: hpicfRateLimitCompliance1.setDescription('The compliance statement for device support of HP-ICF-RATE-LIMIT MIB.')
hpicfRateLimitCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2, 3)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortConfigGroup3"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortNotifyGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpBcastLimitIngressPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpMcastLimitIngressPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUcastLimitIngressPortConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfRateLimitCompliance2 = hpicfRateLimitCompliance2.setStatus('deprecated')
if mibBuilder.loadTexts: hpicfRateLimitCompliance2.setDescription('The compliance statement for device support of HP-ICF-RATE-LIMIT MIB.')
hpicfRateLimitCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2, 4)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpicfIngressRateLimitVlanGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfRateLimitCompliance3 = hpicfRateLimitCompliance3.setStatus('current')
if mibBuilder.loadTexts: hpicfRateLimitCompliance3.setDescription('The compliance statement for HP switches running IngressRateLimitVlan feature.')
hpicfRateLimitCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 2, 2, 5)).setObjects(("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortConfigGroup3"), ("HP-ICF-RATE-LIMIT-MIB", "hpICMPRateLimitPortNotifyGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinEgressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpBWMinIngressPortConfigGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortConfigGroup3"), ("HP-ICF-RATE-LIMIT-MIB", "hpEgressRateLimitPortQueueConfigEntryGroup"), ("HP-ICF-RATE-LIMIT-MIB", "hpIngressRateLimitPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpBcastLimitIngressPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpMcastLimitIngressPortConfigGroup2"), ("HP-ICF-RATE-LIMIT-MIB", "hpUnknownUcastLimitIngressPortConfigGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfRateLimitCompliance4 = hpicfRateLimitCompliance4.setStatus('current')
if mibBuilder.loadTexts: hpicfRateLimitCompliance4.setDescription('The compliance statement for device support of HP-ICF-RATE-LIMIT MIB.')
mibBuilder.exportSymbols("HP-ICF-RATE-LIMIT-MIB", hpicfICMPRateLimitObjects=hpicfICMPRateLimitObjects, hpIngressMcastLimitPortConfigTable=hpIngressMcastLimitPortConfigTable, hpBWMinIngressPortConfig=hpBWMinIngressPortConfig, hpEgressRateLimitPortSingleControlPrct=hpEgressRateLimitPortSingleControlPrct, hpBWMinIngressPortPrct=hpBWMinIngressPortPrct, hpBWMinIngressPortPrctEntry=hpBWMinIngressPortPrctEntry, hpUnknownUnicastLimitPortConfig=hpUnknownUnicastLimitPortConfig, hpBWMinEgressPortConfig=hpBWMinEgressPortConfig, hpUnknownUnicastLimitPortSingleControlPrct=hpUnknownUnicastLimitPortSingleControlPrct, hpicfIngressRateLimitVlanConfigTable=hpicfIngressRateLimitVlanConfigTable, hpEgressRateLimitPortQueueConfigTable=hpEgressRateLimitPortQueueConfigTable, hpicfRateLimitConformance=hpicfRateLimitConformance, hpicfIngressBcastLimitPortObjects=hpicfIngressBcastLimitPortObjects, hpIngressRateLimitPortBpsTable=hpIngressRateLimitPortBpsTable, hpMcastLimitIngressPortConfigGroup2=hpMcastLimitIngressPortConfigGroup2, hpicfRateLimitCompliance1=hpicfRateLimitCompliance1, hpicfRateLimitMIB=hpicfRateLimitMIB, hpUnknownUnicastLimitPortControlMode=hpUnknownUnicastLimitPortControlMode, hpEgressRateLimitPortBpsEntry=hpEgressRateLimitPortBpsEntry, hpBWMinIngressPortPrctQueue=hpBWMinIngressPortPrctQueue, hpIngressRateLimitPortPrctEntry=hpIngressRateLimitPortPrctEntry, hpIngressBcastLimitPortConfigTable=hpIngressBcastLimitPortConfigTable, hpicfRateLimitPortObjects=hpicfRateLimitPortObjects, hpUnknownUnicastLimitConfigTable=hpUnknownUnicastLimitConfigTable, hpEgressRateLimitPortControlMode=hpEgressRateLimitPortControlMode, hpIngressRateLimitPortPrct=hpIngressRateLimitPortPrct, hpEgressRateLimitPortQueueControlMode=hpEgressRateLimitPortQueueControlMode, hpEgressRateLimitPortConfigTable=hpEgressRateLimitPortConfigTable, hpRateLimitIngressPortConfig=hpRateLimitIngressPortConfig, hpicfIngressRateLimitPortObjects=hpicfIngressRateLimitPortObjects, hpEgressRateLimitPortPrctQueue=hpEgressRateLimitPortPrctQueue, hpIngressMcastLimitPortConfigEntry=hpIngressMcastLimitPortConfigEntry, hpIngressMcastLimitPortSingleControlKbps=hpIngressMcastLimitPortSingleControlKbps, hpicfIngressRateLimitVlanIndex=hpicfIngressRateLimitVlanIndex, hpIngressRateLimitPortSingleControlPrct=hpIngressRateLimitPortSingleControlPrct, hpBWMinEgressPortNumQueues=hpBWMinEgressPortNumQueues, hpIngressRateLimitPortIndex=hpIngressRateLimitPortIndex, hpIngressMcastLimitPortSingleControlPrct=hpIngressMcastLimitPortSingleControlPrct, hpUnknownUnicastLimitConfigEntry=hpUnknownUnicastLimitConfigEntry, hpEgressRateLimitPortPrctEntry=hpEgressRateLimitPortPrctEntry, hpBWMinIngressPortNumQueues=hpBWMinIngressPortNumQueues, hpBWMinIngressPortPrctTable=hpBWMinIngressPortPrctTable, hpICMPRateLimitPortConfigGroup3=hpICMPRateLimitPortConfigGroup3, hpicfRateLimitObjects=hpicfRateLimitObjects, hpIngressBcastLimitPortSingleControlPrct=hpIngressBcastLimitPortSingleControlPrct, hpICMPRateLimitPortState=hpICMPRateLimitPortState, hpBWMinEgressPortConfigGroup=hpBWMinEgressPortConfigGroup, hpicfRateLimitGroups=hpicfRateLimitGroups, hpEgressRateLimitPortPrctTable=hpEgressRateLimitPortPrctTable, hpICMPRateLimitPortConfigEntry=hpICMPRateLimitPortConfigEntry, hpBWMinEgressPortPrct=hpBWMinEgressPortPrct, hpICMPRateLimitPortConfigGroup2=hpICMPRateLimitPortConfigGroup2, hpEgressRateLimitPortConfigGroup3=hpEgressRateLimitPortConfigGroup3, hpEgressRateLimitPortConfigGroup2=hpEgressRateLimitPortConfigGroup2, hpICMPRateLimitPortConfigIndex=hpICMPRateLimitPortConfigIndex, 
hpEgressRateLimitPortConfigEntry=hpEgressRateLimitPortConfigEntry, hpBWMinEgressPortPrctQueue=hpBWMinEgressPortPrctQueue, hpBcastLimitIngressPortConfig=hpBcastLimitIngressPortConfig, hpicfIngressRateLimitVlanKbps=hpicfIngressRateLimitVlanKbps, hpIngressRateLimitPortSingleControlBps=hpIngressRateLimitPortSingleControlBps, hpicfRateLimitCompliance4=hpicfRateLimitCompliance4, hpIngressRateLimitPortNumQueues=hpIngressRateLimitPortNumQueues, hpicfRateLimitCompliance3=hpicfRateLimitCompliance3, PYSNMP_MODULE_ID=hpicfRateLimitMIB, hpicfBWMinIngressObjects=hpicfBWMinIngressObjects, hpEgressRateLimitPortSingleControlKbps=hpEgressRateLimitPortSingleControlKbps, hpEgressRateLimitPortNumQueues=hpEgressRateLimitPortNumQueues, hpIngressBcastLimitPortControlMode=hpIngressBcastLimitPortControlMode, hpIngressRateLimitPortConfigGroup=hpIngressRateLimitPortConfigGroup, hpIngressRateLimitPortSingleControlKbps=hpIngressRateLimitPortSingleControlKbps, hpIngressRateLimitPortPrctQueue=hpIngressRateLimitPortPrctQueue, hpMcastLimitIngressPortConfig=hpMcastLimitIngressPortConfig, hpIngressMcastLimitPortControlMode=hpIngressMcastLimitPortControlMode, hpIngressRateLimitPortConfigGroup2=hpIngressRateLimitPortConfigGroup2, hpICMPRateLimitPortNotification=hpICMPRateLimitPortNotification, hpBcastLimitIngressPortConfigGroup2=hpBcastLimitIngressPortConfigGroup2, hpIngressMcastLimitPortIndex=hpIngressMcastLimitPortIndex, hpICMPRateLimitPortConfigGroup=hpICMPRateLimitPortConfigGroup, hpIngressBcastLimitPortConfigEntry=hpIngressBcastLimitPortConfigEntry, hpIngressBcastLimitPortIndex=hpIngressBcastLimitPortIndex, hpEgressRateLimitPortSingleControlBps=hpEgressRateLimitPortSingleControlBps, hpIngressRateLimitPortPrctTable=hpIngressRateLimitPortPrctTable, hpEgressRateLimitPortBps=hpEgressRateLimitPortBps, hpEgressRateLimitPortQueueConfigEntry=hpEgressRateLimitPortQueueConfigEntry, hpICMPRateLimitPortConfigTable=hpICMPRateLimitPortConfigTable, hpIngressRateLimitPortConfigEntry=hpIngressRateLimitPortConfigEntry, hpicfRateLimitCompliance2=hpicfRateLimitCompliance2, hpIngressBcastLimitPortSingleControlKbps=hpIngressBcastLimitPortSingleControlKbps, hpICMPRateLimitPortNotifyGroup=hpICMPRateLimitPortNotifyGroup, hpicfIngressMcastLimitPortObjects=hpicfIngressMcastLimitPortObjects, hpUnknownUnicastLimitPortIndex=hpUnknownUnicastLimitPortIndex, hpicfRateLimitCompliances=hpicfRateLimitCompliances, hpIngressRateLimitPortBps=hpIngressRateLimitPortBps, hpUnknownUcastLimitIngressPortConfigGroup2=hpUnknownUcastLimitIngressPortConfigGroup2, hpEgressRateLimitPortQueueConfigEntryGroup=hpEgressRateLimitPortQueueConfigEntryGroup, hpIngressRateLimitPortConfigTable=hpIngressRateLimitPortConfigTable, hpICMPRateLimitPortPrct=hpICMPRateLimitPortPrct, hpICMPRateLimitNotifyPortIndex=hpICMPRateLimitNotifyPortIndex, hpBWMinEgressPortPrctEntry=hpBWMinEgressPortPrctEntry, hpEgressRateLimitPortBpsQueue=hpEgressRateLimitPortBpsQueue, hpUnknownUnicastLimitPortSingleControlKbps=hpUnknownUnicastLimitPortSingleControlKbps, hpICMPRateLimitPortControlMode=hpICMPRateLimitPortControlMode, hpEgressRateLimitPortBpsTable=hpEgressRateLimitPortBpsTable, hpICMPRateLimitConfig=hpICMPRateLimitConfig, hpicfIngressRateLimitVlanConfigEntry=hpicfIngressRateLimitVlanConfigEntry, hpEgressRateLimitPortIndex=hpEgressRateLimitPortIndex, hpicfRateLimitCompliance=hpicfRateLimitCompliance, hpicfBWMinEgressObjects=hpicfBWMinEgressObjects, hpBWMinIngressPortConfigGroup=hpBWMinIngressPortConfigGroup, hpMcastLimitIngressPortConfigGroup=hpMcastLimitIngressPortConfigGroup, 
hpIngressRateLimitPortControlMode=hpIngressRateLimitPortControlMode, hpEgressRateLimitPortConfigGroup=hpEgressRateLimitPortConfigGroup, hpIngressRateLimitPortBpsEntry=hpIngressRateLimitPortBpsEntry, hpicfUnknownUnicastLimitPortObjects=hpicfUnknownUnicastLimitPortObjects, hpBWMinEgressPortPrctTable=hpBWMinEgressPortPrctTable, hpICMPRateLimitPortKbps=hpICMPRateLimitPortKbps, hpicfIngressRateLimitVlanControlMode=hpicfIngressRateLimitVlanControlMode, hpEgressRateLimitPortQueueMax=hpEgressRateLimitPortQueueMax, hpBcastLimitIngressPortConfigGroup=hpBcastLimitIngressPortConfigGroup, hpEgressRateLimitPortQueueIndex=hpEgressRateLimitPortQueueIndex, hpUnknownUcastLimitIngressPortConfigGroup=hpUnknownUcastLimitIngressPortConfigGroup, hpICMPRateLimitPortAlarmFlag=hpICMPRateLimitPortAlarmFlag, hpicfIngressRateLimitVlanGroup=hpicfIngressRateLimitVlanGroup, hpIngressRateLimitPortBpsQueue=hpIngressRateLimitPortBpsQueue, hpEgressRateLimitPortPrct=hpEgressRateLimitPortPrct, hpicfIngressRateLimitVlanObjects=hpicfIngressRateLimitVlanObjects, hpRateLimitPortConfig=hpRateLimitPortConfig, hpICMPRateLimitPortIpPacketType=hpICMPRateLimitPortIpPacketType)
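# Hedged usage sketch: loading this generated module offline with pysnmp's MIB builder in order
# to resolve a symbol to its OID without talking to any device. The directory path below is an
# assumption; point it at wherever the pysnmp-compiled modules (including this file) are stored.
def _example_resolve_symbol_offline():
    from pysnmp.smi import builder
    mib_builder = builder.MibBuilder()
    mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
    mib_builder.loadModules('HP-ICF-RATE-LIMIT-MIB')
    (vlan_kbps,) = mib_builder.importSymbols('HP-ICF-RATE-LIMIT-MIB', 'hpicfIngressRateLimitVlanKbps')
    # Returns the OID tuple, i.e. (1, 3, 6, 1, 4, 1, 11, 2, 14, 10, 2, 14, 1, 9, 1, 1, 3) per the definition above.
    return vlan_kbps.getName()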
|
import unittest
from .. import *
class ScanTestCase(unittest.TestCase):
def test_reset(self):
scan.reset([player.Player(), player.Player()])
self.assertEqual(len(scan._bin_testing()), 2)
def test_binning1(self):
p1 = player.Player()
p_ref = reference.Reference(p1)
scan.reset([p1])
obj = ship.Ship()
scan.add(obj, location.Location(), 1, 1)
self.assertEqual(len(scan._bin_testing()[p_ref]), 1)
def test_binning2(self):
p1 = player.Player()
p_ref = reference.Reference(p1)
scan.reset([p1])
obj = ship.Ship()
scan.add(obj, location.Location(), 1, 1)
b = (0, 0, 0)
self.assertEqual(len(scan._bin_testing()[p_ref]), 1)
self.assertEqual(len(scan._bin_testing()[p_ref][b]), 1)
|
# Generate statistics table of coverage breadth values at the three given coverage depth thresholds
def stat_table(coverage_list, RegionNames, validation, phred_score, coverage_phred, X_Cut_off_list, RegionInfo, dataType):
Tresh_1 = 0
Tresh_2 = 0
Tresh_3 = 0
index=0
s_table =[]
s_table_phred=[]
val_list =[]
# Create the stat table for the coverage list without phred score filtering.
# The table contains the coverage breadth at three given coverage depth threshold values, X_cut_off_list.
# If validation is turned on the validation column is added
for line in coverage_list:
for element in line:
if int(element) >= int(X_Cut_off_list[0]):
Tresh_1+=1
if int(element) >= int(X_Cut_off_list[1]):
Tresh_2+=1
if int(element) >= int(X_Cut_off_list[2]):
Tresh_3+=1
# compute and enter the coverage depth values in the table together with region name and validation column if turned on
if validation:
if float(Tresh_1)/float(len(line)) < 0.95:
s_table.append([dataType,RegionNames[index], round(float(Tresh_1)/float(len(line)),2), round(float(Tresh_2)/float(len(line)),2), round(float(Tresh_3)/float(len(line)),2), '***'])
val_list.append([RegionNames[index], int(round(float(Tresh_1)/float(len(line)),2)*100)]+ RegionInfo[index])
else:
s_table.append([dataType,RegionNames[index], round(float(Tresh_1)/float(len(line)),2), round(float(Tresh_2)/float(len(line)),2), round(float(Tresh_3)/float(len(line)),2)])
else:
s_table.append([dataType, RegionNames[index], round(float(Tresh_1)/float(len(line)), 2), round(float(Tresh_2)/float(len(line)),2), round(float(Tresh_3)/float(len(line)),2)])
Tresh_1 = 0
Tresh_2 = 0
Tresh_3 = 0
index+=1
# If phred score filtering has been turned on an additional statistics table with the filtered values is generated
# with the same method as above.
if phred_score:
Tresh_1_p = 0
Tresh_2_p = 0
Tresh_3_p = 0
count=0
for line in coverage_phred:
for element in line:
if int(element) >= int(X_Cut_off_list[0]):
Tresh_1_p+=1
if int(element) >= int(X_Cut_off_list[1]):
Tresh_2_p+=1
if int(element) >= int(X_Cut_off_list[2]):
Tresh_3_p+=1
s_table_phred.append(['filtered',RegionNames[count], round(float(Tresh_1_p)/float(len(line)),2), round(float(Tresh_2_p)/float(len(line)),2), round(float(Tresh_3_p)/float(len(line)),2)])
Tresh_1_p = 0
Tresh_2_p = 0
Tresh_3_p = 0
count+=1
return s_table, s_table_phred, val_list
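# Hedged usage sketch: a minimal, hypothetical call to stat_table() with two toy regions,
# validation and phred filtering turned off, and 1x/10x/20x depth thresholds. The input values
# are illustrative only and not taken from the original pipeline.
if __name__ == '__main__':
    coverage = [[0, 5, 12, 30], [25, 40, 18, 9]]
    names = ['regionA', 'regionB']
    table, table_phred, failed = stat_table(coverage, names,
                                            validation=False, phred_score=False, coverage_phred=[],
                                            X_Cut_off_list=[1, 10, 20], RegionInfo=[[], []], dataType='raw')
    # Each row: [dataType, region name, breadth at 1x, breadth at 10x, breadth at 20x]
    print(table)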
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
from flask_cors import CORS
from sqlalchemy import create_engine
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
pw = os.environ['DB_PW']
url = os.environ['DB_URL']
db_name = os.environ['DB_DB']
db_user = os.environ['DB_USER']
# DB_URL is expected to end with a '/' so that the database name can be appended directly.
uri = "postgresql://" + db_user + ":" + pw + "@" + url + db_name
app.config["SQLALCHEMY_DATABASE_URI"] = uri
db = SQLAlchemy(app)
app.config.from_object(__name__)
CORS(app)
db_properties={}
db_properties['url_vis'] =uri
engine = create_engine(db_properties['url_vis'], echo=True)
from app import views
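# Hedged sketch: a minimal, hypothetical model declared against the SQLAlchemy instance
# configured above. The table and column names are illustrative only and are not part of
# the original application.
class ExampleRecord(db.Model):
    __tablename__ = 'example_record'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)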
|
"""
Copyright (c) Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import Counter
from functools import partial
from typing import List, Dict
import onnx
import pytest
import torch
import torch.nn
from onnx import numpy_helper
from nncf.dynamic_graph.graph import OperationExecutionContext, InputAgnosticOperationExecutionContext
from nncf.dynamic_graph.trace_tensor import TensorMeta
from nncf.nncf_network import InsertionInfo
from nncf.quantization.algo import QuantizationBuilder
from nncf.quantization.quantizer_id import NonWeightQuantizerId
from tests.helpers import create_compressed_model_and_algo_for_test
from tests.quantization.test_quantization_helpers import get_quantization_config_without_range_init
def make_op_exec_context_for_coalescing_test(scope_str: str) -> OperationExecutionContext:
ia_op_exec_context = InputAgnosticOperationExecutionContext.from_str(scope_str)
op_exec_context = OperationExecutionContext(ia_op_exec_context.operator_name,
ia_op_exec_context.scope_in_model,
ia_op_exec_context.call_order,
[TensorMeta(0, 0, [1])])
return op_exec_context
def make_insertion_info_for_coalescing_test(scope_str: str,
linked_op_exec_contexts: List[OperationExecutionContext] = None):
op_exec_context = make_op_exec_context_for_coalescing_test(scope_str)
retval = InsertionInfo(op_exec_context)
if linked_op_exec_contexts is not None:
retval.linked_op_exec_contexts = linked_op_exec_contexts
return retval
@pytest.mark.parametrize("input_insertion_infos, linked_scopes_groups_list, ref_coalesced_insertion_infos",
# ref_coalesced_insertion_infos == None means that the coalescing should raise an exception
[
# 0 - Empty linked scopes list
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
],
[],
# Same as input
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
],
),
# 1 - Linked scope only affects 1 operation
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
],
[["Foo/Baz[bar]/conv2d_0"]],
# Same as input
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
]
),
# 2 - Same as 1 but with multiple groups
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
],
[["Foo/Baz[bar]/conv2d_0"], ["Foo/Xyz[leet]/__add___0"]],
# Same as input again
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
)
]
),
# 3 - Single group affecting some of the scopes
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
)
],
[["Foo/Xyz[leet]/matmul_0", "Foo/Xyz[leet]/__add___0", "Foo/Baz[bar]/linear_0"]],
[
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0",
linked_op_exec_contexts=[
make_op_exec_context_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_op_exec_context_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
]
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
)
]
),
# 4 - Multiple groups, each affecting one operation
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
],
[["Foo/Baz[bar]/linear_0"], ["Foo/Asdf[jkl]/softmax_0"]],
[
# Same as input
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
]
),
# 5 - Multiple groups affecting multiple operations without overlapping
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_1"
),
],
[["Foo/Baz[bar]/conv2d_0",
"Foo/Baz[bar]/linear_0"],
["Foo/Asdf[jkl]/softmax_1", "Foo/Xyz[leet]/__add___0"]],
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0",
linked_op_exec_contexts=[
make_op_exec_context_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
]
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_1",
linked_op_exec_contexts=[
make_op_exec_context_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
]
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
]
),
# 6 - A variation of 5
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"
),
],
[["Foo/Baz[bar]/conv2d_0", "Foo/Baz[bar]/linear_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Asdf[jkl]/softmax_0", "Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"]],
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0",
linked_op_exec_contexts=[
make_op_exec_context_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_op_exec_context_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
)
]
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0",
linked_op_exec_contexts=[
make_op_exec_context_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"
),
]
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
]
),
# 7 - Overlapping groups
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"
),
],
[["Foo/Baz[bar]/conv2d_0", "Foo/Baz[bar]/linear_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Xyz[leet]/matmul_0",
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"]],
None
),
# 8 - More than 1 match for the operation specified in the group
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"
),
],
[["Foo/Baz[bar]/conv2d_0", "Foo/Xyz[leet]/matmul_0"],
["Foo/Xyz[leet]/matmul_0",
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"]],
None
),
# 9 - No match for an operation specified in the group
(
[
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/conv2d_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Baz[bar]/linear_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/__add___0"
),
make_insertion_info_for_coalescing_test(
"Foo/Xyz[leet]/matmul_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/softmax_0"
),
make_insertion_info_for_coalescing_test(
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"
),
],
[["Foo/Baz[bar]/conv2d_0", "Foo/Xyz[leet]/matmul_1"],
["Foo/Xyz[leet]/matmul_0",
"Foo/Asdf[jkl]/Qwer[tyu]/conv2d_0"]],
None
),
])
def test_insertion_info_coalescing(input_insertion_infos: List[InsertionInfo],
linked_scopes_groups_list: List[List[str]],
ref_coalesced_insertion_infos: List[InsertionInfo]):
if ref_coalesced_insertion_infos is None:
with pytest.raises(RuntimeError):
_ = QuantizationBuilder.coalesce_insertion_infos(input_insertion_infos,
linked_scopes_groups_list)
else:
test_coalesced_insertion_infos = QuantizationBuilder.coalesce_insertion_infos(input_insertion_infos,
linked_scopes_groups_list)
assert Counter(test_coalesced_insertion_infos) == Counter(ref_coalesced_insertion_infos)
class QuantizerLinkingTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self._dummy_trainable_param = torch.nn.Parameter(torch.ones([1]))
class Path(torch.nn.Module):
def forward(self, input_1, input_2):
retval0 = input_1 + input_2
retval1 = retval0 * input_2
retval2 = retval0 + retval1
# __add___0, __mul___0, __add___1 results respectively
return retval0, retval1, retval2
self.path1 = Path()
self.path2 = Path()
def forward(self, input_1, input_2):
path1_results = self.path1(input_1, input_2)
path2_results = self.path2(input_1, input_2)
return tuple(x + y for x, y in zip(path1_results, path2_results))
def test_quantizer_scale_linking():
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["compression"]["quantize_outputs"] = True
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 1],
},
{
"sample_size": [1, 1, 1, 1],
}
]
nncf_config["compression"]["activations"] = {
"linked_quantizer_scopes": [
[
# Note: Assuming that quantizers are attached as a post-op to the specified operation
"QuantizerLinkingTestModel/Path[path2]/__mul___0",
"QuantizerLinkingTestModel/Path[path2]/__add___0",
]
],
"ignored_scopes": [
# Ignore path output averaging operations
"QuantizerLinkingTestModel/__add___0",
"QuantizerLinkingTestModel/__add___1",
"QuantizerLinkingTestModel/__add___2",
]
}
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(QuantizerLinkingTestModel(),
nncf_config)
# 2 paths x 3 quantizers - 1 because two are shared in one path
assert len(compression_ctrl.non_weight_quantizers) == 5
test_input1 = torch.ones([1, 1, 1, 1])
test_input2 = 2 * test_input1
non_shared_mul_quantizer_id = NonWeightQuantizerId(
InputAgnosticOperationExecutionContext.from_str("QuantizerLinkingTestModel/Path[path1]/__mul___0"))
non_shared_add_quantizer_id = NonWeightQuantizerId(
InputAgnosticOperationExecutionContext.from_str("QuantizerLinkingTestModel/Path[path1]/__add___0"))
shared_quantizer_id = NonWeightQuantizerId(
InputAgnosticOperationExecutionContext.from_str("QuantizerLinkingTestModel/Path[path2]/__mul___0"))
non_shared_mul_quantizer = compression_ctrl.non_weight_quantizers[non_shared_mul_quantizer_id].quantizer_module_ref
non_shared_add_quantizer = compression_ctrl.non_weight_quantizers[non_shared_add_quantizer_id].quantizer_module_ref
shared_quantizer = compression_ctrl.non_weight_quantizers[shared_quantizer_id].quantizer_module_ref
old_scale = 765.0 # so that the quantum is equal to 3
with torch.no_grad():
for quantizer in compression_ctrl.all_quantizations.values():
quantizer.scale.fill_(old_scale)
# Expected outputs without compression - 6, 12, 18. Scale deliberately set to preserve the values
uncompressed_expected_outputs = (6.0 * torch.ones([1]), 12.0 * torch.ones([1]), 18.0 * torch.ones([1]))
outputs_with_shared_scale_1 = compressed_model(test_input1, test_input2)
for uncomp_out, comp_out_1 in zip(uncompressed_expected_outputs, outputs_with_shared_scale_1):
assert torch.allclose(uncomp_out, comp_out_1)
# Specifically clip the shared quantizer's outputs by setting scale to 1.0
new_shared_scale = 1.0
with torch.no_grad():
shared_quantizer.scale.fill_(new_shared_scale)
outputs_with_shared_scale_2 = compressed_model(test_input1, test_input2)
# __add___0 outputs
assert torch.allclose(outputs_with_shared_scale_2[0], 4.0 * torch.ones([1]))
# __mul___0 outputs
assert torch.allclose(outputs_with_shared_scale_2[1], 7.0 * torch.ones([1]))
# __add___1 outputs
assert torch.allclose(outputs_with_shared_scale_2[2], 12.0 * torch.ones([1]))
# Clipping the non-shared quantizers at the same position in the path as the two shared ones
# in the same manner is required to simulate the same grad input for both the shared quantizers
# and the unshared ones
with torch.no_grad():
non_shared_mul_quantizer.scale.fill_(new_shared_scale)
non_shared_add_quantizer.scale.fill_(new_shared_scale)
final_output = compressed_model(test_input1, test_input2)[2]
final_output.backward()
assert torch.allclose(shared_quantizer.scale.grad,
non_shared_mul_quantizer.scale.grad + non_shared_add_quantizer.scale.grad)
def test_unified_scales_for_vpu():
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["compression"]["quantize_outputs"] = True
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 1],
},
{
"sample_size": [1, 1, 1, 1],
}
]
nncf_config["hw_config_type"] = "vpu"
_, compression_ctrl = create_compressed_model_and_algo_for_test(QuantizerLinkingTestModel(),
nncf_config)
assert len(compression_ctrl.non_weight_quantizers) == 2
total_quantizations = sum(
[len(info.affected_ia_op_exec_contexts) for info in compression_ctrl.non_weight_quantizers.values()])
assert total_quantizations == 8
class SimplerModelForUnifiedScalesTesting(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv2d_1 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_2 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_3 = torch.nn.Conv2d(1, 1, 1)
self.conv2d_4 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
in_1, in_2 = x.chunk(dim=-1, chunks=2)
in_1 = self.conv2d_1(in_1)
in_2 = self.conv2d_2(in_2)
x = in_1 + in_2
x = torch.cat([x, x], dim=-1)
in_1, in_2 = x.chunk(dim=-1, chunks=2)
in_1 = self.conv2d_3(in_1)
in_2 = self.conv2d_4(in_2)
x = in_1 * in_2
return x
def test_unified_scales_are_identical_in_onnx(tmp_path):
#pylint:disable=no-member
nncf_config = get_quantization_config_without_range_init(model_size=1)
nncf_config["compression"]["quantize_outputs"] = True
nncf_config["input_info"] = [
{
"sample_size": [1, 1, 1, 2],
},
]
nncf_config["hw_config_type"] = "vpu"
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(
SimplerModelForUnifiedScalesTesting(),
nncf_config)
with torch.no_grad():
for quant_info in compression_ctrl.non_weight_quantizers.values():
quant_info.quantizer_module_ref.scale *= torch.abs(torch.rand_like(quant_info.quantizer_module_ref.scale))
test_input1 = torch.ones([1, 1, 1, 2])
compressed_model.forward(test_input1)
onnx_path = tmp_path / "model.onnx"
compression_ctrl.export_model(onnx_path)
onnx_model = onnx.load(onnx_path)
def get_fq_nodes(onnx_model: onnx.ModelProto) -> List[onnx.NodeProto]:
retval = []
for node in onnx_model.graph.node:
if str(node.op_type) == "FakeQuantize":
retval.append(node)
return retval
def immediately_dominates_add_or_mul(node: onnx.NodeProto, graph: onnx.GraphProto) -> bool:
if len(node.output) != 1:
return False
output_tensor_id = node.output[0]
matches = [x for x in graph.node if output_tensor_id in x.input]
for match in matches:
if match.op_type in ["Add", "Mul"]:
return True
return False
def get_successor(node: onnx.NodeProto, graph: onnx.GraphProto) -> onnx.NodeProto:
assert len(node.output) == 1 # Only single-output nodes are supported in this func
for target_node in graph.node:
if node.output[0] in target_node.input:
return target_node
return None
def group_nodes_by_output_target(nodes: List[onnx.NodeProto], graph: onnx.GraphProto) -> List[List[onnx.NodeProto]]:
output_nodes = {} # type: Dict[str, List[onnx.NodeProto]]
for node in nodes:
target_node_name = get_successor(node, graph).name
if target_node_name not in output_nodes:
output_nodes[target_node_name] = []
output_nodes[target_node_name].append(node)
return list(output_nodes.values())
def resolve_constant_node_inputs_to_values(node: onnx.NodeProto, graph: onnx.GraphProto) -> \
Dict[str, onnx.AttributeProto]:
retval = {}
for input_ in node.input:
constant_input_nodes = [x for x in graph.node if input_ in x.output and x.op_type == "Constant"]
for constant_input_node in constant_input_nodes:
assert len(constant_input_node.attribute) == 1
val = constant_input_node.attribute[0]
retval[input_] = numpy_helper.to_array(val.t)
return retval
fq_nodes = get_fq_nodes(onnx_model)
eltwise_predicate = partial(immediately_dominates_add_or_mul, graph=onnx_model.graph)
eltwise_fq_nodes = list(filter(eltwise_predicate, fq_nodes))
fq_nodes_grouped_by_output = group_nodes_by_output_target(eltwise_fq_nodes, onnx_model.graph)
for unified_scale_group in fq_nodes_grouped_by_output:
inputs = [resolve_constant_node_inputs_to_values(fq_node, onnx_model.graph) for fq_node in unified_scale_group]
for inputs_dict in inputs[1:]:
curr_values = list(inputs_dict.values())
ref_values = list(inputs[0].values())
assert curr_values == ref_values # All inputs for unified scale quantizers must be equal
|
# sweet_tenant/admin.py
# Django modules
from django.contrib import admin
# Locals
from .models import Sweet
# Register your models here.
admin.site.register(Sweet)
|
from typing import Text
from linebot.models import TextSendMessage
from models.message_request import MessageRequest
from skills import add_skill
@add_skill('/hello')
def get(message_request: MessageRequest):
return [
TextSendMessage(text=f'Hello World')
]
|
#
# Copyright 2013 Intel Corp
#
# Authors: Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware.pollsters import net
from ceilometer import sample
from ceilometer.tests.hardware.pollsters import base
class TestNetPollsters(base.TestPollsterBase):
def test_incoming(self):
self._check_get_samples(net.IncomingBytesPollster,
'hardware.network.incoming.bytes',
90, sample.TYPE_CUMULATIVE,
expected_unit='B')
def test_outgoing(self):
self._check_get_samples(net.OutgoingBytesPollster,
'hardware.network.outgoing.bytes',
80, sample.TYPE_CUMULATIVE,
expected_unit='B')
def test_error(self):
self._check_get_samples(net.OutgoingErrorsPollster,
'hardware.network.outgoing.errors',
1, sample.TYPE_CUMULATIVE,
expected_unit='packet')
|
import MySQLdb
# replace mysql.server with "localhost" if you are running via your own server!
# arguments: server, MySQL username, MySQL password, database name.
conn = MySQLdb.connect("mysql.server","beginneraccount","cookies","beginneraccount$tutorial")
c = conn.cursor()
c.execute("SELECT * FROM taula")
rows = c.fetchall()
for eachRow in rows:
print(eachRow)
|
# you can read these docs when writing the bot
# handler reference : https://python-telegram-bot.readthedocs.io/en/stable/telegram.ext.html
from telegram import ext
from . import views
HANDLERS = [
ext.CommandHandler('start', views.start),
ext.CommandHandler('getme', views.getme),
ext.CommandHandler('clear', views.clear),
ext.MessageHandler(ext.Filters.text, views.message)
]
|
from spacy.lang.en import English
nlp = English()
tokenizer = nlp.Defaults.create_tokenizer(nlp)
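# Simple vocabulary holder: maps words and tags to indices, keeps question/answer pairs, and builds bag-of-words and one-hot tag vectors.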
class voc:
def __init__(self):
self.num_words=1
self.num_tags=0
self.tags={}
self.index2tags={}
self.questions={}
self.word2index={}
self.response={}
def addWord(self,word):
if word not in self.word2index:
self.word2index[word] = self.num_words
self.num_words += 1
def addTags(self,tag):
if tag not in self.tags:
self.tags[tag]=self.num_tags
self.index2tags[self.num_tags]=tag
self.num_tags+=1
def addQuestion(self, question, answer):
self.questions[question]=answer
words=self.tokenization(question)
for wrd in words:
self.addWord(wrd)
def tokenization(self,ques):
tokens = tokenizer(ques)
token_list = []
for token in tokens:
token_list.append(token.lemma_)
return token_list
def getIndexOfWord(self,word):
return self.word2index[word]
def getQuestionInNum(self, ques):
words=self.tokenization(ques)
tmp=[ 0 for i in range(self.num_words)]
for wrds in words:
tmp[self.getIndexOfWord(wrds)]=1
return tmp
def getTag(self, tag):
tmp=[0.0 for i in range(self.num_tags)]
tmp[self.tags[tag]]=1.0
return tmp
def getVocabSize(self):
return self.num_words
def getTagSize(self):
return self.num_tags
def addResponse(self, tag, responses):
self.response[tag]=responses
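# Minimal usage sketch (hypothetical data, not part of the original module):
# v = voc()
# v.addTags('greeting')
# v.addQuestion('hello there', 'greeting')
# v.addResponse('greeting', ['Hi!', 'Hello!'])
# bow_vector = v.getQuestionInNum('hello there')  # bag-of-words encoding
# tag_vector = v.getTag('greeting')               # one-hot tag encoding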
|
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-v',
default=0,
type=int,
dest='VERTEX',
help='Set number of vertices.')
parser.add_argument('-e',
default=0,
type=int,
dest='EDGE',
help='Set number of edges')
parser.add_argument('-o',
default='graph_gen.txt',
dest='OUTPUT',
help='Place the output into <file>')
args = parser.parse_args()
vertex = args.VERTEX
edge = args.EDGE
outfile = args.OUTPUT
if vertex <= 0 or edge <= 0:
print('Please set number of vertices and edges.')
sys.exit()
max_edge = vertex ** 2
edge = min(edge, max_edge)
import numpy as np
choice = np.sort(np.random.choice(max_edge, edge, replace=False))
with open(outfile, 'w') as f:
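# Each sampled index i encodes a directed edge: source = i // vertex + 1, target = i % vertex + 1 (1-based vertex ids).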
for i in choice:
f.write(str(i // vertex + 1) + ',' + str(i % vertex + 1) + '\n')
|
from .MultiMatch import MultiMatch
from .Range import Range
from .Term import Term
from .Wildcard import Wildcard
__all__ = ["MultiMatch", "Range", "Term", "Wildcard"]
|
"""
The 1000 Bornes game
Quentin Deschamps
2020
"""
import pygame
from src.interface import Interface
from src.sons import Sons
from src.couleurs import Couleurs
from src.jeu import Jeu
import src.selection as selection
import src.stats as stats
from src.partie import Partie
from random import shuffle
if __name__ == "__main__":
nom = input("Votre nom : ")
pygame.init()
# Create the window
fenetre = Interface()
# Start the music
pygame.mixer.music.load('./sounds/Chill-house-music-loop-116-bpm.wav')
pygame.mixer.music.play(-1)
pygame.mixer.music.set_volume(0.1)
# Initialize the chosen card
numCarteChoisie = -1
carteChoisie = 'aucune'
endroitChoisi = 'aucun'
# Create the game
mancheSuivante = False
numManche = 0
playMusic = True
numJoueur = 0
listePoints = [0, 0, 0, 0]
partie = Partie(listePoints, nom)
# Mainloop
run = True
while run:
if not mancheSuivante:
# Turn the discard pile back into the draw pile
if len(partie.pioche) == 0:
partie.pioche = partie.pot[:]
shuffle(partie.pioche)
partie.pot = []
# Select the current player
joueurQuiJoue = partie.listeJoueurs[numJoueur]
if joueurQuiJoue.orientation == 'sud':
# The south player plays.
if len(partie.pioche) != 0:
# Case where the draw pile is not empty
if len(joueurQuiJoue.main) < 7:
joueurQuiJoue.main.append(partie.pioche.pop(0))
joueurQuiJoue.trieMain()
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'À vous de jouer ! Choisissez une carte.',
joueurQuiJoue.orientation,
playMusic)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
elif event.type == pygame.MOUSEBUTTONUP:
Sons.clickSound.play()
pos = pygame.mouse.get_pos()
# Music button
if selection.clickDansZone(pos, 5, 5, 35, 33):
if playMusic:
# Stop the music
pygame.mixer.music.stop()
playMusic = False
else:
# Resume the music
pygame.mixer.music.play(-1)
playMusic = True
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'À vous de jouer ! Choisissez une carte.',
joueurQuiJoue.orientation,
playMusic)
# Stat button
elif selection.clickDansZone(pos, 5, 55, 35, 31):
stats.afficheStats(
partie.statsKm,
partie.listeJoueurs)
# Night mode button
elif selection.clickDansZone(pos, 5, 105, 35, 35):
# Change the background color
if fenetre.bgColor == Couleurs.WHITE:
fenetre.bgColor = Couleurs.BLACK
else:
fenetre.bgColor = Couleurs.WHITE
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'À vous de jouer ! Choisissez une carte.',
joueurQuiJoue.orientation,
playMusic)
if carteChoisie == 'aucune':
carteChoisie, numCarteChoisie = selection.carteSelectionnee(
partie.listeJoueurs, pos)
if (carteChoisie != 'aucune'
and endroitChoisi == 'aucun'):
fenetre.update(
partie.listeJoueurs,
partie.pioche, partie.pot,
numCarteChoisie,
'Vous avez choisi la carte : '
+ Jeu.dicoNomsCartes[carteChoisie]
+ '.',
joueurQuiJoue.orientation,
playMusic)
endroitChoisi = selection.endroitSelectionne(
partie.listeJoueurs, pos)
if endroitChoisi == 'aucun':
carteChoisie, numCarteChoisie = selection.carteSelectionnee(
partie.listeJoueurs, pos)
if carteChoisie != 'aucune':
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'Vous avez choisi la carte : '
+ Jeu.dicoNomsCartes[carteChoisie]
+ '.',
joueurQuiJoue.orientation,
playMusic)
else:
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'À vous de jouer ! Choisissez une carte.',
joueurQuiJoue.orientation,
playMusic)
if (carteChoisie != 'aucune'
and endroitChoisi != 'aucun'):
message = partie.joueJoueurSud(
pos, partie.pot,
carteChoisie,
endroitChoisi)
if message != '':
# The south player plays again if
# a safety card (botte) is played
if carteChoisie not in Jeu.listeBottes:
# Update the km stats list
partie.statsKm[numJoueur].append(
joueurQuiJoue.km)
numJoueur = (numJoueur + 1) % 4
numCarteChoisie = -1
carteChoisie = 'aucune'
endroitChoisi = 'aucun'
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
else:
numCarteChoisie = -1
carteChoisie = 'aucune'
endroitChoisi = 'aucun'
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'À vous de jouer ! Choisissez une carte.',
joueurQuiJoue.orientation,
playMusic)
if carteChoisie != 'aucune':
# Move the chosen card
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
'Vous avez choisi la carte : '
+ Jeu.dicoNomsCartes[carteChoisie]
+ '.',
joueurQuiJoue.orientation,
playMusic,
bougeCarte=True)
else:
# Case where the draw pile is empty
message = joueurQuiJoue.phaseCritique()
partie.statsKm[numJoueur].append(joueurQuiJoue.km)
numJoueur = (numJoueur + 1) % 4
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
else:
# The other players play.
# print(joueurQuiJoue.nom, joueurQuiJoue.main)
if len(partie.pioche) != 0:
# Case where the draw pile is not empty
message, rejoue = joueurQuiJoue.joue(
partie.listeJoueurs,
partie.pioche,
partie.pot)
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
if not rejoue:
# Update the km stats list
partie.statsKm[numJoueur].append(joueurQuiJoue.km)
numJoueur = (numJoueur + 1) % 4
else:
# Case where the draw pile is empty
message = joueurQuiJoue.phaseCritique()
partie.statsKm[numJoueur].append(joueurQuiJoue.km)
numJoueur = (numJoueur + 1) % 4
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
message,
joueurQuiJoue.orientation,
playMusic)
# End-of-round condition
if (joueurQuiJoue.km == 1000 or (
len(partie.pioche) == 0
and len(partie.pot) == 0 and partie.tousBloque())):
gagnant = partie.listeJoueursTri()[0]
# Add the end-of-round points
gagnant.points += 400
for i in partie.listeJoueurs:
i.points += i.km
if 0 in [i.km for i in partie.listeJoueurs]:
# Case where one or more players are shut out (capot)
for i in partie.listeJoueurs:
if i.km != 0:
i.points += 500
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
"Capot ! 500 points pour les autres !",
gagnant.orientation,
playMusic)
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
"Capot ! 500 points pour les autres !",
gagnant.orientation,
playMusic,
temps=5)
fenetre.update(
partie.listeJoueurs,
partie.pioche,
partie.pot,
numCarteChoisie,
gagnant.nom + " a gagné la manche ! 400 points",
gagnant.orientation, playMusic)
Sons.tadaSound.play()
fenetre.pause(
partie.listeJoueurs,
partie.pioche,
partie.pot,
partie.statsKm,
numCarteChoisie,
gagnant.nom + " a gagné la manche ! 400 points",
gagnant.orientation,
playMusic,
temps=10)
mancheSuivante = True
else:
listePoints = [i.points for i in partie.listeJoueurs]
partie = Partie(listePoints, nom)
numJoueur = numManche % 4
numManche += 1
mancheSuivante = False
pygame.quit()
|
class FinisherTemplate(object):
def __init__(self, name, description, message, body_message):
self.name = name
self.description = description
self.message = message
self.body_message = body_message
|
import argparse
import os
import yaml
import logging
import coloredlogs
from pippin.config import mkdirs, get_logger, get_output_dir, chown_file, get_config, chown_dir
from pippin.manager import Manager
from colorama import init
class MessageStore(logging.Handler):
store = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.store = {}
def emit(self, record):
l = record.levelname
if l not in self.store:
self.store[l] = []
self.store[l].append(record)
def get_warnings(self):
return self.store.get("WARNING", []) + []
def get_errors(self):
return self.store.get("CRITICAL", []) + self.store.get("ERROR", [])
def setup_logging(config_filename, logging_folder, args):
level = logging.DEBUG if args.verbose else logging.INFO
logging_filename = f"{logging_folder}/{config_filename}.log"
message_store = MessageStore()
message_store.setLevel(logging.WARNING)
NOTICE_LEVELV_NUM = 25
logging.addLevelName(NOTICE_LEVELV_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVELV_NUM):
self._log(NOTICE_LEVELV_NUM, message, args, **kws)
logging.Logger.notice = notice
fmt = "[%(levelname)8s |%(filename)21s:%(lineno)3d] %(message)s" if args.verbose else "%(message)s"
handlers = [logging.StreamHandler(), message_store]
handlers[0].setLevel(level)
if not args.check:
handlers.append(logging.FileHandler(logging_filename, mode="w"))
handlers[-1].setLevel(logging.DEBUG)
logging.basicConfig(level=level, format=fmt, handlers=handlers)
coloredlogs.install(
level=level,
fmt=fmt,
reconfigure=True,
level_styles=coloredlogs.parse_encoded_styles("debug=8;notice=green;warning=yellow;error=red,bold;critical=red,inverse"),
)
logging.getLogger("matplotlib").setLevel(logging.ERROR)
logger = get_logger()
logger.info(f"Logging streaming out, also saving to {logging_filename}")
return message_store, logging_filename
def run(args):
if args is None:
return None
init()
# Load YAML config file
yaml_path = os.path.abspath(os.path.expandvars(args.yaml))
assert os.path.exists(yaml_path), f"File {yaml_path} cannot be found."
with open(yaml_path, "r") as f:
config = yaml.safe_load(f)
overwrites = config.get("GLOBAL")
if config.get("GLOBALS") is not None:
logging.warning("Your config file has a GLOBALS section in it. If you're trying to overwrite cfg.yml, rename this to GLOBAL")
cfg = None
if config.get("GLOBAL"):
cfg = config.get("GLOBAL").get("CFG_PATH")
if cfg is None:
cfg = args.config
global_config = get_config(initial_path=cfg, overwrites=overwrites)
config_filename = os.path.basename(args.yaml).split(".")[0].upper()
output_dir = get_output_dir()
logging_folder = os.path.abspath(os.path.join(output_dir, config_filename))
if not args.check:
mkdirs(logging_folder)
if os.path.exists(logging_folder):
chown_dir(logging_folder, walk=args.permission)
if args.permission:
return
message_store, logging_filename = setup_logging(config_filename, logging_folder, args)
for i, d in enumerate(global_config["DATA_DIRS"]):
logging.debug(f"Data directory {i + 1} set as {d}")
assert d is not None, "Data directory is none, which means it failed to resolve. Check the error message above for why."
logging.info(f"Running on: {os.environ.get('HOSTNAME', '$HOSTNAME not set')} login node.")
manager = Manager(config_filename, yaml_path, config, message_store)
if args.start is not None:
args.refresh = True
manager.set_start(args.start)
manager.set_finish(args.finish)
manager.set_force_refresh(args.refresh)
manager.set_force_ignore_stage(args.ignore)
manager.execute(args.check)
chown_file(logging_filename)
return manager
def get_syntax():
syntax = {}
base = os.path.dirname(os.path.realpath(__file__))
with open(f"{base}/README.md", 'r') as f:
readme = f.read()
lines = readme.split('\n')
start, end = [idx for (idx, line) in enumerate(lines) if "[//]" in line]
lines = lines[start:end]
index = [idx for (idx, line) in enumerate(lines) if "###" == line.split(' ')[0]]
tasks = []
for i in range(len(index)):
idx = index[i]
if idx != index[-1]:
tasks.append("\n".join(lines[idx+2:index[i+1]-1]))
else:
tasks.append("\n".join(lines[idx+2:-1]))
taskname = ["DATAPREP", "SIM", "LCFIT", "CLASSIFY", "AGG", "MERGE", "BIASCOR", "CREATE_COV", "COSMOMC", "ANALYSE"]
for i, name in enumerate(taskname):
syntax[name] = tasks[i]
syntax["options"] = f"Possible tasks are: ({[(i, task) for i, task in enumerate(taskname)]})"
return syntax
def print_syntax(s):
syntax = get_syntax()
try:
keys = list(syntax.keys())
s = int(s)
if s < 0 or s > len(keys) - 1:
raise ValueError(f"Unknown task number {s}")
key = keys[s]
except ValueError:
key = s
if key not in syntax.keys():
raise ValueError(f"Unknown task {key}")
msg = syntax[key]
print(msg)
return None
def get_args(test=False):
# Set up command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("yaml", help="the name of the yml config file to run. For example: configs/default.yml", type=str, nargs='*')
parser.add_argument("--config", help="Location of global config", default=None, type=str)
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
parser.add_argument("-s", "--start", help="Stage to start and force refresh", default=None)
parser.add_argument("-f", "--finish", help="Stage to finish at (it runs this stage too)", default=None)
parser.add_argument("-r", "--refresh", help="Refresh all tasks, do not use hash", action="store_true")
parser.add_argument("-c", "--check", help="Check if config is valid", action="store_true", default=False)
parser.add_argument("-p", "--permission", help="Fix permissions and groups on all output, don't rerun", action="store_true", default=False)
parser.add_argument("-i", "--ignore", help="Dont rerun tasks with this stage or less", default=None)
parser.add_argument("-S", "--syntax", help="Get the syntax of the given task.", default=None, const="options", type=str, nargs='?')
args = parser.parse_args()
if args.syntax is not None:
s = args.syntax
print_syntax(s)
return None
elif not test:
if len(args.yaml) == 0:
parser.error("You must specify a yaml file!")
else:
args.yaml = args.yaml[0]
return args
if __name__ == "__main__":
args = get_args()
run(args)
|
# coding=utf-8
from colorama import Fore
import argparse
import copy
from Putil.data.common_data import CommonDataWithAug
import numpy as np
import Putil.base.logger as plog
logger = plog.PutilLogConfig('dataset').logger()
logger.setLevel(plog.DEBUG)
COCOLogger = logger.getChild('COCO')
COCOLogger.setLevel(plog.DEBUG)
import Putil.data.coco as coco
from Putil.trainer.util import Stage
def common_dataset_arg(parser, property_type='', **kwargs):
'''
@brief Common arguments shared by the datasets
@note Argument description:
n_worker_per_dataset: number of workers per dataset; every dataset inherits from Putil.data.common_data.CommonDataWithAug
sub_data: specify a subset of the dataset; very useful in early debugging, since a subset gives quick conclusions when developing the model test pipeline or checking model feasibility
remain_data_as_negative: whether the data outside sub_data is kept as negative samples; using it can reduce the bias of the sample space
fake_aug: when testing on a small subset, restarting an epoch is relatively costly, so fake_aug sets how many times
the data of one epoch is duplicated
naug: when set, the dataset does not use data augmentation
'''
parser.add_argument('--{}n_worker_per_dataset'.format(property_type), action='store', type=int, default=1, \
help='the number of worker for every dataset')
parser.add_argument('--{}sub_data'.format(property_type), type=int, nargs='+', default=None, \
help='list with int, specified the sub dataset which would be used in train evaluate, '
'default None(whole dataset)')
parser.add_argument('--{}remain_data_as_negative'.format(property_type), action='store_true', \
help='if set, the data beside $sub_data would be used as negative, otherwise the data beside ' \
'$sub_data would be abandoned')
parser.add_argument('--{}fake_aug'.format(property_type), action='store', type=int, default=0, \
help='do the sub aug with NoOp for fake_aug time, check the generate_dataset')
parser.add_argument('--{}naug'.format(property_type), action='store_true', \
help='do not use data aug while set')
parser.add_argument('--{}data_using_rate_train'.format(property_type), action='store', type=float, default=1.0, \
help='rate of data used in train')
parser.add_argument('--{}data_using_rate_evaluate'.format(property_type), action='store', type=float, default=1.0, \
help='rate of data used in evaluate')
parser.add_argument('--{}data_using_rate_test'.format(property_type), action='store', type=float, default=1.0, \
help='rate of data used in test')
parser.add_argument('--{}shuffle_train'.format(property_type), action='store_true', default=False, \
help='shuffle the train data every epoch')
parser.add_argument('--{}shuffle_evaluate'.format(property_type), action='store_true', default=False, \
help='shuffle the evaluate data every epoch')
parser.add_argument('--{}shuffle_test'.format(property_type), action='store_true', default=False, \
help='shuffle the test data every epoch')
parser.add_argument('--{}drop_last_train'.format(property_type), action='store_true', default=False, \
help='drop the last uncompleted train data while set')
parser.add_argument('--{}drop_last_evaluate'.format(property_type), action='store_true', default=False, \
help='drop the last uncompleted evaluate data while set')
parser.add_argument('--{}drop_last_test'.format(property_type), action='store_true', default=False, \
help='drop the last uncompleted test data while set')
pass
def common_dd_dataset_arg(parser, property_type='', **kwargs):
common_dataset_arg(parser)
parser.add_argument('--{}input_height'.format(property_type), type=int, action='store', default=256, \
help='the height of the input')
parser.add_argument('--{}input_width'.format(property_type), action='store', type=int, default=256, \
help='the width of the input')
parser.add_argument('--{}coco_remain_strategy'.format(property_type), type=str, default='drop', \
help='')
pass
def common_ddd_dataset_arg(parser, property_type='', **kwargs):
common_dataset_arg(parser)
parser.add_argument('--{}input_height'.format(property_type), type=int, action='store', default=256, \
help='the height of the input')
parser.add_argument('--{}input_width'.format(property_type), action='store', type=int, default=256, \
help='the width of the input')
parser.add_argument('--{}input_depth'.format(property_type), action='store', type=int, default=256, \
help='the depth of the input')
pass
class Dataset(CommonDataWithAug):
def __init__(self, args, property_type='', **kwargs):
CommonDataWithAug.__init__(self)
pass
pass
def COCODataset(args, property_type='', **kwargs):
temp_args = copy.deepcopy(args)
# assumption: the execution stage is supplied through kwargs (defaults to Stage.Train)
stage = kwargs.get('stage', Stage.Train)
def generate_coco_dataset():
return coco.COCOData(coco_root_dir=eval('args.{}coco_root_dir'.format(property_type)),
stage=stage,
information_save_to_path=eval('args.{}save_dir'.format(property_type)),
detection=eval('args.{}coco_detection'.format(property_type)),
key_points=eval('args.{}coco_key_points'.format(property_type)),
stuff=eval('args.{}coco_stuff'.format(property_type)),
panoptic=eval('args.{}coco_panoptic'.format(property_type)),
dense_pose=eval('args.{}coco_dense_pose'.format(property_type)),
captions=eval('args.{}coco_captions'.format(property_type)),
cat_ids=eval('args.{}coco_cat_ids'.format(property_type)),
use_rate=eval('args.{}coco_use_rate'.format(property_type)) \
if 'coco_use_rate' in dir(args) else eval('args.{}data_using_rate_train'.format(property_type)) \
if stage == Stage.Train else eval('args.{}data_using_rate_evaluate'.format(property_type)) \
if stage == Stage.Train or stage == Stage.TrainEvaluate else eval('args.{}data_using_rate_test'.format(property_type)),
image_width=eval('args.{}coco_image_width'.format(property_type)) if 'coco_image_width' in dir(args) else eval('args.{}image_width'.format(property_type)),
image_height=eval('args.{}coco_image_height'.format(property_type)) if 'coco_image_height' in dir(args) else eval('args.{}image_height'.format(property_type)),
remain_strategy=eval('args.{}coco_remain_strategy'.format(property_type)))
return generate_coco_dataset
def COCOArg(parser, property_type='', **kwargs):
try:
common_dd_dataset_arg(parser)
except argparse.ArgumentError as e:
COCOLogger.warning(Fore.YELLOW + e.message + Fore.RESET)
pass
parser.add_argument('--{}coco_root_dir'.format(property_type), type=str, default='', action='store', \
help='')
parser.add_argument('--{}coco_detection'.format(property_type), action='store_true', default=False, \
help='generate detection data while set')
parser.add_argument('--{}coco_key_points'.format(property_type), action='store_true', default=False, \
help='generate key points data while set')
parser.add_argument('--{}coco_stuff'.format(property_type), action='store_true', default=False, \
help='generate stuff data while set')
parser.add_argument('--{}coco_panoptic'.format(property_type), action='store_true', default=False, \
help='generate panoptic data while set')
parser.add_argument('--{}coco_dense_pose'.format(property_type), action='store_true', default=False, \
help='generate dense pose data while set')
parser.add_argument('--{}coco_captions'.format(property_type), action='store_true', default=False, \
help='generate captions data while set')
parser.add_argument('--{}coco_cat_ids'.format(property_type), type=int, default=[], nargs='+', \
help='specify the target cat ids')
parser.add_argument('--{}coco_image_width'.format(property_type), type=int, default=256, \
help='specify the width of the data')
parser.add_argument('--{}coco_image_height'.format(property_type), type=int, default=256, \
help='specify the height of the data')
pass
# DefaultDataset
class _DefaultDataset(Dataset):
def __init__(self, args, property_type='', **kwargs):
Dataset.__init__(self, args)
self._data_field = list(range(0, 1000))
pass
def _restart_process(self, restart_param):
pass
def _inject_operation(self, inject_param):
pass
def _generate_from_origin_index(self, index):
x = np.random.sample() * 1000
return np.array([[x]], dtype=np.float32), np.array([[np.sin(x)]], dtype=np.float32)
pass
def DefaultDataset(args, property_type='', **kwargs):
temp_args = copy.deepcopy(args)
def generate_default_dataset():
return _DefaultDataset(temp_args, property_type, **kwargs)
return generate_default_dataset
def DefaultDatasetArg(parser, property_type='', **kwargs):
common_dataset_arg(parser, property_type, **kwargs)
pass
#parser = argparse.ArgumentParser()
#parser.add_argument('--t', type=int, default=1, action='store')
#try:
# parser.add_argument('--t', type=int, default=1, action='store')
#except argparse.ArgumentError as e:
# print('a')
#args = parser.parse_args()
#print(args)
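# Minimal usage sketch for the helpers above (assumes standalone use; not part of the original module):
#parser = argparse.ArgumentParser()
#DefaultDatasetArg(parser)
#args = parser.parse_args([])
#dataset_factory = DefaultDataset(args)
#dataset = dataset_factory()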
|
import glob
from pwn import *
"""
Modulo in assembly is another interesting concept! x86 allows you to get the
remainder after doing a division on something. For instance:
10 / 3 -> remainder = 1
You can get the remainder of a division using the instructions introduced earlier
through the div instruction.
In most programming languages we refer to mod with the symbol '%'.
div
mov rax, reg1; div reg2 Notice: to use this instruction you need to first load rax with the desired register
you intend to be the dividend. Then run div reg2, where reg2 is the divisor. This
results in:
rax = rdi / rsi; rdx = remainder
The quotient is placed in rax, the remainder is placed in rdx.
"""
context.arch = "amd64"
#context.log_level = "DEBUG"
# rdi / rsi -> put rdi in rax, div rsi, the quotient ends up in rax, the remainder in rdx
# pwntools will change SYS_exit to the respective syscall
assembly = """
mov rax, rdi
div rsi
mov rax, rdx
"""
shellcode = asm(assembly, arch="amd64", os="linux")
#print(disasm(shellcode))
p = process(glob.glob('/challenge/embry*'))
print(p.recv(timeout=2).decode())
p.send(shellcode)
print(p.recvall().decode())
|
from django.contrib.auth.views import LoginView
class HomePage(LoginView):
template_name = 'account/login.html'
|
# Undergraduate Student: Arturo Burgos
# Professor: João Rodrigo Andrade
# Federal University of Uberlândia - UFU, Fluid Mechanics Laboratory - MFLab, Block 5P, Uberlândia, MG, Brazil
# Fourth exercise: Solving a Linear System --> ax = b
# Here I first set conditions
import numpy as np
from numpy import linalg as lin
np.seterr(divide='ignore', invalid='ignore')
print('\n')
# Basic definitions
initialLineValue = 0
initialColumnValue = 0
pointsAmount = 9
pointsRoot = int(np.sqrt(pointsAmount))
#K = np.sqrt(pointsAmount)
#k = int(K)
# Initial x_k = old and x_k1 = current value
old = 0
current = 1
valuesVector = np.zeros((2,pointsAmount))
valuesVector[old] = np.zeros(pointsAmount)
valuesVector[current] = np.ones(pointsAmount)
# Here I set the Matrix
indexMatrix = np.zeros((pointsAmount, pointsAmount))
for matrixRow in range(initialLineValue, pointsAmount):
for matrixCol in range(initialLineValue, pointsAmount):
if matrixRow == matrixCol:
indexMatrix[matrixRow, matrixCol] = -4
elif matrixRow == matrixCol - 3 or matrixRow ==matrixCol + 3:
indexMatrix[matrixRow, matrixCol] = 1
elif ((matrixRow + 1) % 3 != 0 and matrixRow == matrixCol - 1) or ((matrixRow + 1) % 3 != 1 and matrixRow == matrixCol + 1): # (i+1) because in Python we start from 0
indexMatrix[matrixRow, matrixCol] = 1
print('The coefficient Matrix is:')
print(indexMatrix)
print('\n')
resultMatrix = np.zeros(pointsAmount)
for matrixRow in range(initialLineValue, pointsRoot):
if matrixRow < pointsRoot - 1: # (k-1) because in Python we start from 0
resultMatrix[matrixRow] = -50
else:
resultMatrix[matrixRow] = -150
for matrixRow in range(pointsRoot, pointsAmount - pointsRoot):
if (matrixRow + 1) % 6 != 0: # (i+1) because in Python we start from 0
resultMatrix[matrixRow] = 0
else:
resultMatrix[matrixRow] = -100
for matrixRow in range(pointsAmount - pointsRoot, pointsAmount):
if matrixRow < pointsAmount - 1: # (k-1) because in Python we start from 0
resultMatrix[matrixRow] = -50
else:
resultMatrix[matrixRow] = -150
print('The result Matrix is:')
print(resultMatrix)
print('\n')
# Here I set the tolerance
tolerance = 0.0000001
# Here I set the iterations
iterationsCounter = 0
# Here I set the error based on the infinity norm
error = np.max(np.abs((valuesVector[current] - valuesVector[old]) / valuesVector[current]))
while error > tolerance:
for matrixRow in range(initialLineValue, pointsAmount):
#x_k1[i] = b[i].copy()
valuesVector[current][matrixRow] = resultMatrix[matrixRow]
for matrixCol in range(initialColumnValue, pointsAmount):
if(matrixCol != matrixRow):
valuesVector[current][matrixRow] = valuesVector[current][matrixRow] - indexMatrix[matrixRow, matrixCol] * valuesVector[old][matrixCol]
#x_k1[i] = x_k1[i].copy() / a[i,i].copy()
valuesVector[current][matrixRow] = valuesVector[current][matrixRow] / indexMatrix[matrixRow, matrixRow]
error = np.max(np.abs((valuesVector[current] - valuesVector[old]) / valuesVector[current]))
valuesVector[old] = valuesVector[current]
iterationsCounter = iterationsCounter + 1
print('The number of iterations is: ')
print(iterationsCounter)
print('The solution is:')
print(valuesVector[current])
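# Optional cross-check (not part of the original exercise): compare the Jacobi result
# with NumPy's direct solver, using the `lin` alias imported above.
directSolution = lin.solve(indexMatrix, resultMatrix)
print('Direct solution from numpy.linalg.solve:')
print(directSolution)
print('Maximum absolute difference between the two solutions:')
print(np.max(np.abs(directSolution - valuesVector[current])))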
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import random
import string
import time
import uuid
import mock
import oslo_utils.fixture
from oslo_utils import timeutils
import six
from testtools import matchers
import webob
from keystone import assignment
from keystone import auth
from keystone.common import authorization
import keystone.conf
from keystone import exception
from keystone.models import token_model
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
from keystone import token
from keystone import trust
CONF = keystone.conf.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
HOST = ''.join(random.choice(string.ascii_lowercase) for x in range(
random.randint(5, 15)))
HOST_URL = 'http://%s' % (HOST)
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
class AuthTest(unit.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(AuthTest, self).setUp()
self.time_fixture = self.useFixture(oslo_utils.fixture.TimeFixture())
self.load_backends()
self.load_fixtures(default_fixtures)
environ = {'REMOTE_USER': 'FOO', 'AUTH_TYPE': 'Negotiate'}
self.request_with_remote_user = self.make_request(environ=environ)
self.empty_request = self.make_request()
self.controller = token.controllers.Auth()
def assertEqualTokens(self, a, b, enforce_audit_ids=True):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
token['access']['token']['id'] = 'dummy'
del token['access']['token']['expires']
del token['access']['token']['issued_at']
del token['access']['token']['audit_ids']
return token
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['expires']),
timeutils.parse_isotime(b['access']['token']['expires']))
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['issued_at']),
timeutils.parse_isotime(b['access']['token']['issued_at']))
if enforce_audit_ids:
self.assertIn(a['access']['token']['audit_ids'][0],
b['access']['token']['audit_ids'])
self.assertThat(len(a['access']['token']['audit_ids']),
matchers.LessThan(3))
self.assertThat(len(b['access']['token']['audit_ids']),
matchers.LessThan(3))
return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
def test_no_external_auth(self):
"""Verify that _authenticate_external() raises exception if N/A."""
request = webob.Request.blank('/')
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
request, auth={})
def test_empty_remote_user(self):
"""Verify exception is raised when REMOTE_USER is an empty string."""
request = webob.Request.blank('/', environ={'REMOTE_USER': ''})
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
request, auth={})
def test_no_token_in_auth(self):
"""Verify that _authenticate_token() raises exception if no token."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_token,
None, {})
def test_no_credentials_in_auth(self):
"""Verify that _authenticate_local() raises exception if no creds."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {})
def test_empty_username_and_userid_in_auth(self):
"""Verify that empty username and userID raises ValidationError."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {'passwordCredentials': {'password': 'abc',
'userId': '', 'username': ''}})
def test_authenticate_blank_request_body(self):
"""Verify sending empty json dict raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
self.make_request(), {})
def test_authenticate_blank_auth(self):
"""Verify sending blank 'auth' raises the right exception."""
body_dict = _build_user_auth()
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_invalid_auth_content(self):
"""Verify sending invalid 'auth' raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
self.make_request(), {'auth': 'abcd'})
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
length = CONF.identity.max_password_length + 1
body_dict = _build_user_auth(username='FOO', password='0' * length)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_fails_if_project_unsafe(self):
"""Verify authenticate to a project with unsafe name fails."""
# Start with url name restrictions off, so we can create the unsafe
# named project
self.config_fixture.config(group='resource',
project_name_url_safe='off')
unsafe_name = 'i am not / safe'
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id, name=unsafe_name)
self.resource_api.create_project(project['id'], project)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project['id'], self.role_member['id'])
empty_request = self.make_request()
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project['name'])
# Since name url restriction is off, we should be able to authenticate
self.controller.authenticate(empty_request, body_dict)
# Set the name url restriction to strict and we should fail to
# authenticate
self.config_fixture.config(group='resource',
project_name_url_safe='strict')
self.assertRaises(exception.Unauthorized,
self.controller.authenticate,
empty_request, body_dict)
class AuthWithToken(AuthTest):
def test_unscoped_token(self):
"""Verify getting an unscoped token with password creds."""
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(), body_dict)
def test_auth_bad_formatted_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={})
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_auth_unscoped_token_no_project(self):
"""Verify getting an unscoped token with an unscoped token."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(self.make_request(),
body_dict)
self.assertEqualTokens(unscoped_token, unscoped_token_2)
def test_auth_unscoped_token_project(self):
"""Verify getting a token in a tenant with an unscoped token."""
# Add a role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Get an unscoped token
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
# Get a token on BAR tenant using the unscoped token
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
scoped_token = self.controller.authenticate(self.make_request(),
body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertThat(roles, matchers.Contains(self.role_member['id']))
def test_auth_scoped_token_bad_project_with_debug(self):
"""Authenticating with an invalid project fails."""
# Bug 1379952 reports poor user feedback, even in insecure_debug mode,
# when the user accidentally passes a project name as an ID.
# This test intentionally does exactly that.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id=self.tenant_bar['name'])
# with insecure_debug enabled, this produces a friendly exception.
self.config_fixture.config(debug=True, insecure_debug=True)
e = self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(), body_dict)
# explicitly verify that the error message shows that a *name* is
# found where an *ID* is expected
self.assertIn(
'Project ID not found: %s' % self.tenant_bar['name'],
six.text_type(e))
def test_auth_scoped_token_bad_project_without_debug(self):
"""Authenticating with an invalid project fails."""
# Bug 1379952 reports poor user feedback, even in insecure_debug mode,
# when the user accidentally passes a project name as an ID.
# This test intentionally does exactly that.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id=self.tenant_bar['name'])
# with insecure_debug disabled (the default), authentication failure
# details are suppressed.
e = self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(), body_dict)
# explicitly verify that the error message details above have been
# suppressed.
self.assertNotIn(
'Project ID not found: %s' % self.tenant_bar['name'],
six.text_type(e))
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
domain1 = unit.new_domain_ref()
self.resource_api.create_domain(domain1['id'], domain1)
new_group = unit.new_group_ref(domain_id=domain1['id'])
new_group = self.identity_api.create_group(new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_admin['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate(self.make_request(),
body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
def test_belongs_to_no_tenant(self):
r = self.controller.authenticate(
self.make_request(),
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
query_string = 'belongsTo=%s' % self.tenant_bar['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
self.make_request(is_admin=True, query_string=query_string),
token_id=unscoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token_head,
self.make_request(is_admin=True, query_string=query_string),
token_id=unscoped_token_id)
def test_belongs_to(self):
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name=self.tenant_bar['name'])
scoped_token = self.controller.authenticate(self.make_request(),
body_dict)
scoped_token_id = scoped_token['access']['token']['id']
query_string = 'belongsTo=%s' % uuid.uuid4().hex
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
self.make_request(is_admin=True, query_string=query_string),
token_id=scoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token_head,
self.make_request(is_admin=True, query_string=query_string),
token_id=scoped_token_id)
query_string = 'belongsTo=%s' % self.tenant_bar['id']
self.controller.validate_token(
self.make_request(is_admin=True, query_string=query_string),
token_id=scoped_token_id
)
self.controller.validate_token_head(
self.make_request(is_admin=True, query_string=query_string),
token_id=scoped_token_id
)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
unscoped_token = self.controller.authenticate(
self.request_with_remote_user, body_dict)
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
tenant_name='BAR')
# using unscoped token without remote user context fails
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_request, body_dict)
# using token with remote user context succeeds
scoped_token = self.controller.authenticate(
self.request_with_remote_user, body_dict)
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
project1 = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project1['id'], project1)
role_one = unit.new_role_ref(id='role_one')
self.role_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
token = self.controller.authenticate(self.empty_request, body_dict)
# Ensure it is valid
token_id = token['access']['token']['id']
self.controller.validate_token(self.make_request(is_admin=True),
token_id=token_id)
# Delete the role, which should invalidate the token
role_controller.delete_role(self.make_request(is_admin=True),
role_one['id'])
# Check the token is now invalid
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
self.make_request(is_admin=True),
token_id=token_id)
def test_deleting_role_assignment_does_not_revoke_unscoped_token(self):
admin_request = self.make_request(is_admin=True)
project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project['id'], project)
role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project['id'], role['id'])
# Get an unscoped token.
token = self.controller.authenticate(
self.make_request(),
_build_user_auth(username=self.user_foo['name'],
password=self.user_foo['password']))
token_id = token['access']['token']['id']
# Ensure it is valid
self.controller.validate_token(admin_request, token_id=token_id)
# Delete the role assignment, which should not invalidate the token,
# because we're not consuming it with just an unscoped token.
self.assignment_api.remove_role_from_user_and_project(
self.user_foo['id'], project['id'], role['id'])
# Ensure it is still valid
self.controller.validate_token(admin_request, token_id=token_id)
def test_only_original_audit_id_is_kept(self):
def get_audit_ids(token):
return token['access']['token']['audit_ids']
# get a token
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
starting_audit_id = get_audit_ids(unscoped_token)[0]
self.assertIsNotNone(starting_audit_id)
# get another token to ensure the correct parent audit_id is set
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(self.make_request(),
body_dict)
audit_ids = get_audit_ids(unscoped_token_2)
self.assertThat(audit_ids, matchers.HasLength(2))
self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
# get another token from token 2 and ensure the correct parent
# audit_id is set
body_dict = _build_user_auth(token=unscoped_token_2["access"]["token"])
unscoped_token_3 = self.controller.authenticate(self.make_request(),
body_dict)
audit_ids = get_audit_ids(unscoped_token_3)
self.assertThat(audit_ids, matchers.HasLength(2))
self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
def test_revoke_by_audit_chain_id_original_token(self):
self.config_fixture.config(group='token', revoke_by_id=False)
# get a token
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
token_id = unscoped_token['access']['token']['id']
self.time_fixture.advance_time_seconds(1)
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(self.make_request(),
body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
self.time_fixture.advance_time_seconds(1)
self.token_provider_api.revoke_token(token_id, revoke_chain=True)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_id)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_2_id)
def test_revoke_by_audit_chain_id_chained_token(self):
self.config_fixture.config(group='token', revoke_by_id=False)
# get a token
body_dict = _build_user_auth(username='FOO', password='foo2')
unscoped_token = self.controller.authenticate(self.make_request(),
body_dict)
token_id = unscoped_token['access']['token']['id']
self.time_fixture.advance_time_seconds(1)
# get a second token
body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate(self.make_request(),
body_dict)
token_2_id = unscoped_token_2['access']['token']['id']
self.time_fixture.advance_time_seconds(1)
self.token_provider_api.revoke_token(token_2_id, revoke_chain=True)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_id)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v2_token,
token_id=token_2_id)
def _mock_audit_info(self, parent_audit_id):
# NOTE(morgainfainberg): The token model and other cases that are
# extracting the audit id expect 'None' if the audit id doesn't
# exist. This ensures that the audit_id is None and the
# audit_chain_id will also return None.
return [None, None]
class UUIDAuthWithToken(AuthWithToken, AuthTest):
def config_overrides(self):
super(UUIDAuthWithToken, self).config_overrides()
self.config_fixture.config(group='token', provider='uuid')
class FernetAuthWithToken(AuthWithToken, AuthTest):
def config_overrides(self):
super(FernetAuthWithToken, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
self.assertRaises(exception.NotImplemented,
self.controller.authenticate,
self.request_with_remote_user,
body_dict)
def test_deleting_role_revokes_token(self):
self.skip_test_overrides('Fernet with v2.0 and revocation is broken')
class PKIAuthWithToken(AuthWithToken, AuthTest):
def config_overrides(self):
super(PKIAuthWithToken, self).config_overrides()
self.config_fixture.config(group='token', provider='pki')
class PKIZAuthWithToken(AuthWithToken, AuthTest):
def config_overrides(self):
super(PKIZAuthWithToken, self).config_overrides()
self.config_fixture.config(group='token', provider='pkiz')
class AuthWithPasswordCredentials(AuthTest):
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
body_dict = _build_user_auth(
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(), body_dict)
def test_auth_valid_user_invalid_password(self):
"""Verify exception is raised if invalid password."""
body_dict = _build_user_auth(
username="FOO",
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(), body_dict)
def test_auth_empty_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(
username="FOO",
password="")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_request, body_dict)
def test_auth_no_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(username="FOO")
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_blank_password_credentials(self):
"""Sending empty dict as passwordCredentials raises 400 Bad Requset."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_authenticate_no_username(self):
"""Verify skipping username raises the right exception."""
body_dict = _build_user_auth(password="pass",
tenant_name="demo")
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
self.make_request(), body_dict)
def test_bind_without_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='BAR')
token = self.controller.authenticate(self.make_request(), body_dict)
self.assertNotIn('bind', token['access']['token'])
def test_change_default_domain_id(self):
# If the default_domain_id config option is not the default then the
# user in auth data is from the new default domain.
# 1) Create a new domain.
new_domain = unit.new_domain_ref()
new_domain_id = new_domain['id']
self.resource_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
new_user = unit.create_user(self.identity_api,
name=self.user_foo['name'],
domain_id=new_domain_id)
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# 4) Authenticate as "foo" using the password in the new domain.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=new_user['password'])
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate(self.make_request(), body_dict)
class AuthWithRemoteUser(object):
def test_unscoped_remote_authn(self):
"""Verify getting an unscoped token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
local_token = self.controller.authenticate(
self.make_request(), body_dict)
body_dict = _build_user_auth()
remote_token = self.controller.authenticate(
self.request_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token,
enforce_audit_ids=False)
def test_unscoped_remote_authn_jsonless(self):
"""Verify that external auth with invalid request fails."""
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
self.make_request(environ={'REMOTE_USER': 'FOO'}),
None)
def test_scoped_remote_authn(self):
"""Verify getting a token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name='BAR')
local_token = self.controller.authenticate(
self.make_request(), body_dict)
body_dict = _build_user_auth(
tenant_name='BAR')
remote_token = self.controller.authenticate(
self.request_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token,
enforce_audit_ids=False)
def test_scoped_nometa_remote_authn(self):
"""Verify getting a token with external authn and no metadata."""
body_dict = _build_user_auth(
username='TWO',
password='two2',
tenant_name='BAZ')
local_token = self.controller.authenticate(
self.make_request(), body_dict)
body_dict = _build_user_auth(tenant_name='BAZ')
remote_token = self.controller.authenticate(
self.make_request(environ={'REMOTE_USER': 'TWO'}), body_dict)
self.assertEqualTokens(local_token, remote_token,
enforce_audit_ids=False)
def test_scoped_remote_authn_invalid_user(self):
"""Verify that external auth with invalid user fails."""
body_dict = _build_user_auth(tenant_name="BAR")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.make_request(environ={'REMOTE_USER': uuid.uuid4().hex}),
body_dict)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.request_with_remote_user,
body_dict)
self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.config_fixture.config(group='token', bind=['x509'])
body_dict = _build_user_auth(tenant_name='BAR')
token = self.controller.authenticate(self.request_with_remote_user,
body_dict)
self.assertNotIn('bind', token['access']['token'])
class FernetAuthWithRemoteUser(AuthWithRemoteUser, AuthTest):
def config_overrides(self):
super(FernetAuthWithRemoteUser, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
# NOTE(lbragstad): Bind authentication is not supported by the Fernet
# provider.
self.assertRaises(exception.NotImplemented,
self.controller.authenticate,
self.request_with_remote_user,
body_dict)
class UUIDAuthWithRemoteUser(AuthWithRemoteUser, AuthTest):
def config_overrides(self):
super(UUIDAuthWithRemoteUser, self).config_overrides()
self.config_fixture.config(group='token', provider='uuid')
class PKIAuthWithRemoteUser(AuthWithRemoteUser, AuthTest):
def config_overrides(self):
super(PKIAuthWithRemoteUser, self).config_overrides()
self.config_fixture.config(group='token', provider='pki')
class PKIZAuthWithRemoteUser(AuthWithRemoteUser, AuthTest):
def config_overrides(self):
super(PKIZAuthWithRemoteUser, self).config_overrides()
self.config_fixture.config(group='token', provider='pkiz')
class AuthWithTrust(object):
def setUp(self):
super(AuthWithTrust, self).setUp()
self.trust_controller = trust.controllers.TrustV3()
self.auth_v3_controller = auth.controllers.Auth()
self.trustor = self.user_foo
self.trustee = self.user_two
self.assigned_roles = [self.role_member['id'],
self.role_browser['id']]
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
self.sample_data = {'trustor_user_id': self.trustor['id'],
'trustee_user_id': self.trustee['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True,
'roles': [{'id': self.role_browser['id']},
{'name': self.role_member['name']}]}
def config_overrides(self):
super(AuthWithTrust, self).config_overrides()
self.config_fixture.config(group='trust', enabled=True)
def _create_auth_request(self, token_id):
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.token_provider_api.validate_token(token_id))
auth_context = authorization.token_to_auth_context(token_ref)
# NOTE(gyee): if public_endpoint and admin_endpoint are not set, which
# is the default, the base url will be constructed from the environment
# variables wsgi.url_scheme, SERVER_NAME, SERVER_PORT, and SCRIPT_NAME.
# We have to set them in the context so the base url can be constructed
# accordingly.
environ = {authorization.AUTH_CONTEXT_ENV: auth_context,
'wsgi.url_scheme': 'http',
'HTTP_HOST': HOST_URL,
'SCRIPT_NAME': '/v3',
'SERVER_PORT': '80',
'SERVER_NAME': HOST}
req = self.make_request(environ=environ)
req.context_dict['token_id'] = token_id
# NOTE(jamielennox): This wouldn't be necessary if these were calls via
# the wsgi interface instead of directly creating a request to pass to
# a controller.
req.context.auth_token = token_id
req.context.user_id = auth_context.get('user_id')
req.context.project_id = auth_context.get('project_id')
req.context.domain_id = auth_context.get('domain_id')
req.context.domain_name = auth_context.get('domain_name')
req.context.user_domain_id = auth_context.get('user_domain_id')
req.context.roles = auth_context.get('roles')
req.context.trust_id = auth_context.get('trust_id')
req.context.trustor_id = auth_context.get('trustor_id')
req.context.trustee_id = auth_context.get('trustee_id')
return req
def create_trust(self, trust_data, trustor_name, expires_at=None,
impersonation=True):
username = trustor_name
password = 'foo2'
unscoped_token = self.get_unscoped_token(username, password)
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
trust_data_copy = copy.deepcopy(trust_data)
trust_data_copy['expires_at'] = expires_at
trust_data_copy['impersonation'] = impersonation
return self.trust_controller.create_trust(
request, trust=trust_data_copy)['trust']
def get_unscoped_token(self, username, password='foo2'):
body_dict = _build_user_auth(username=username, password=password)
return self.controller.authenticate(self.make_request(), body_dict)
def build_v2_token_request(self, username, password, trust,
tenant_id=None):
if not tenant_id:
tenant_id = self.tenant_bar['id']
unscoped_token = self.get_unscoped_token(username, password)
unscoped_token_id = unscoped_token['access']['token']['id']
request_body = _build_user_auth(token={'id': unscoped_token_id},
trust_id=trust['id'],
tenant_id=tenant_id)
return request_body
def test_create_trust_bad_data_fails(self):
unscoped_token = self.get_unscoped_token(self.trustor['name'])
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
bad_sample_data = {'trustor_user_id': self.trustor['id'],
'project_id': self.tenant_bar['id'],
'roles': [{'id': self.role_browser['id']}]}
self.assertRaises(exception.ValidationError,
self.trust_controller.create_trust,
request, trust=bad_sample_data)
def test_create_trust_no_roles(self):
unscoped_token = self.get_unscoped_token(self.trustor['name'])
req = self.make_request()
req.context_dict['token_id'] = unscoped_token['access']['token']['id']
self.sample_data['roles'] = []
self.assertRaises(exception.Forbidden,
self.trust_controller.create_trust,
req, trust=self.sample_data)
def test_create_trust(self):
expires_at = (timeutils.utcnow() +
datetime.timedelta(minutes=10)).strftime(TIME_FORMAT)
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
expires_at=expires_at)
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'],
fmt=TIME_FORMAT))
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
new_trust['links']['self'])
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
new_trust['roles_links']['self'])
for role in new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_expires_bad(self):
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust, self.sample_data,
self.trustor['name'], expires_at="bad")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust, self.sample_data,
self.trustor['name'], expires_at="")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust, self.sample_data,
self.trustor['name'], expires_at="Z")
def test_create_trust_expires_older_than_now(self):
self.assertRaises(exception.ValidationExpirationError,
self.create_trust, self.sample_data,
self.trustor['name'],
expires_at="2010-06-04T08:44:31.999999Z")
def test_create_trust_without_project_id(self):
"""Verify that trust can be created without project id.
        Also verify that a token can be generated with that trust.
"""
unscoped_token = self.get_unscoped_token(self.trustor['name'])
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
self.sample_data['project_id'] = None
self.sample_data['roles'] = []
new_trust = self.trust_controller.create_trust(
request, trust=self.sample_data)['trust']
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
self.assertIs(True, new_trust['impersonation'])
auth_response = self.fetch_v2_token_from_trust(new_trust)
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
def test_get_trust(self):
unscoped_token = self.get_unscoped_token(self.trustor['name'])
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
new_trust = self.trust_controller.create_trust(
request, trust=self.sample_data)['trust']
trust = self.trust_controller.get_trust(request,
new_trust['id'])['trust']
self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
expires_at=None, impersonation=False)
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
self.assertIs(False, new_trust['impersonation'])
auth_response = self.fetch_v2_token_from_trust(new_trust)
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], new_trust['trustee_user_id'])
def test_create_trust_impersonation(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
self.assertIs(True, new_trust['impersonation'])
auth_response = self.fetch_v2_token_from_trust(new_trust)
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
def test_token_from_trust_wrong_user_fails(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
request_body = self.build_v2_token_request('FOO', 'foo2', new_trust)
self.assertRaises(exception.Forbidden, self.controller.authenticate,
self.make_request(), request_body)
def test_token_from_trust_wrong_project_fails(self):
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_baz['id'], assigned_role)
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
request_body = self.build_v2_token_request('TWO', 'two2', new_trust,
self.tenant_baz['id'])
self.assertRaises(exception.Forbidden, self.controller.authenticate,
self.make_request(), request_body)
def fetch_v2_token_from_trust(self, trust):
request_body = self.build_v2_token_request('TWO', 'two2', trust)
auth_response = self.controller.authenticate(self.make_request(),
request_body)
return auth_response
def fetch_v3_token_from_trust(self, trust, trustee):
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"id": trustee["id"],
"password": trustee["password"]
}
}
},
'scope': {
'project': {
'id': self.tenant_baz['id']
}
}
}
auth_response = self.auth_v3_controller.authenticate_for_token(
self.make_request(), v3_password_data)
token = auth_response.headers['X-Subject-Token']
v3_req_with_trust = {
"identity": {
"methods": ["token"],
"token": {"id": token}},
"scope": {
"OS-TRUST:trust": {"id": trust['id']}}}
token_auth_response = self.auth_v3_controller.authenticate_for_token(
self.make_request(), v3_req_with_trust)
return token_auth_response
def test_validate_v3_trust_scoped_token_against_v2_succeeds(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
trust_token = auth_response.headers['X-Subject-Token']
self.controller.validate_token(self.make_request(is_admin=True),
trust_token)
def test_create_v3_token_from_trust(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
trust_token_user = auth_response.json['token']['user']
self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], new_trust['id'])
self.assertEqual(self.trustor['id'],
trust_token_trust['trustor_user']['id'])
self.assertEqual(self.trustee['id'],
trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
trust_token = auth_response.headers['X-Subject-Token']
v3_token_data = {'identity': {
'methods': ['token'],
'token': {'id': trust_token}
}}
self.assertRaises(
exception.Forbidden,
self.auth_v3_controller.authenticate_for_token,
self.make_request(), v3_token_data)
def test_token_from_trust(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
auth_response = self.fetch_v2_token_from_trust(new_trust)
self.assertIsNotNone(auth_response)
self.assertEqual(2,
len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
def assert_token_count_for_trust(self, trust, expected_value):
tokens = self.token_provider_api._persistence._list_tokens(
self.trustee['id'], trust_id=trust['id'])
token_count = len(tokens)
self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
self.assert_token_count_for_trust(new_trust, 0)
self.fetch_v2_token_from_trust(new_trust)
self.assert_token_count_for_trust(new_trust, 1)
self.token_provider_api._persistence.delete_tokens_for_user(
self.trustee['id'])
self.assert_token_count_for_trust(new_trust, 0)
def test_token_from_trust_cant_get_another_token(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
auth_response = self.fetch_v2_token_from_trust(new_trust)
trust_token_id = auth_response['access']['token']['id']
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
def test_delete_trust_revokes_token(self):
unscoped_token = self.get_unscoped_token(self.trustor['name'])
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
trust_token_resp = self.fetch_v2_token_from_trust(new_trust)
trust_scoped_token_id = trust_token_resp['access']['token']['id']
self.controller.validate_token(
self.make_request(is_admin=True),
token_id=trust_scoped_token_id)
trust_id = new_trust['id']
self.time_fixture.advance_time_seconds(1)
self.trust_controller.delete_trust(request, trust_id=trust_id)
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
self.make_request(is_admin=True),
token_id=trust_scoped_token_id)
def test_token_from_trust_with_no_role_fails(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
def test_expired_trust_get_token_fails(self):
expires_at = (timeutils.utcnow() +
datetime.timedelta(minutes=5)).strftime(TIME_FORMAT)
time_expired = timeutils.utcnow() + datetime.timedelta(minutes=10)
new_trust = self.create_trust(self.sample_data, self.trustor['name'],
expires_at)
with mock.patch.object(timeutils, 'utcnow') as mock_now:
mock_now.return_value = time_expired
request_body = self.build_v2_token_request('TWO', 'two2',
new_trust)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate,
self.make_request(), request_body)
def test_token_from_trust_with_wrong_role_fails(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'],
self.tenant_bar['id'],
self.role_other['id'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
def test_do_not_consume_remaining_uses_when_get_token_fails(self):
trust_data = copy.deepcopy(self.sample_data)
trust_data['remaining_uses'] = 3
new_trust = self.create_trust(trust_data, self.trustor['name'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
self.assertRaises(exception.Forbidden,
self.controller.authenticate,
self.make_request(),
request_body)
unscoped_token = self.get_unscoped_token(self.trustor['name'])
request = self._create_auth_request(
unscoped_token['access']['token']['id'])
trust = self.trust_controller.get_trust(request,
new_trust['id'])['trust']
self.assertEqual(3, trust['remaining_uses'])
def disable_user(self, user):
user['enabled'] = False
self.identity_api.update_user(user['id'], user)
def test_trust_get_token_fails_if_trustor_disabled(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
request_body = self.build_v2_token_request(self.trustee['name'],
self.trustee['password'],
new_trust)
self.disable_user(self.trustor)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
def test_trust_get_token_fails_if_trustee_disabled(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
request_body = self.build_v2_token_request(self.trustee['name'],
self.trustee['password'],
new_trust)
self.time_fixture.advance_time_seconds(1)
self.disable_user(self.trustee)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate, self.make_request(), request_body)
def test_validate_trust_scoped_token_against_v2(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
trust_token_resp = self.fetch_v2_token_from_trust(new_trust)
trust_scoped_token_id = trust_token_resp['access']['token']['id']
self.controller.validate_token(self.make_request(is_admin=True),
token_id=trust_scoped_token_id)
def test_trust_get_token_fails_with_future_token_if_trustee_disabled(self):
"""Test disabling trustee and using an unrevoked token.
This test simulates what happens when a token is generated *after* the
disable event. Technically this should not happen, but it's possible in
a multinode deployment with only a slight clock skew.
"""
# We need to turn off caching here since we are bypassing the
# identity_api to disable a user. The identity_api would typically
# invalidate the cache when a user is disabled but it will also emit a
# notification/callback to prune all user tokens from the backend,
# which defeats the purpose of this test.
self.config_fixture.config(group='identity', caching=False)
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
# NOTE(lbragstad): We want to make sure we control the issued_at time
# of the token.
future_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
# get a token from the future
with mock.patch.object(timeutils, 'utcnow', lambda: future_time):
request_body = self.build_v2_token_request(
self.trustee['name'], self.trustee['password'], new_trust)
# We need to disable the user using the driver directly for the same
# reason stated above. This test really just ensures that if a trustee
# is disabled the logic in
# keystone.token.controller:Auth._authenticate_token will throw a
# Forbidden.
user = {'enabled': False}
self.identity_api.driver.update_user(self.trustee['id'], user)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
class UUIDAuthWithTrust(AuthWithTrust, AuthTest):
def config_overrides(self):
super(UUIDAuthWithTrust, self).config_overrides()
self.config_fixture.config(group='token', provider='uuid')
def setUp(self):
super(UUIDAuthWithTrust, self).setUp()
class FernetAuthWithTrust(AuthWithTrust, AuthTest):
def config_overrides(self):
super(FernetAuthWithTrust, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet',
cache_on_issue=True)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def setUp(self):
super(FernetAuthWithTrust, self).setUp()
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
# TODO(lbragstad): Rewrite this test to not rely on the persistence
# backend. This same test can be exercised through the API.
msg = 'The Fernet token provider does not support token persistence'
self.skipTest(msg)
def test_delete_trust_revokes_token(self):
# NOTE(amakarov): have to override this for Fernet as TokenNotFound
# can't be raised for non-persistent token, but deleted trust will
# cause TrustNotFound exception.
self.assertRaises(
exception.TrustNotFound,
super(FernetAuthWithTrust, self).test_delete_trust_revokes_token)
def test_trust_get_token_fails_with_future_token_if_trustee_disabled(self):
"""Test disabling trustee and using an unrevoked token.
This test simulates what happens when a Fernet token is generated
*after* the disable event. Technically this should not happen, but
it's possible in a multinode deployment with only a slight clock skew.
"""
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
        # NOTE(dstanek): cryptography.fernet gets its timestamps from
# time.time(). We need to control what it gets.
epoch = datetime.datetime.utcfromtimestamp(0)
future_time = (timeutils.utcnow() - epoch).total_seconds() + 5
# get a token from the future
with mock.patch.object(time, 'time', lambda: future_time):
request_body = self.build_v2_token_request(
self.trustee['name'], self.trustee['password'], new_trust)
self.disable_user(self.trustee)
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, self.make_request(), request_body)
class TokenExpirationTest(AuthTest):
@mock.patch.object(timeutils, 'utcnow')
def _maintain_token_expiration(self, mock_utcnow):
"""Token expiration should be maintained after re-auth & validation."""
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
r = self.controller.authenticate(
self.make_request(),
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
original_expiration = r['access']['token']['expires']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
r = self.controller.validate_token(
self.make_request(is_admin=True),
token_id=unscoped_token_id)
self.assertEqual(
timeutils.parse_isotime(original_expiration),
timeutils.parse_isotime(r['access']['token']['expires'])
)
mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
r = self.controller.authenticate(
self.make_request(),
auth={
'token': {
'id': unscoped_token_id,
},
'tenantId': self.tenant_bar['id'],
})
scoped_token_id = r['access']['token']['id']
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
r = self.controller.validate_token(
self.make_request(is_admin=True),
token_id=scoped_token_id)
self.assertEqual(
timeutils.parse_isotime(original_expiration),
timeutils.parse_isotime(r['access']['token']['expires'])
)
def test_maintain_uuid_token_expiration(self):
self.config_fixture.config(group='token', provider='uuid')
self._maintain_token_expiration()
class AuthCatalog(unit.SQLDriverOverrides, AuthTest):
"""Test for the catalog provided in the auth response."""
def config_files(self):
config_files = super(AuthCatalog, self).config_files()
# We need to use a backend that supports disabled endpoints, like the
# SQL backend.
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def _create_endpoints(self):
def create_region(**kwargs):
ref = unit.new_region_ref(**kwargs)
self.catalog_api.create_region(ref)
return ref
def create_endpoint(service_id, region, **kwargs):
endpoint = unit.new_endpoint_ref(region_id=region,
service_id=service_id, **kwargs)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
return endpoint
# Create a service for use with the endpoints.
def create_service(**kwargs):
ref = unit.new_service_ref(**kwargs)
self.catalog_api.create_service(ref['id'], ref)
return ref
enabled_service_ref = create_service(enabled=True)
disabled_service_ref = create_service(enabled=False)
region = create_region()
# Create endpoints
enabled_endpoint_ref = create_endpoint(
enabled_service_ref['id'], region['id'])
create_endpoint(
enabled_service_ref['id'], region['id'], enabled=False,
interface='internal')
create_endpoint(
disabled_service_ref['id'], region['id'])
return enabled_endpoint_ref
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate(self.make_request(), body_dict)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region_id'],
}
self.assertEqual(exp_endpoint, endpoint)
def test_validate_catalog_disabled_endpoint(self):
"""On validate, get back a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate(self.make_request(), body_dict)
# Validate
token_id = token['access']['token']['id']
validate_ref = self.controller.validate_token(
self.make_request(is_admin=True),
token_id=token_id)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region_id'],
}
self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(unit.TestCase):
def test_add_non_default_auth_method(self):
self.config_fixture.config(group='auth',
methods=['password', 'token', 'custom'])
keystone.conf.auth.setup_authentication()
self.assertTrue(hasattr(CONF.auth, 'custom'))
|
length = 10
width = 5
area = length * width
print("Area is: %s square meters" % area)
|
from flask import Blueprint
from flask import current_app as app
from flask import request, redirect, url_for
from functools import wraps
from uuid import uuid4
from Models.User import User
from Models.Session import Session
from datetime import datetime
import uuid
import hashlib
class AuthenticationService:
    def __init__(self):
        pass
"""
Return the user
"""
def getUser(self, **kwargs):
if not User.objects(**kwargs):
return False
        user = User.objects.get(**kwargs)  # use objects.get to retrieve a single result
return user
"""
append their recent datasets they've opened
"""
def updateRecentDatasets(self, sessionId, datasetId):
user = self.verifySessionAndReturnUser(sessionId)
if not user:
return False
recentDatasets = user.recentDatasets
try:
recentDatasets.insert(0, datasetId)
if len(recentDatasets) > 5:
recentDatasets = recentDatasets[:5]
        except Exception:
            # the user has no recent-dataset list yet (or it is not a list),
            # so start a fresh one
            recentDatasets = [datasetId]
user.update(recentDatasets=recentDatasets)
return True
"""
Authenticate the user (Login).
@param: email
@param: password
    @return: the new Session on success, False otherwise
"""
def authenticate(self,email,password):
hashedPassword = self.saltPassword(password)
user = self.getUser(email=email)
if not user:
return False
if hashedPassword != user.password:
return False
sessionId = uuid4()
session = Session(user=user, sessionId=sessionId)
session.save()
return session
"""
Save the user to the database upon signup if they don't exist
"""
def save(self, user):
if self.getUser(email=user.email):
return False
else:
if user.password:
user.password = self.saltPassword(user.password)
user.save()
return True
"""
Takes a password and returns a hashed version of it
"""
def saltPassword(self, password):
yummyYummySalty = "dHw33Th"
db_password = password+yummyYummySalty
hasher = hashlib.sha256(db_password.encode())
hashLevelOne = hasher.hexdigest()
supaHasher = hashlib.sha256(hashLevelOne.encode())
hashLevelTwo = supaHasher.hexdigest()
return hashLevelTwo
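    # Note (added for clarity, not part of the original code): saltPassword()
    # appends a fixed, application-wide salt and applies SHA-256 twice; the
    # same transformation is reused in authenticate() and save(), so a stored
    # hash is verified simply by re-hashing the supplied password and
    # comparing the two digests.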
"""
    Currently, new users are never created as administrators.
"""
def signup(self, document):
user = None
if not document["password"]:
user = User(
firstName=document["firstName"],
lastName=document["lastName"],
email=document["email"],
organization=document["organization"],
location=document["location"],
userType=document["userType"],
isAdmin=False,
recentDatasets=[],
resetId="",
confirmationId="",
isConfirmed=True
)
else:
user = User(
firstName=document["firstName"],
lastName=document["lastName"],
email=document["email"],
password=document["password"],
organization=document["organization"],
location=document["location"],
userType=document["userType"],
isAdmin=False,
recentDatasets=[],
resetId="",
confirmationId="",
isConfirmed=False
)
        user.validate()  # TODO: wrap in try/except and report a helpful error if a field has the wrong type
if (self.save(user)):
return True
else:
return False
def emailIsAlreadyInUse(self, email):
return User.objects(email=email)
def setUserConfirmationId(self, user, userConfirmationId):
user.update(confirmationId=str(userConfirmationId))
    def checkUserConfirmationId(self, userConfirmationId):
        # .first() returns None instead of raising when no user matches
        user = User.objects(confirmationId=userConfirmationId).first()
        if user:
            return user
        return False
def setUserAsConfirmed(self, user):
User.objects.get(email=user.email).update(isConfirmed=True)
def isUserConfirmed(self, user):
if user.isConfirmed:
return True
return False
def resetPasswordSame(self, user, password):
resetPassword = self.saltPassword(password)
if (user.password == resetPassword):
return True
return False
def setUserResetID(self, user, resetPasswordId):
User.objects.get(email=user.email).update(resetId=str(resetPasswordId))
    def checkUserResetID(self, resetPasswordId):
        # .first() returns None instead of raising when no user matches
        user = User.objects(resetId=resetPasswordId).first()
        if user:
            return user
        else:
            return False
def changeEmail(self, oldEmail, newEmail):
User.objects.get(email=oldEmail).update(email=newEmail)
def changePassword(self, email, password):
User.objects.get(email=email).update(password=self.saltPassword(password))
def getSession(self, sessionId):
sessionUUID = uuid.UUID(sessionId)
return Session.objects.get(sessionId=sessionUUID)
def logout(self, sessionId):
session = self.getSession(sessionId)
if (session):
session.delete()
        else:
            # getSession() normally raises on a missing session; make the
            # fallback explicit instead of using a bare `raise`
            raise LookupError("No session found for id %s" % sessionId)
def verifySessionAndReturnUser(self, sessionId):
session = self.getSession(sessionId)
if (datetime.utcnow() < session.dateExpires):
return User.objects.get(id=session.user.id)
else:
return False
|
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import system_vars_c, prod_basis_c, tddft_iter_c
dname = os.path.dirname(os.path.abspath(__file__))
sv = system_vars_c().init_siesta_xml(label='water', cd=dname)
pb = prod_basis_c().init_prod_basis_pp(sv, jcutoff=7)
td = tddft_iter_c(pb.sv, pb, tddft_iter_broadening=1e-2, xc_code='RPA')
class KnowValues(unittest.TestCase):
def test_non_inter_polariz(self):
""" This is non-interacting polarizability TDDFT with SIESTA starting point """
omegas = np.linspace(0.0,2.0,500)+1j*td.eps
pxx = td.comp_nonin(omegas).imag
#pxx = np.zeros_like(omegas)
#vext = np.transpose(td.moms1)
#for iomega,omega in enumerate(omegas): pxx[iomega] = -np.dot(td.apply_rf0(vext[0,:], omega), vext[0,:]).imag
data = np.array([27.2114*omegas.real, pxx])
data_ref = np.loadtxt(dname+'/water.tddft_iter.omega.pxx.txt-ref')
self.assertTrue(np.allclose(data_ref,data.T, rtol=1.0, atol=1e-05))
#np.savetxt('water.tddft_iter.omega.nonin.pxx.txt', data.T, fmt=['%f','%f'])
def test_inter_polariz(self):
""" This is interacting polarizability with SIESTA starting point """
omegas = np.linspace(0.0,2.0,150)+1j*td.eps
pxx = -td.comp_polariz_xx(omegas).imag
data = np.array([omegas.real*27.2114, pxx])
data_ref = np.loadtxt(dname+'/water.tddft_iter.omega.inter.pxx.txt-ref')
#print(' td.rf0_ncalls ', td.rf0_ncalls)
#print(' td.matvec_ncalls ', td.matvec_ncalls)
self.assertTrue(np.allclose(data_ref,data.T, rtol=1.0, atol=1e-05))
#np.savetxt('water.tddft_iter.omega.inter.pxx.txt', data.T, fmt=['%f','%f'])
if __name__ == "__main__": unittest.main()
|
from __future__ import absolute_import
# import defaults
from importlib import import_module
from .base import *
overrides = import_module('unpp_api.settings.{}'.format(ENV))
# apply imported overrides
for attr in dir(overrides):
# we only want to import settings (which have to be variables in ALLCAPS)
if attr.isupper():
# update our scope with the imported variables. We use globals() instead of locals()
# because locals() is readonly and it returns a copy of itself upon assignment.
globals()[attr] = getattr(overrides, attr)
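# A minimal sketch of what an environment override module (e.g.
# unpp_api/settings/dev.py; the module name is assumed for illustration)
# might contain. Only the ALLCAPS names are copied into this module by the
# loop above:
#
#     DEBUG = True
#     ALLOWED_HOSTS = ['localhost']
#     _helper = 'ignored'   # not upper-case, so the loop above skips it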
|
from os.path import exists, join
import numpy as np
from nerfactor.util import logging as logutil, io as ioutil, img as imgutil
from third_party.xiuminglib import xiuminglib as xm
logger = logutil.Logger(loggee="util/vis")
def make_frame(
view_dir, layout, put_text=True, put_text_param=None, data_root=None,
rgb_embed_light=None):
if put_text_param is None:
put_text_param = {}
if 'text_loc_ratio' not in put_text_param:
put_text_param['text_loc_ratio'] = 0.05
if 'text_size_ratio' not in put_text_param:
put_text_param['text_size_ratio'] = 0.05
if 'font_path' not in put_text_param:
put_text_param['font_path'] = xm.const.Path.open_sans_regular
layout = np.array(layout)
if layout.ndim == 1:
layout = np.reshape(layout, (1, -1))
elif layout.ndim == 2:
pass
else:
raise ValueError(layout.ndim)
# Guaranteed to be 2D
frame = []
for row_names in layout:
frame.append([])
for name in row_names:
is_render = name.startswith('rgb')
is_nn = name == 'nn'
# Get path
if is_nn:
assert data_root is not None, \
"When including NN, you must provide `data_root`"
path = get_nearest_input(view_dir, data_root)
else:
path = join(view_dir, f'pred_{name}.png')
if not exists(path):
logger.warn("Skipping because of missing files:\n\t%s", path)
return None
img = xm.io.img.load(path)
img = img[:, :, :3] # discards alpha
hw = img.shape[:2]
# Optionally, embed the light used into right top corner of render
if is_render and rgb_embed_light is not None:
light = rgb_embed_light
frame_width = int(max(1 / 16 * light.shape[0], 1))
imgutil.frame_image(light, rgb=(1, 1, 1), width=frame_width)
light_vis_h = int(32 / 256 * hw[0]) # scale light probe size
light = xm.img.resize(light, new_h=light_vis_h)
img[:light.shape[0], -light.shape[1]:] = light
# NN already has embedded light
# Put label
if put_text:
font_color = (1, 1, 1) if is_render or is_nn else (0, 0, 0)
put_text_kwargs = {
'label_top_left_xy': (
int(put_text_param['text_loc_ratio'] * hw[1]),
int(put_text_param['text_loc_ratio'] * hw[0])),
'font_size': int(
put_text_param['text_size_ratio'] * hw[0]),
'font_color': font_color,
'font_ttf': put_text_param['font_path']}
if is_nn:
label = "Nearest Input"
elif is_render:
label = "Rendering"
elif name in ('normal', 'normals'):
label = "Normals"
elif name == 'lvis':
label = "Visibility (mean)"
elif name.startswith('lvis_olat_'):
label = "Visibility"
elif name == 'brdf':
label = "BRDF"
elif name == 'albedo':
label = "Albedo"
else:
raise NotImplementedError(name)
img = xm.vis.text.put_text(img, label, **put_text_kwargs)
frame[-1].append(img)
# Make collage
rows = []
for row in frame:
try:
rows.append(imgutil.hconcat(row))
        except Exception:
            # hconcat typically fails when images in a row have mismatched
            # shapes; surface the error instead of dropping into a debugger
            logger.error("Failed to concatenate a row of %d images", len(row))
            raise
frame = imgutil.vconcat(rows)
return frame
def get_nearest_input(view_dir, data_root):
metadata_path = join(view_dir, 'metadata.json')
metadata = ioutil.read_json(metadata_path)
id_ = metadata['id']
nearest_input_path = join(data_root, id_, 'nn.png')
return nearest_input_path
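# Hedged usage sketch (the paths and layout names below are assumptions for
# illustration): `layout` is a 1D or 2D grid of prediction names, each of
# which maps to a pred_<name>.png inside `view_dir` ('nn' instead pulls the
# nearest training input from `data_root`):
#
#     frame = make_frame(
#         '/output/vali_000',
#         layout=[['rgb', 'nn'], ['albedo', 'normal']],
#         data_root='/data/train')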
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
# WGAN-GP with R1-regularization Discriminator Loss function
class WGANGP_G(nn.Module):
def __init__(self, penalty=None):
super(WGANGP_G, self).__init__()
def forward(self, preds):
return F.softplus(-preds).mean()
class WGANGP_D(nn.Module):
def __init__(self, lam=10.0, eps=0.001, penalty_target=1.0, penalty_type='grad'):
super(WGANGP_D, self).__init__()
self.lam = lam
self.eps = eps
self.penalty_type = penalty_type
self.penalty_target = penalty_target
def forward(self, real, gen, img):
        loss = F.softplus(-real).mean() + F.softplus(gen).mean()
        penalty = 0.0  # fall back to no penalty for an unrecognised penalty_type
        if self.penalty_type == 'grad':
grad_real = grad(outputs=real.sum(),
inputs=img, create_graph=True)[0]
penalty = (grad_real.view(grad_real.size(0),
-1).norm(2, dim=1) ** 2).mean()
penalty *= self.lam / self.penalty_target**2
elif self.penalty_type == 'eps':
penalty = self.eps * (real ** 2).mean()
return loss + penalty
class MSE_G(nn.Module):
def __init__(self, reduction='mean'):
super(MSE_G, self).__init__()
self.reduction = reduction
def forward(self, preds):
return F.mse_loss(preds, torch.ones(preds.size(), dtype=preds.dtype,
device=preds.device), reduction=self.reduction)
class MSE_D(nn.Module):
def __init__(self, lam=10.0, eps=0.001, penalty_target=1.0,
penalty_type='grad', reduction='mean'):
super(MSE_D, self).__init__()
self.lam = lam
self.eps = eps
self.penalty_type = penalty_type
self.penalty_target = penalty_target
self.reduction = reduction
def forward(self, real, gen, img):
loss = 0.5*(F.mse_loss(real, torch.ones(real.size(), dtype=real.dtype, device=real.device),
reduction=self.reduction) +
F.mse_loss(gen, torch.zeros(gen.size(), dtype=gen.dtype, device=gen.device),
reduction=self.reduction))
if self.penalty_type == 'grad':
grad_real = grad(outputs=real.sum(),
inputs=img, create_graph=True)[0]
penalty = (grad_real.view(grad_real.size(0),
-1).norm(2, dim=1) ** 2).mean()
penalty *= self.lam / self.penalty_target**2
loss += penalty
elif self.penalty_type == 'eps':
penalty = self.eps * (real ** 2).mean()
loss += penalty
return loss
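# Hedged smoke test (not part of the original module): exercises the loss
# classes with toy tensors to show the expected call signatures. The shapes
# and the stand-in "discriminator scores" below are assumptions for
# illustration only.
if __name__ == "__main__":
    torch.manual_seed(0)
    img = torch.randn(4, 3, 8, 8, requires_grad=True)  # batch of "real" images
    real_scores = img.mean(dim=(1, 2, 3))              # stand-in for D(real); keeps the graph to img
    gen_scores = torch.randn(4)                        # stand-in for D(G(z))
    d_loss = WGANGP_D(lam=10.0, penalty_type='grad')(real_scores, gen_scores, img)
    g_loss = WGANGP_G()(gen_scores)
    print("D loss: %.4f  G loss: %.4f" % (d_loss.item(), g_loss.item()))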
|
#!/usr/bin/python3
import json
import csv
import os
import argparse
"""
read in excelint-cli generated JSON files and produce a CSV file, where each line contains the following:
Workbook name
Sheet name
Number of suspicious ranges
Number of weighted suspicious ranges
Number of cells in suspicious ranges
Suspiciousness threshold
Formatting discount
Number of formula cells
"""
def process_workbook(dirname, fname, output_file):
qualified_origname = os.path.join(dirname, fname)
with open(qualified_origname, 'r') as f:
with open(output_file, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
j = json.load(f)
for i in range(0, len(j)):
workbookName = j[i]['workbookName']
workbook = j[i]['worksheets']
for sheet in workbook.keys():
s = { 'workbookName' : workbookName,
'worksheet' : sheet,
'suspiciousRanges' : workbook[sheet]['suspiciousRanges'],
'weightedSuspiciousRanges' : workbook[sheet]['weightedSuspiciousRanges'],
'suspiciousCells' : workbook[sheet]['suspiciousCells'],
'suspiciousnessThreshold' : workbook[sheet]['suspiciousnessThreshold'],
'formattingDiscount' : workbook[sheet]['formattingDiscount'],
'numFormulaCells' : workbook[sheet]['numFormulaCells'] }
writer.writerow(s)
parser = argparse.ArgumentParser('json-to-csv.py')
parser.add_argument('--input', help='Process an input .json file, as generated by xlsx-to-json.py.')
parser.add_argument('--output', help='Name of output csv file.')
parser.add_argument('--directory', help='Process all .json files in a directory.')
args = parser.parse_args()
if args.output is None or (args.input is None and args.directory is None):
parser.print_help()
exit(-1)
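# Example invocations (hedged; the file and directory names are illustrative):
#   python3 json-to-csv.py --directory ./excelint-results --output summary.csv
#   python3 json-to-csv.py --input workbook-stats.json --output summary.csv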
# Write the header.
with open(args.output, 'w') as csvfile:
fieldnames = ['workbookName', 'worksheet', 'suspiciousRanges', 'weightedSuspiciousRanges', 'suspiciousCells', 'suspiciousnessThreshold', 'formattingDiscount', 'numFormulaCells']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
if args.directory is None:
process_workbook('.', args.input, args.output)
else:
fnames = os.listdir(args.directory)
for fname in fnames:
# Skip subdirectories
if os.path.isdir(os.path.join(args.directory, fname)):
continue
if fname.endswith("-stats.json") and not fname.startswith("~$"):
print("processing " + fname)
try:
process_workbook(args.directory, fname, args.output)
            except Exception:
                # Skip workbooks whose JSON cannot be read or parsed.
                pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseDataset
from .builder import (DATASETS, DATASOURCES, PIPELINES, build_dataloader,
build_dataset, build_datasource)
from .data_sources import * # noqa: F401,F403
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .deepcluster import DeepClusterDataset
from .multi_view import (MultiViewDataset, MultiViewDatasetwNegative,
                         MultiViewDatasetwOriginal, MultiViewDatasetwOriginalLog)
from .pipelines import * # noqa: F401,F403
from .relative_loc import RelativeLocDataset
from .rotation_pred import RotationPredDataset
from .samplers import * # noqa: F401,F403
from .single_view import SingleViewDataset
__all__ = [
    'DATASETS', 'DATASOURCES', 'PIPELINES', 'BaseDataset', 'build_dataloader',
    'build_dataset', 'build_datasource', 'ConcatDataset', 'RepeatDataset',
    'DeepClusterDataset', 'MultiViewDataset', 'SingleViewDataset',
    'RelativeLocDataset', 'RotationPredDataset', 'MultiViewDatasetwNegative',
    'MultiViewDatasetwOriginal', 'MultiViewDatasetwOriginalLog'
]
|
"""Visualization tooling."""
import itertools
from collections import defaultdict
import graphviz
from monkeys.typing import REGISTERED_TYPES, lookup_rtype, prettify_converted_type
def type_graph(simplify=False):
"""
Render graph of current type system.
"""
graph = graphviz.Digraph(format='svg')
graph.node(
u'\u03b5',
shape='circle',
style='dotted',
)
edge_pairs = set()
def add_edge(*pair):
if pair in edge_pairs:
return
graph.edge(*pair)
edge_pairs.add(pair)
simplified_graph = defaultdict(set)
for t in REGISTERED_TYPES:
targeting_functions = lookup_rtype(t, convert=False)
pretty_t = prettify_converted_type(t)
for targeting_function in targeting_functions:
params = targeting_function.readable_param_list
if not params:
add_edge(
u'\u03b5',
pretty_t
)
continue
for param in params:
simplified_graph[param].add(pretty_t)
if simplify:
# pretend these can go directly to the return type
for param in params:
add_edge(
param,
pretty_t
)
continue
elif len(list(params)) > 1:
# show composition of constructed type from constituents
graph.node(
targeting_function.readable_params,
shape='rect',
style='dashed',
)
for param in params:
add_edge(
param,
targeting_function.readable_params
)
add_edge(targeting_function.readable_params, pretty_t)
end_states = {
t
for t in
map(prettify_converted_type, REGISTERED_TYPES)
if not simplified_graph[t]
or simplified_graph[t] == {t}
}
for end_state in end_states:
graph.node(
end_state,
peripheries='2',
)
return graph
def node_graph(node):
"""Create a graph representing a node."""
graph = graphviz.Graph()
counter = itertools.count(1)
graph.node('0', label=str(node.f.__name__))
frontier = [('0', child) for child in node.children]
while frontier:
parent, node = frontier.pop()
node_num = str(next(counter))
graph.node(node_num, label=str(node.f.__name__))
graph.edge(parent, node_num)
frontier.extend((node_num, child) for child in node.children)
return graph
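# Hedged usage sketch (assumes types and functions have already been
# registered with monkeys elsewhere; the output file names are illustrative):
#
#     type_graph(simplify=True).render('type_system')  # writes type_system.svg
#     node_graph(tree_root).render('tree')             # `tree_root` is a grown node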
|
from django.urls import reverse
from rest_framework.views import status
from authors.apps.base_test import BaseTest
class DislikeLikeArticleTestCase(BaseTest):
"""Tests Like and Dislike articles views"""
def test_if_user_cannot_like_article_without_authentication(self):
"""Test if user cannot like article without authentication"""
response = self.client.put(path="/api/articles/how-to-feed-your-dragon/like")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.data["detail"], "Authentication credentials were not provided."
)
    def test_if_user_cannot_dislike_article_without_authentication(self):
        """Test if user cannot dislike article without authentication"""
response = self.client.put(path="/api/articles/how-to-feed-your-dragon/dislike")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.data["detail"], "Authentication credentials were not provided."
)
    def test_if_user_can_like_nonexistent_article(self):
        """Test if the user can like an article that does not exist"""
token = self.get_token()
response = self.client.put(
path="/api/articles/how-to-train-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data["message"], "The article does not exist.")
    def test_if_user_can_dislike_nonexistent_article(self):
        """Test if the user can dislike an article that does not exist"""
token = self.get_token()
response = self.client.put(
path="/api/articles/how-to-train-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data["message"], "The article does not exist.")
def test_if_user_liking_is_successful(self):
"""Test if user liking is successful, if like does not exist"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Added to Liked articles")
def test_successful_article_disliking(self):
"""Test a successful disliking of an article"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "You Dislike this Article")
def test_response_of_adding_a_like_after_adding_a_dislike(self):
"""Test the response of adding a like after adding a dislike"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "You Dislike this Article")
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data["message"], "Removed from dislike and Added to Liked articles"
)
def test_response_of_adding_a_dislike_after_adding_a_like(self):
"""Test the response of adding a dislike after adding a like"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Added to Liked articles")
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data["message"],
"Removed from Liked Articles and Added to Disliked articles",
)
def test_response_of_double_liking(self):
"""Test the response of liking an article twice"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Added to Liked articles")
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/like",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "You no longer like this article")
def test_response_of_double_disliking(self):
"""Test the response of disliking an article twice"""
token = self.get_token()
self.create_article(token, self.testArticle)
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "You Dislike this Article")
response = self.client.put(
path="/api/articles/how-to-feed-your-dragon/dislike",
HTTP_AUTHORIZATION="Bearer " + token,
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "You no longer dislike this article")
|
from __future__ import division
import pandas as pd
import scipy.io as sio
import numpy as np
from sklearn import metrics
from sklearn.metrics import hamming_loss
import torch
def Intersect_set(a, b):
countL = 0
for i in range(len(a)):
if a[i] == 1 and b[i] == 1:
countL += 1
else:
continue
return countL
def unionset(line1, line2):
sum2 = 0
for i in range(len(line1)):
if (line1[i] == 0 and line2[i] == 1) or (line1[i] == 1 and line2[i] == 0) or (line1[i] == 1 and line2[i] == 1):
sum2 += 1
return sum2
def Aiming(preLabels, test_targets, D):
sumsum1 = 0
for i in range(D):
line1, line2 = preLabels[i], test_targets[i]
line1_count = 0
        for j in range(len(line1)):
            if line1[j] == 1:
                line1_count += 1
sumsum1 += Intersect_set(line1, line2) / (line1_count + 1e-6)
return sumsum1 / D
def Coverage(preLabels, test_targets, D):
sumsum1 = 0
for i in range(D):
line1, line2 = preLabels[i], test_targets[i]
line2_count = 0
        for j in range(len(line2)):
            if line2[j] == 1:
                line2_count += 1
sumsum1 += Intersect_set(line1, line2) / (line2_count + 1e-6)
return sumsum1 / D
def Abs_True_Rate(preLabels, test_targets, D):
'''
correct_pairs = 0
for i in range(len(preLabels)):
if (preLabels[i] == test_targets[i]):
correct_pairs += 1
abs_true = correct_pairs / D
'''
correct_pairs = 0
for i in range(len(preLabels)):
flag = True
for j in range(len(preLabels[i])):
if preLabels[i][j] != test_targets[i][j]:
flag = False
break
if flag:
correct_pairs += 1
abs_true = correct_pairs / D
return abs_true
def Abs_False_Rate(preLabels, test_targets, D):
correct_pairs = 0.0
for i in range(len(preLabels)):
line1, line2 = preLabels[i], test_targets[i]
        # 14 appears to be the number of labels in this task (per-sample symmetric difference, normalised)
        correct_pairs += (unionset(line1, line2) - Intersect_set(line1, line2)) / 14
abs_false = correct_pairs / D
return abs_false
def Accuracy(preLabels, test_targets, D):
acc_score = 0
for i in range(len(preLabels)):
item_inter = Intersect_set(preLabels[i], test_targets[i])
item_union = unionset(preLabels[i], test_targets[i])
acc_score += item_inter / (item_union + 1e-6)
accuracy = acc_score / D
return accuracy
def save_value(lst, filename):
with open(filename, "w") as f:
for l in lst:
f.write(str(l)+'\n')
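# --- Illustrative sanity check (added for clarity; not part of the original module) ---
# Toy run of the multi-label metrics above on two samples with three labels each;
# the expected values are easy to verify by hand.
if __name__ == "__main__":
    pre = [[1, 0, 1], [0, 1, 0]]
    true = [[1, 1, 0], [0, 1, 0]]
    D = len(pre)
    print("Aiming:", Aiming(pre, true, D))                 # (1/2 + 1/1) / 2 ~= 0.75
    print("Coverage:", Coverage(pre, true, D))             # (1/2 + 1/1) / 2 ~= 0.75
    print("Accuracy:", Accuracy(pre, true, D))             # (1/3 + 1/1) / 2 ~= 0.67
    print("Abs_True_Rate:", Abs_True_Rate(pre, true, D))   # 1 exact match of 2 -> 0.5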
|
from unittest import TestCase
from unittest.mock import patch, MagicMock
import sevenbridges
from sbg_cwl_upgrader.validator.cwl_validation import CWLValidator
import warnings
import os
import ruamel.yaml
from sbg_cwl_upgrader.validator.sbg_validate_js_cwl_v1 import main
class TestCWLValidatorLinting(TestCase):
def test_variable_not_defined(self):
warnings.simplefilter('ignore')
tool = {
"class": "CommandLineTool",
"cwlVersion": "v1.0",
"inputs": [
{
"id": "input",
"type": "File",
"inputBinding": {
"valueFrom": "${ a = 1; return a }"
}
}
],
"requirements": [
{
"class": "InlineJavascriptRequirement"
}
]
}
with self.assertLogs(logger='cwltool') as a_log:
CWLValidator().validate_js_expressions_strict(tool)
self.assertIn("'a' is not defined.", a_log.output[0])
self.assertEqual(len(a_log.output), 2)
def test_ES6_syntax(self):
warnings.simplefilter('ignore')
tool = {
"class": "CommandLineTool",
"cwlVersion": "v1.0",
"inputs": [
{
"id": "input",
"type": "File",
"inputBinding": {
"valueFrom": '${ return [0].map(v => v + 1) }'
}
}
],
"requirements": [
{
"class": "InlineJavascriptRequirement"
}
]
}
with self.assertLogs(logger='cwltool') as a_log:
CWLValidator().validate_js_expressions_strict(tool)
self.assertIn("ES6", a_log.output[0])
self.assertEqual(len(a_log.output), 1)
class TestCWLValidatorCLI(TestCase):
@classmethod
def setUpClass(cls):
cls.test_file = os.path.join(os.path.dirname(__file__),
'minimal-tool.cwl')
with open(cls.test_file) as f:
cls.tool = ruamel.yaml.safe_load(f)
@patch('logging.basicConfig', MagicMock())
@patch('sbg_cwl_upgrader.validator.cwl_validation.CWLValidator',
MagicMock())
def test_local_validation_missing_file(self):
"""
Check that missing file raises error.
"""
with self.assertRaises(FileNotFoundError):
main(['--input', '/foo/bar/foo.cwl'])
@patch('logging.basicConfig', MagicMock())
@patch('sbg_cwl_upgrader.validator.cwl_validation.CWLValidator.validate')
def test_local_validation_conversion(self, mock_validator):
"""
Check validator is called with the right value
"""
main(['--input', self.test_file])
mock_validator.assert_called_with(self.tool)
@patch('logging.basicConfig', MagicMock())
@patch('sevenbridges.Config', MagicMock())
@patch('sevenbridges.Api')
@patch('sbg_cwl_upgrader.validator.cwl_validation.CWLValidator.validate')
def test_platform_validation(self, mock_validator, mock_api):
"""
Check validator is called with the right value
"""
mock_app = MagicMock(sevenbridges.App)
mock_app.raw = self.tool
mock_api_instance = mock_api.return_value
mock_api_instance.apps.get.return_value = mock_app
main(['--input', 'a/b/c'])
mock_validator.assert_called_with(self.tool)
class TestCWLValidatorValidate(TestCase):
@classmethod
def setUpClass(cls):
cls.test_file = os.path.join(os.path.dirname(__file__),
'minimal-tool.cwl')
with open(cls.test_file) as f:
cls.tool = ruamel.yaml.safe_load(f)
cls.wf = {
"class": "Workflow",
"cwlVersion": "v1.0",
"inputs": [],
"outputs": [],
"steps": [
{
"id": "1",
"run": cls.tool
}
]
}
@patch(('sbg_cwl_upgrader.validator.'
'cwl_validation.CWLValidator.validate_js_expressions_strict'))
def test_simple_validate(self, mock_validation):
CWLValidator().validate(self.tool)
mock_validation.assert_called_once_with(self.tool)
@patch(('sbg_cwl_upgrader.validator.'
'cwl_validation.CWLValidator.validate_js_expressions_strict'))
def test_recursive_validate(self, mock_validation):
CWLValidator().validate(self.wf)
mock_validation.assert_called_once_with(self.tool)
def test_exception_missing_class(self):
with self.assertRaises(IndexError):
CWLValidator().validate({})
def test_exception_wrong_class(self):
with self.assertRaises(ValueError):
CWLValidator().validate({"class": "FooTool"})
def test_exception_missing_run(self):
with self.assertRaises(IndexError):
CWLValidator().validate(
{"class": "Workflow", "steps": [{"id": 1}]}
)
|
""" defines the filesystem model
"""
import os
import glob
import types
import shutil
import autofile.file
class DataFile():
""" file manager for a given datatype """
def __init__(self, name, writer_=(lambda _: _), reader_=(lambda _: _)):
"""
:param name: the file name
:type name: str
:param writer_: writes data to a string
:type writer_: callable[object->str]
:param reader_: reads data from a string
:type reader_: callable[str->object]
"""
self.name = name
self.writer_ = writer_
self.reader_ = reader_
def path(self, dir_pth):
""" file path
"""
return os.path.join(dir_pth, self.name)
def exists(self, dir_pth):
""" does this file exist?
"""
pth = self.path(dir_pth)
return os.path.isfile(pth)
def write(self, val, dir_pth):
""" write data to this file
"""
assert os.path.exists(dir_pth)
pth = self.path(dir_pth)
val_str = self.writer_(val)
autofile.file.write_file(pth, val_str)
def read(self, dir_pth):
""" read data from this file
"""
assert self.exists(dir_pth)
pth = self.path(dir_pth)
val_str = autofile.file.read_file(pth)
val = self.reader_(val_str)
return val
class DataSeries():
""" directory manager mapping locator values to a directory series
"""
def __init__(self, prefix, map_, nlocs, depth, loc_dfile=None,
root_ds=None, removable=False):
"""
:param map_: maps `nlocs` locators to a segment path consisting of
`depth` directories
:param info_map_: maps `nlocs` locators to an information object, to
be written in the data directory
"""
assert os.path.isdir(prefix)
self.prefix = os.path.abspath(prefix)
self.map_ = map_
self.nlocs = nlocs
self.depth = depth
self.loc_dfile = loc_dfile
self.root = root_ds
self.removable = removable
self.file = types.SimpleNamespace()
def add_data_files(self, dfile_dct):
""" add DataFiles to the DataSeries
"""
dfile_dct = {} if dfile_dct is None else dfile_dct
for name, dfile in dfile_dct.items():
assert isinstance(name, str)
assert isinstance(dfile, DataFile)
dsfile = _DataSeriesFile(ds=self, dfile=dfile)
setattr(self.file, name, dsfile)
def path(self, locs=()):
""" absolute directory path
"""
if self.root is None:
prefix = self.prefix
else:
root_locs = self._root_locators(locs)
locs = self._self_locators(locs)
prefix = self.root.path(root_locs)
assert len(locs) == self.nlocs
pth = self.map_(locs)
assert _path_is_relative(pth)
assert _path_has_depth(pth, self.depth)
return os.path.join(prefix, pth)
def exists(self, locs=()):
""" does this directory exist?
"""
pth = self.path(locs)
return os.path.isdir(pth)
def remove(self, locs=()):
""" does this directory exist?
"""
if self.removable:
pth = self.path(locs)
if self.exists(locs):
shutil.rmtree(pth)
else:
raise ValueError("This data series is not removable")
def create(self, locs=()):
""" create a directory at this prefix
"""
# recursively create starting from the first root directory
if self.root is not None:
root_locs = self._root_locators(locs)
self.root.create(root_locs)
# create this directory in the chain, if it doesn't already exist
if not self.exists(locs):
pth = self.path(locs)
os.makedirs(pth)
if self.loc_dfile is not None:
locs = self._self_locators(locs)
self.loc_dfile.write(locs, pth)
def existing(self, root_locs=(), relative=False):
""" return the list of locators for existing paths
"""
if self.loc_dfile is None:
raise ValueError("This function does not work "
"without a locator DataFile")
pths = self.existing_paths(root_locs)
locs_lst = tuple(self.loc_dfile.read(pth) for pth in pths)
if not relative:
locs_lst = tuple(map(list(root_locs).__add__, locs_lst))
return locs_lst
def existing_paths(self, root_locs=()):
""" existing paths at this prefix/root directory
"""
if self.root is None:
prefix = self.prefix
else:
prefix = self.root.path(root_locs)
pth_pattern = os.path.join(prefix, *('*' * self.depth))
pths = filter(os.path.isdir, glob.glob(pth_pattern))
pths = tuple(sorted(os.path.join(prefix, pth) for pth in pths))
return pths
# helpers
def _self_locators(self, locs):
""" locators for this DataSeriesDir
"""
nlocs = len(locs)
assert nlocs >= self.nlocs
root_nlocs = nlocs - self.nlocs
return locs[root_nlocs:]
def _root_locators(self, locs):
""" locators for the root DataSeriesDir, if there is one
"""
nlocs = len(locs)
assert nlocs >= self.nlocs
root_nlocs = nlocs - self.nlocs
return locs[:root_nlocs]
class FileSystem(types.SimpleNamespace):
""" a collection of DataSeries
"""
def __init__(self, dseries_dct):
self.update(dseries_dct)
def __iter__(self):
for key, val in vars(self).items():
yield key, val
def update(self, dseries_dct):
""" update the filesystem dataseries
"""
for name, obj in dict(dseries_dct).items():
assert isinstance(name, str)
assert isinstance(obj, DataSeries)
setattr(self, name, obj)
# helpers:
class _DataSeriesFile():
""" file manager mapping locator values to files in a directory series
"""
def __init__(self, ds, dfile):
self.dir = ds
self.file = dfile
def path(self, locs=()):
""" absolute file path
"""
return self.file.path(self.dir.path(locs))
def exists(self, locs=()):
""" does this file exist?
"""
return self.file.exists(self.dir.path(locs))
def write(self, val, locs=()):
""" write data to this file
"""
self.file.write(val, self.dir.path(locs))
def read(self, locs=()):
""" read data from this file
"""
return self.file.read(self.dir.path(locs))
def _path_is_relative(pth):
""" is this a relative path?
"""
return os.path.relpath(pth) == pth
def _path_has_depth(pth, depth):
""" does this path have the given depth?
"""
return len(_os_path_split_all(pth)) == depth
def _os_path_split_all(pth):
""" grabbed this from the internet """
allparts = []
while 1:
parts = os.path.split(pth)
if parts[0] == pth: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == pth: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
pth = parts[0]
allparts.insert(0, parts[1])
return allparts
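# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal round-trip through DataFile in a temporary directory; writer_/reader_
# default to the identity, so the string comes back unchanged. Assumes the
# autofile package imported above is installed.
if __name__ == "__main__":
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    dfile = DataFile('note.txt')
    dfile.write('hello', tmp_dir)
    assert dfile.exists(tmp_dir)
    print(dfile.read(tmp_dir))  # -> 'hello'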
|
import re
from factom_did.client.constants import DID_METHOD_NAME
from factom_did.client.enums import KeyType, Network
def validate_alias(alias):
if not re.match("^[a-z0-9-]{1,32}$", alias):
raise ValueError(
"Alias must not be more than 32 characters long and must contain only lower-case "
"letters, digits and hyphens."
)
def validate_did(did):
if not re.match(
"^{}:({}:|{}:)?[a-f0-9]{{64}}$".format(
DID_METHOD_NAME, Network.Mainnet.value, Network.Testnet.value
),
did,
):
raise ValueError("Controller must be a valid DID.")
def validate_full_key_identifier(did):
if not re.match(
"^{}:({}:|{}:)?[a-f0-9]{{64}}#[a-zA-Z0-9-]{{1,32}}$".format(
DID_METHOD_NAME, Network.Mainnet.value, Network.Testnet.value
),
did,
):
raise ValueError("Controller must be a valid DID.")
def validate_service_endpoint(endpoint):
if not re.match(
r"^(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?$",
endpoint,
):
raise ValueError(
"Endpoint must be a valid URL address starting with http:// or https://."
)
def validate_priority_requirement(priority_requirement):
if priority_requirement is not None and (
isinstance(priority_requirement, int) is False or priority_requirement < 0
):
raise ValueError("Priority requirement must be a non-negative integer.")
def validate_key_type(key_type):
if key_type not in (KeyType.ECDSA, KeyType.EdDSA, KeyType.RSA):
raise ValueError("Type must be a valid signature type.")
|
# Brandon Kupczyk (10/31/16)
from pushbullet import Pushbullet
class PBAutoRespond:
"""
    This class sends messages to contacts using the prebuilt Pushbullet API:
https://pypi.python.org/pypi/pushbullet.py.
"""
VCard_file = None
pb = None
devices = None
device = None
contacts = None
my_contact = None
    unresponded_messages = None
    IO = None
    auto_respond_message = 'This person is away, this is an auto response. The more you text the more messages you will get. Much love'
    def __init__(self, api_key, device_name, my_contact, contacts_VCard_file):  # on init, query the Pushbullet API for the device used to send texts
"""
This sets up contacts, the Python Pushbullet API, the device to use and the contact information.
:param api_key: API key passed in from the configs as a string.
:param device_name: Device name found in the configs as a string.
        :param my_contact: Your own contact name as a string, also found in the configs.
        :param contacts_VCard_file: Path to a .vcf export of your contacts.
"""
self.VCard_file = contacts_VCard_file
self.pb = Pushbullet(api_key)
self.devices = self.pb.devices
self.my_contact = my_contact
count = 0
for dev in self.devices:
if dev.nickname == device_name:
self.device = self.pb.devices[count]
print(dev.nickname)
count += 1
        if self.device is None:  # if no matching device was found, self.device is still None
raise AssertionError('The device that was passed on init was not in your device list')
name = ''
number = ''
self.contacts = {}
with open(self.VCard_file) as openfileobject: # also on init contacts get generated from a vcf file
for line in openfileobject:
if line[:3] == 'FN:':
name = line[3:-1]
if line[:14] == 'TEL;TYPE=CELL:':
number = line[14:-1]
if line == 'END:VCARD\n':
if len(name) + len(number) >= 10 and len(number) >= 9:
self.contacts[name] = number
def send(self, person, text):
"""
This method makes it possible to send a general purpose message.
:param person: This is a string of the person you are trying to send the message to from the contacts.
:param text: This is a string of the message you are trying to return.
"""
try:
number = self.contacts[person]
self.pb.push_sms(self.device, number, text)
print('trying to send away message to ' + person)
except Exception as e:
number = self.contacts[self.my_contact]
text = 'Could not send text to ' + person
self.pb.push_sms(self.device, number, text)
print('Could not send text to ' + person+ str(e))
def sendAwayMes(self, person): #sends an away message
"""
This method sends the away message text; either default or user specified.
:param person: This is a string of the person you are trying to send the message to from the contacts.
        :return: None. If the responder is switched off (self.IO is truthy), the method returns without sending.
"""
if self.IO:
return
try:
number = self.contacts[person]
self.pb.push_sms(self.device, number, self.auto_respond_message)
print('trying to send away message to ' + person)
except Exception as e:
number = self.contacts[self.my_contact]
text = 'Could not send response to ' + person + '. Update Contacts \n'
self.pb.push_sms(self.device, number, text)
print(str(e))
def set_away_message(self, message): #called when I text setmessage: switch found in main
"""
This method is called to set a custom message.
:param message: This is the custom message you wish to set as a string.
"""
self.auto_respond_message = message
self.send(self.my_contact, 'Away message set to ' + message)
def set_ONOFF(self, value):
"""
This method is used to set the Auto_responder on or off.
:param value: This is the Bool to set on or off.
"""
self.IO = value
if self.IO:
self.send(self.my_contact, 'Auto-response is now on')
        else:
self.send(self.my_contact, 'Auto-response is now off')
print('turned off :(')
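# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Construction needs a real Pushbullet API key, a device nickname, your own contact
# name and a .vcf contacts export; the values below are placeholders only and the
# calls will fail without valid credentials.
if __name__ == "__main__":
    responder = PBAutoRespond(
        api_key='YOUR_PUSHBULLET_API_KEY',
        device_name='MyPhone',
        my_contact='Me Myself',
        contacts_VCard_file='contacts.vcf',
    )
    responder.set_away_message('Back at 5pm.')
    responder.sendAwayMes('Some Friend')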
|
from .environment import Environment, ENVIRONMENT_VAR_NAME, ENVIRONMENT_NAME_DEFAULT, ENVIRONMENT_NAME_PRODUCTION
from .config_manager import ConfigManager
|
from collections import defaultdict
from operator import itemgetter
import numpy as np
from torch.utils.data import DataLoader
from tools.trainer import ModelBase
class ModelRCNN(ModelBase):
def __init__(self, model, device, metric_iou_treshold=0.75):
super().__init__(model, device)
self.metric_iou_treshold = metric_iou_treshold
def train_batch(self, X, y, optimizer):
X_dev = list(image.to(self.device) for image in X)
y_dev = [{k: v.to(self.device) for k, v in t.items()} for t in y]
loss_dict = self.model(X_dev, y_dev)
loss_value = sum(loss for loss in loss_dict.values())
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
return {k: v.detach().item() for k, v in loss_dict.items()}
def eval_batch(self, X, y):
X_dev = list(image.to(self.device) for image in X)
y_dev = None if not y else [{k: v.to(self.device) for k, v in t.items()} for t in y]
result = self.model(X_dev, y_dev)
result = [{k: v.detach().cpu() for k, v in row.items()} for row in result]
return result
def eval_append_results(self, predictions, ground_truth):
self.ground_truth.extend({k: v.numpy() for k, v in row.items()} for row in ground_truth)
self.predictions.extend({k: v.numpy() for k, v in row.items()} for row in predictions)
def evaluate(self, prediction, ground_truth):
return {'mAP': mAP(ground_truth, prediction, self.metric_iou_treshold)}
def dataloader_factory(self, dataset, train, batch_size):
def collate_fn(batch):
return tuple(list(v) for v in zip(*batch))
return DataLoader(dataset, batch_size=batch_size, shuffle=train, collate_fn=collate_fn)
def epoch_metrics_format(self):
return "LR:{learning_rate:.6f}, loss_cls:{loss_classifier:.4f}, loss_box:{loss_box_reg:.4f}, loss_obj:{loss_objectness:.4f}, loss_rpn_box:{loss_rpn_box_reg:.4f}, mAP:{mAP:.3f}"
def batch_metrics_format(self):
return "loss_cls:{loss_classifier:.4f}, loss_box:{loss_box_reg:.4f}, loss_obj:{loss_objectness:.4f}, loss_rpn_box:{loss_rpn_box_reg:.4f}"
def target_metric(self):
return "mAP"
def iou(box1, box2):
x1left, x1right = min(box1[::2]), max(box1[::2])
y1top, y1bottom = min(box1[1::2]), max(box1[1::2])
x2left, x2right = min(box2[::2]), max(box2[::2])
y2top, y2bottom = min(box2[1::2]), max(box2[1::2])
xi = max(0, min(x1right, x2right) - max(x1left, x2left))
yi = max(0, min(y1bottom, y2bottom) - max(y1top, y2top))
intersection = xi * yi
    union = (x1right - x1left) * (y1bottom - y1top) + (x2right - x2left) * (y2bottom - y2top) - intersection
return intersection / union
def mAP(ground_truth, predictions, iou_threshold):
all_predictions = defaultdict(list)
class_counts = defaultdict(int)
for gt, pred in zip(ground_truth, predictions):
true_boxes = defaultdict(list)
for box, label in zip(gt['boxes'], gt['labels']):
true_boxes[label].append(box)
class_counts[label] = class_counts[label] + 1
for box, label, confidence in zip(pred['boxes'], pred['labels'], pred['scores']):
correct = False
if label in true_boxes:
for gt_box in true_boxes[label]:
if iou(gt_box, box) >= iou_threshold:
correct = True
break
all_predictions[label].append((confidence, correct))
ap = {k: 0 for k in class_counts}
for k, v in all_predictions.items():
v.sort(key=itemgetter(0), reverse=True)
v = np.array(v)
recall = np.cumsum(v[:, 1])
gt_count = class_counts[k]
cum = 0.0
if gt_count > 0:
precision = recall / np.cumsum(np.ones(v.shape[0]))
recall = recall / gt_count
r_prev = 0.0
p_prev = 1.0
for i in range(recall.shape[0]):
if precision[i] < p_prev and recall[i] > r_prev:
cum += (recall[i] - r_prev) * p_prev
r_prev = recall[i]
p_prev = precision[i]
cum += (recall[-1] - r_prev) * p_prev
ap[k] = cum
return sum(ap.values()) / len(ap) if len(ap) > 0 else 0
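# --- Illustrative sanity check (added for clarity; not part of the original module) ---
# iou() takes boxes as (x1, y1, x2, y2); two 10x10 boxes overlapping in a 5x5 square
# give IoU = 25 / (100 + 100 - 25) ~= 0.143.
if __name__ == "__main__":
    box_a = [0, 0, 10, 10]
    box_b = [5, 5, 15, 15]
    print(iou(box_a, box_b))  # expected ~0.1428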
|
# Copyright (c) 2020, Anders Lervik.
# Distributed under the MIT License. See LICENSE for more info.
"""
PCA Loadings (2D) with xkcd style and centered axes
===================================================
This example will plot PCA loadings along two principal axes.
Here we employ the
`xkcd style <https://matplotlib.org/gallery/showcase/xkcd.html>`_
and also modify the loadings plot to use centered axes
(that is, the axes go through the origin).
"""
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.datasets import load_diabetes
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from psynlig import pca_2d_loadings
plt.xkcd()
data_set = load_diabetes()
data = pd.DataFrame(data_set['data'], columns=data_set['feature_names'])
data = scale(data)
pca = PCA()
pca.fit_transform(data)
pca_2d_loadings(
pca,
data_set['feature_names'],
select_components={(1, 2)},
style='center'
)
# Remove text and add legend:
_, axes = pca_2d_loadings(
pca,
data_set['feature_names'],
select_components={(1, 2)},
style='center',
text_settings={'show': False},
)
for axi in axes:
axi.legend(loc='upper left')
plt.show()
|