max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
shot.py | penguintutor/pico-spacegame | 2 | 12760851 | import utime
from constants import *
class Shot:
    """A projectile drawn as a 2x2 pixel square that travels up the screen."""

    def __init__(self, display, start_position, color=(255, 255, 255)):
        """Bind the display and record the starting coordinates and colour."""
        self.display = display
        self.x = start_position[0]
        self.y = start_position[1]
        self.color = color

    def update(self):
        """Move the shot two pixels toward the top of the screen."""
        self.y -= 2

    def draw(self, display_buffer):
        """Render the shot; shots at or above the top edge are skipped.

        ``display_buffer`` is accepted for interface compatibility, but the
        shot actually renders through ``self.display``.
        """
        if self.y <= 0:
            return
        self.display.set_pen(*self.color)
        # Paint a 2x2 block anchored at (x, y); the additions stay inside
        # int() so truncation behaves exactly as before for fractional x/y.
        for dx in (0, 1):
            for dy in (0, 1):
                self.display.pixel(int(self.x + dx), int(self.y + dy))
utils/builder/register_builder/riscv/register_changes/vector_registers.py | jeremybennett/force-riscv | 0 | 12760852 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CSR definitions for the RISC-V vector extension, consumed by the register
# builder.  Each dict describes one CSR: its name, width in bits, backing
# physical register, CSR index, bit-field layout (shift/size pairs, MSB
# first) and a 'choice' record for test generation (weight '0' = never
# chosen at random; description prefix URO/URW = user read-only/read-write).
new_registers = [{'target':'system',
                  'register':'vtype',
                  'size':64,
                  'physical_register':'vtype',
                  'index':'0xc21',
                  'fields':[{'field':'VILL','shift':63,'size':1},
                            {'field':'RESERVED (WRITE 0)','shift':8,'size':55},
                            {'field':'VMA','shift':7,'size':1},
                            {'field':'VTA','shift':6,'size':1},
                            # NOTE(review): 'VLMUL' appears twice (shift 5 size 1
                            # and shift 0 size 2).  In draft vector specs the bit
                            # at shift 5 was the fractional-LMUL bit; confirm the
                            # intended field name against the spec revision this
                            # targets before renaming either entry.
                            {'field':'VLMUL','shift':5,'size':1},
                            {'field':'VSEW','shift':2,'size':3},
                            {'field':'VLMUL','shift':0,'size':2}],
                  'choice':{'name':'vtype','value':'0xc21','weight':'0','description':'URO; Vector data type register.'}},
                 {'target':'system',
                  'register':'vstart',
                  'size':64,
                  'physical_register':'vstart',
                  'index':'0x8',
                  'fields':[{'field':'VSTART','shift':0,'size':64}],
                  'choice':{'name':'vstart','value':'0x8','weight':'0','description':'URW; Vector start position.'}},
                 {'target':'system',
                  'register':'vxsat',
                  'size':64,
                  'physical_register':'vxsat',
                  'index':'0x9',
                  'fields':[{'field':'RESERVED','shift':1,'size':63},
                            {'field':'VXSAT','shift':0,'size':1}],
                  'choice':{'name':'vxsat','value':'0x9','weight':'0','description':'URW; Fixed-point Saturate Flag.'}},
                 {'target':'system',
                  'register':'vxrm',
                  'size':64,
                  'physical_register':'vxrm',
                  'index':'0xa',
                  'fields':[{'field':'RESERVED (WRITE 0)','shift':2,'size':62},
                            {'field':'VXRM','shift':0,'size':2}],
                  'choice':{'name':'vxrm','value':'0xa','weight':'0','description':'URW; Fixed-point Rounding Mode.'}},
                 # vcsr mirrors vxrm and vxsat in one register.
                 {'target':'system',
                  'register':'vcsr',
                  'size':64,
                  'physical_register':'vcsr',
                  'index':'0xf',
                  'fields':[{'field':'RESERVED','shift':3,'size':61},
                            {'field':'VXRM','shift':1,'size':2},
                            {'field':'VXSAT','shift':0,'size':1}],
                  'choice':{'name':'vcsr','value':'0xf','weight':'0','description':'URW; Vector control and status register.'}},
                 {'target':'system',
                  'register':'vl',
                  'size':64,
                  'physical_register':'vl',
                  'index':'0xc20',
                  'fields':[{'field':'VL','shift':0,'size':64}],
                  'choice':{'name':'vl','value':'0xc20','weight':'0','description':'URO; Vector length.'}},
                 {'target':'system',
                  'register':'vlenb',
                  'size':64,
                  'physical_register':'vlenb',
                  'index':'0xc22',
                  # there should probably be 2 fields here instead of just 1 (vlenb with shift 0, size 61 and a reserved field with shift 61, size 3), but the spec doesn't explicitly specify that
                  'fields':[{'field':'VLENB','shift':0,'size':64}],
                  'choice':{'name':'vlenb','value':'0xc22','weight':'0','description':'URO; VLEN/8 (vector register length in bytes).'}}]
flexget/components/archive/archive.py | guillaumelamirand/Flexget | 0 | 12760853 | import logging
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from . import db
log = logging.getLogger('archive')
class Archive:
    """
    Archives all new items into database where they can be later searched and injected.
    Stores the entries in the state as they are at the exit phase, this way task cleanup for title
    etc is stored into the database. This may however make injecting them back to the original task work
    wrongly.
    """

    # Config is either a bare boolean (archive everything, no tags) or a
    # list of tag names to attach to every archived entry.
    schema = {'oneOf': [{'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string'}}]}

    def on_task_learn(self, task, config):
        """Add new entries into archive. We use learn phase in case the task corrects title or url via some plugins."""
        if isinstance(config, bool):
            tag_names = []
        else:
            tag_names = config

        # Resolve tag names to persistent tag rows once, up front.
        tags = []
        for tag_name in set(tag_names):
            tags.append(db.get_tag(tag_name, task.session))

        count = 0
        processed = []
        for entry in task.entries + task.rejected + task.failed:
            # I think entry can be in multiple of those lists .. not sure though!
            if entry in processed:
                continue
            else:
                processed.append(entry)

            # An archived item is considered the same entry when both the
            # title and the url match an existing row.
            ae = (
                task.session.query(db.ArchiveEntry)
                .filter(db.ArchiveEntry.title == entry['title'])
                .filter(db.ArchiveEntry.url == entry['url'])
                .first()
            )
            if ae:
                # add (missing) sources
                source = db.get_source(task.name, task.session)
                if source not in ae.sources:
                    log.debug('Adding `%s` into `%s` sources' % (task.name, ae))
                    ae.sources.append(source)
                # add (missing) tags
                for tag_name in tag_names:
                    atag = db.get_tag(tag_name, task.session)
                    if atag not in ae.tags:
                        log.debug('Adding tag %s into %s' % (tag_name, ae))
                        ae.tags.append(atag)
            else:
                # create new archive entry
                ae = db.ArchiveEntry()
                ae.title = entry['title']
                ae.url = entry['url']
                if 'description' in entry:
                    ae.description = entry['description']
                ae.task = task.name
                ae.sources.append(db.get_source(task.name, task.session))
                if tags:
                    # note, we're extending empty list
                    ae.tags.extend(tags)
                log.debug('Adding `%s` with %i tags to archive' % (ae, len(tags)))
                task.session.add(ae)
                count += 1
        if count:
            log.verbose('Added %i new entries to archive' % count)

    def on_task_abort(self, task, config):
        """
        Archive even on task abort, except if the abort has happened before session
        was started.
        """
        if task.session is not None:
            self.on_task_learn(task, config)
class UrlrewriteArchive:
    """
    Provides capability to rewrite urls from archive or make searches with discover.
    """

    # Maps archive row attributes to Entry fields.
    entry_map = {'title': 'title', 'url': 'url', 'description': 'description'}

    schema = {'oneOf': [{'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string'}}]}

    def search(self, task, entry, config=None):
        """Search plugin API method.

        Looks up each of the entry's search strings in the archive
        (optionally restricted to the configured tags) and returns the set
        of valid result entries.
        """
        session = Session()
        entries = set()
        # Boolean config means "search everything"; a list restricts to tags.
        if isinstance(config, bool):
            tag_names = None
        else:
            tag_names = config
        try:
            for query in entry.get('search_strings', [entry['title']]):
                # clean some characters out of the string for better results
                query = re.sub(r'[ \(\)]+', ' ', query).strip()
                log.debug('looking for `%s` config: %s' % (query, config))
                for archive_entry in db.search(session, query, tags=tag_names, desc=True):
                    log.debug('rewrite search result: %s' % archive_entry)
                    # FIX: build the result under a distinct name; rebinding
                    # `entry` here shadowed the task entry that drives the
                    # outer loop, which only worked because the iterable had
                    # already been captured.
                    result = Entry()
                    result.update_using_map(self.entry_map, archive_entry, ignore_none=True)
                    if result.isvalid():
                        entries.add(result)
        finally:
            session.close()
        log.debug('found %i entries' % len(entries))
        return entries
@event('plugin.register')
def register_plugin():
    # Register both the archiving output plugin and its search interface.
    plugin.register(Archive, 'archive', api_ver=2)
    plugin.register(UrlrewriteArchive, 'flexget_archive', interfaces=['search'], api_ver=2)
s3_funcs.py | joseph-bongo-220/DeepLearningProject | 0 | 12760854 | <gh_stars>0
import boto3
import os
import re
s3 = boto3.client("s3")
bucket_name = "yale-amth552-deep-learning"
mont_path = "../Downloads/MontgomerySet"
mont_dir_type = {"ClinicalReadings":".txt", "CXR_png":".png", "ManualMask":{"leftMask": ".png", "rightMask": ".png"}}
china_path = "../Downloads/ChinaSet_AllFiles"
china_dir_type = {"ClinicalReadings":".txt", "CXR_png":".png"}
def to_S3_Mont(bucket_name, path, dir_type):
    """Push the Montgomery TB data set to S3.

    ``dir_type`` maps a sub-directory name to either a file extension or a
    nested {sub-sub-directory: extension} dict (used for the left/right
    lung masks).  Every matching file is uploaded under the "Images/" key
    prefix, mirroring the local directory layout.
    """
    for subdir, ext in dir_type.items():
        if isinstance(ext, dict):
            for nested, nested_ext in ext.items():
                _upload_dir(bucket_name, path, (subdir, nested), nested_ext)
        else:
            _upload_dir(bucket_name, path, (subdir,), ext)


def _upload_dir(bucket_name, path, subdirs, ext):
    """Upload every file in path/<subdirs...> whose name ends with ``ext``.

    FIX: the original used re.search(ext, name) with patterns like ".png";
    the unescaped "." matched any character anywhere in the name, so files
    merely containing e.g. "_png" were uploaded too.
    """
    local_dir = os.path.join(path, *subdirs)
    for name in os.listdir(local_dir):
        if not name.endswith(ext):
            continue
        s3.upload_file(os.path.join(local_dir, name), bucket_name,
                       "/".join(("Images",) + tuple(subdirs) + (name,)))
        print(name)
def to_S3_China(bucket_name, path, dir_type):
    """Push the Shenzhen TB data set to S3.

    ``dir_type`` maps a sub-directory name to the file extension to upload.
    FIX: matching now uses str.endswith instead of re.search, whose
    unescaped "." matched any character anywhere in the file name.
    """
    for subdir, ext in dir_type.items():
        local_dir = os.path.join(path, subdir)
        for name in os.listdir(local_dir):
            if not name.endswith(ext):
                continue
            s3.upload_file(os.path.join(local_dir, name), bucket_name,
                           "Images/" + subdir + "/" + name)
            print(name)


if __name__ == "__main__":
    to_S3_China(bucket_name, china_path, china_dir_type)
tests/test_get_host.py | sasqwatch/yawast | 0 | 12760855 | <filename>tests/test_get_host.py<gh_stars>0
from unittest import TestCase
from yawast.scanner.plugins.dns import basic
class TestGetHost(TestCase):
    """Exercises basic.get_host reverse-DNS lookups (requires network access)."""

    def test_get_host(self):
        # 8.8.8.8 is Google's public resolver; its PTR record is stable.
        res = basic.get_host("8.8.8.8")
        self.assertEqual("dns.google", res)

    def test_get_host_na(self):
        # Private (RFC 1918) address with no PTR record -> the "N/A" placeholder.
        res = basic.get_host("192.168.3.11")
        self.assertEqual("N/A", res)
td/tableurs/tableurs_ressources/fichiers_secondaires/participants/generer.py | projeduc/ESI_1CP_BWeb | 1 | 12760856 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import random
import numpy
import pandas
FEMELLE_NBR = 100  # number of girls
MALE_NBR = 100  # number of boys
ESI_NBR = 150  # number of ESI students
ALL_NBR = FEMELLE_NBR + MALE_NBR  # total number of students
NONESI_NBR = ALL_NBR - ESI_NBR  # number of students from other universities
NIVEAU = ["1CP", "2CP", "1CS", "2CS", "3CS"]  # study levels (ESI only)
EXPER = ["Débutant", "Intermédiaire", "Avancé"]  # experience levels
TYPE = ["Introduction à la programmation", "C", "C#", "Java", "Javascript", "Python"]  # course types
def selectionner_aleatoirement(chemin_liste, nombre):
    """Read the list stored at ``chemin_liste`` (one item per line) and
    return ``nombre`` elements drawn from it at random, with replacement."""
    with open(chemin_liste) as handle:
        candidates = handle.read().splitlines()
    return random.choices(candidates, k=nombre)
# Random list of boys' first names
maleListe = selectionner_aleatoirement("male.csv", MALE_NBR)
# Wrap the list in a pandas DataFrame
maleListe = pandas.DataFrame({"Prenom": maleListe})
#print(maleListe)
# Random list of girls' first names
femelleListe = selectionner_aleatoirement("femelle.csv", FEMELLE_NBR)
femelleListe = pandas.DataFrame({"Prenom": femelleListe})
#print(femelleListe)
# Concatenate the boys' and girls' frames into one student table
etudiants = pandas.concat([maleListe, femelleListe])
#print(etudiants)
noms_liste = selectionner_aleatoirement("noms.csv", ALL_NBR)
etudiants["Nom"] = pandas.Series(noms_liste).values
# Universities: random ones for the non-ESI students, then ESI for the rest
liste_univ = selectionner_aleatoirement("nomUniv.csv", NONESI_NBR) + ["École nationale supérieure d'informatique d'Alger"] * ESI_NBR
# Study levels only apply to ESI students; others get an empty string
liste_niveaux = [""] * NONESI_NBR + random.choices(NIVEAU, k=ESI_NBR)
# Shuffle university and level with the SAME permutation so they stay paired
msk = numpy.random.permutation(len(etudiants))
liste_univ = numpy.array(liste_univ)[msk]
liste_niveaux = numpy.array(liste_niveaux)[msk]
etudiants["Univ"] = pandas.Series(liste_univ).values
etudiants["Niveaux"] = pandas.Series(liste_niveaux).values
#print(etudiants)
# Shuffle the rows of the whole table
etudiants = etudiants.iloc[numpy.random.permutation(len(etudiants))]
liste_exper = random.choices(EXPER, k=ALL_NBR)
etudiants["Experience"] = pandas.Series(liste_exper).values
liste_type = random.choices(TYPE, k=ALL_NBR)
etudiants["Type"] = pandas.Series(liste_type).values
adresses = pandas.read_csv("wilaya.csv")
etudiants["Wilaya"] = random.choices(adresses["nom"].values, k=ALL_NBR)
# Random grades between 5.00 and 20.00 in steps of 0.25
etudiants["Note"] = (numpy.array(random.choices(range(500, 2001, 25), k=ALL_NBR))/100.).tolist()
# Reorder the columns and export the result to Excel
ordre = ["Nom", "Prenom", "Wilaya", "Univ", "Niveaux", "Type", "Experience", "Note"]
etudiants = etudiants.reindex(ordre, axis=1)
etudiants.to_excel("./participants_analyse.xlsx", index=False)
print(etudiants)
lib/paypal/resources/permission.py | muffinresearch/solitude | 0 | 12760857 | from cached import Resource
from lib.paypal.client import get_client
from lib.paypal.forms import (CheckPermission, GetPermissionToken,
GetPermissionURL)
class GetPermissionURLResource(Resource):
    """POST-only endpoint that asks PayPal for a permission-grant URL."""

    class Meta(Resource.Meta):
        resource_name = 'permission-url'
        list_allowed_methods = ['post']
        # The base Resource validates the POST body with `form` and then
        # invokes the named method on the PayPal client.
        form = GetPermissionURL
        method = 'get_permission_url'
class CheckPermissionResource(Resource):
    """POST-only endpoint that checks whether a PayPal permission is granted."""

    class Meta(Resource.Meta):
        resource_name = 'permission-check'
        list_allowed_methods = ['post']
        # Validated by CheckPermission, dispatched to client.check_permission.
        form = CheckPermission
        method = 'check_permission'
class GetPermissionTokenResource(Resource):
    """POST-only endpoint exchanging a PayPal permission grant for token
    credentials, which are persisted on the seller record."""

    class Meta(Resource.Meta):
        resource_name = 'permission-token'
        list_allowed_methods = ['post']

    def obj_create(self, bundle, request, **kwargs):
        """Fetch a permission token from PayPal and store it on the seller."""
        form = GetPermissionToken(bundle.data)
        if not form.is_valid():
            raise self.form_errors(form)
        paypal = get_client()
        result = paypal.get_permission_token(*form.args())
        # Persist the returned credentials on the seller record.
        seller = form.cleaned_data['seller']
        seller.token = result['token']
        seller.secret = result['secret']
        seller.save()
        bundle.obj = seller
        return bundle

    def dehydrate(self, bundle):
        # Never expose the raw credentials over the API — only whether they
        # have been set.
        return {'token': bundle.obj.token_exists,
                'secret': bundle.obj.secret_exists}
24/first.py | qxzcode/aoc_2019 | 0 | 12760858 | <gh_stars>0
import sys # argv
import numpy as np
def _parse_grid(path):
    """Read the puzzle input into a 0/1 numpy array ('#' -> 1, '.' -> 0)."""
    with open(path) as f:
        rows = [[{'.': 0, '#': 1}[c] for c in line.strip()] for line in f]
    return np.array(rows)


def _step(grid):
    """Return the next generation of the bug grid (AoC 2019 day 24 rules).

    A bug survives with exactly 1 neighbour; an empty tile becomes infested
    with 1 or 2 neighbours.
    """
    nxt = np.empty_like(grid)
    for (y, x), tile in np.ndenumerate(grid):
        count = 0
        if x > 0:
            count += grid[y, x - 1]
        if x < grid.shape[1] - 1:
            count += grid[y, x + 1]
        if y > 0:
            count += grid[y - 1, x]
        # FIX: the row bound must use shape[0] (number of rows), not
        # shape[1]; the original only worked because the grid was square.
        if y < grid.shape[0] - 1:
            count += grid[y + 1, x]
        nxt[y, x] = count == 1 or (count == 2 and not tile)
    return nxt


def first_repeated_rating(grid):
    """Iterate until a layout repeats; return its biodiversity rating.

    The rating assigns tile k (row-major) the value 2**k and sums over
    infested tiles, so it doubles as a perfect hash of the layout.
    """
    tile_ratings = 2 ** np.arange(grid.size).reshape(grid.shape)
    seen = set()
    while True:
        rating = int(np.einsum('ij,ij', grid, tile_ratings))
        if rating in seen:
            return rating
        seen.add(rating)
        grid = _step(grid)


if __name__ == '__main__':
    print(first_repeated_rating(_parse_grid(sys.argv[1])))
assets/IsItQuestion/main.py | Simple086/Chat-Bot | 0 | 12760859 | <filename>assets/IsItQuestion/main.py
import pandas as pd
import time
import sys
# Other files
import core
import cosineSimilarity
import otherFunctions
# Main code
def isQuestion(text):
    """Classify ``text`` as an answer (0), a question (1) or uncertain (2).

    Compares the mean cosine similarity of ``text`` against every stored
    question vs. every stored answer; verdicts within ``threshold`` percent
    of each other are labelled "uncertain" (2) so they can be sorted by hand.
    NOTE(review): only the uncertain-question branch returns a value — the
    other branches just print; callers relying on the return value should
    confirm this asymmetry is intended.
    """
    # Establish connection
    core.connect()
    # Call server and get result
    questionsResult = core.run_read_query("SELECT text FROM chatbot WHERE isQuestion = 1")
    answersResult = core.run_read_query("SELECT text FROM chatbot WHERE isQuestion = 0")
    # Convert question and answer result to arrays
    questions = []
    answers = []
    for index, row in questionsResult.iterrows():
        questions.append(row['text'])
    for index, row in answersResult.iterrows():
        answers.append(row['text'])
    # Get the sum of cosine similarities
    questionsSum = 0.0
    answersSum = 0.0
    for string in questions:
        questionsSum += cosineSimilarity.cosineSimilarity(text, string)
    for string in answers:
        answersSum += cosineSimilarity.cosineSimilarity(text, string)
    # Getting means of both similarities
    questionsMean = questionsSum / len(questions)
    answersMean = answersSum / len(answers)
    # check if it is question
    threshold = 40  # I just found 40 to be a good threshold it might be changed with further training
    # The resone for the unknown option is so that manual sorting of text can happen in the begining stages
    if questionsMean > answersMean:
        if otherFunctions.percentageDiff(questionsMean, answersMean) <= threshold:
            print(2)
            #core.run_insert_query("INSERT INTO chatbot(id, text, isQuestion, response, isWolframResponse, timestamp) VALUES (null,'" + text + "',2,'',0," + str(int(time.time())) + ")")
            return 2
        else:
            print(1)
            #core.run_insert_query("INSERT INTO chatbot(id, text, isQuestion, response, isWolframResponse, timestamp) VALUES (null,'" + text + "',1,'',0," + str(int(time.time())) + ")"
    else:
        if otherFunctions.percentageDiff(answersMean, questionsMean) <= threshold:
            print(2)
            #core.run_insert_query("INSERT INTO chatbot(id, text, isQuestion, response, isWolframResponse, timestamp) VALUES (null,'" + text + "',2,'',0," + str(int(time.time())) + ")")
        else:
            print(0)
            #core.run_insert_query("INSERT INTO chatbot(id, text, isQuestion, response, isWolframResponse, timestamp) VALUES (null,'" + text + "',0,'',0," + str(int(time.time())) + ")")
    # Disconnect from server
    core.disconnect()


# Classify the text passed on the command line.
arguments = sys.argv
isQuestion(arguments[1])
stackable_leftmost_rightmost.py | erjan/coding_exercises | 0 | 12760860 | '''
There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes.
The new pile should follow these directions: if cube_i is on top of cube_j then cube_j >= cube_i.
When stacking the cubes, you can only pick up either the
leftmost or the rightmost cube each time. Print "Yes" if it is
possible to stack the cubes. Otherwise, print "No".
Do not print the quotation marks.
'''
# Originally a quadratic "time out" solution: it recomputed max(cur) at
# every step and always grabbed the leftmost cube first (wrong on e.g.
# [1, 2]).  Rewritten as the standard O(n) two-pointer greedy.
def f(r):
    """Print "Yes" if the cubes in ``r`` can all be stacked by repeatedly
    taking either end cube, such that each placed cube is no larger than
    the cube beneath it; otherwise print "No".
    """
    top = float('inf')  # size of the cube currently on top of the pile
    lo, hi = 0, len(r) - 1
    while lo <= hi:
        # Greedily take the larger of the two ends; it must fit under `top`.
        if r[lo] >= r[hi]:
            pick = r[lo]
            lo += 1
        else:
            pick = r[hi]
            hi -= 1
        if pick > top:
            print('No')
            return
        top = pick
    print('Yes')
if __name__ == '__main__':
    # Read every test case first (case count, then per case the declared
    # length followed by the cube sizes), then run the checker on each list.
    n = int(input())
    total_list = list()
    for _ in range(n):
        length = int(input())  # declared length of the next list (not validated)
        l = list(int(i) for i in input().split(' '))
        total_list.append(l)
    for l in total_list:
        f(l)
#solution 2
def pilecub(cubes, n):
    """Return "Yes" if the ``n`` cubes can be stacked by always taking an
    end cube (pile must never grow upward), else "No".

    The sequence is stackable exactly when it is non-increasing up to some
    point and non-decreasing afterwards (a "valley" shape).
    """
    # Walk down the non-increasing prefix.
    i = 0
    while i < n - 1 and cubes[i] >= cubes[i + 1]:
        i += 1
    # Everything after the first ascent must be non-decreasing.
    if all(cubes[j] <= cubes[j + 1] for j in range(i + 1, n - 1)):
        return "Yes"
    return "No"
# Driver for solution 2: read T test cases of (count, cube sizes) and print
# the verdict for each.
T = int(input())
for _ in range(T):
    n = int(input())
    cubes = list(map(int,input().split()))
    print(pilecub(cubes,n))
pyrix/binarymatrix/binarymatrix.py | Abhi-1U/pyrix | 2 | 12760861 | <reponame>Abhi-1U/pyrix
#!/usr/bin/python3
# -*- coding : UTF-8 -*-
"""
Name : Pyrix/BinaryMatrix\n
Author : Abhi-1U <https://github.com/Abhi-1U>\n
Description : A Binary matrix manipulation library \n
Encoding : UTF-8\n
Version :0.7.19\n
Build :0.7.19/21-12-2020
"""
#*------- Imports -------------------------------------------------------------*
from pyrix.matrix import Matrix, matrixData
from pyrix.exception import binaryMatrixException,incompaitableTypeException
import random
import copy
#*-----------------------------------------------------------------------------*
"""
Unique methods List:
1. binary add
2. binary subtract
3. isBinaryMatrix
4. boolean/logical and
5. boolean/logical or
6. boolean/logical invert
7. boolean/logical xor
8. bitwise lshift
9. bitwise rshift
10. boolean/logical Nand
11. boolean/logical Nor
12. UnitBinaryMatrix
13. ZeroBinaryMatrix
14. IdentityBinaryMatrix
15. RandmBinaryMatrix
16. listifymatrix
17. reDimensionalizeMatrix
18. flipDimensions
19. JSON import/export
20. onesComplement
21. twosComplement
22. boolean/logical ExNor
"""
#*------- pyrix.binarymatrix.BinaryMatrix -------------------------------------*
class BinaryMatrix(Matrix):
    """
    A matrix restricted to the values 0 and 1, with boolean/bitwise algebra.

    Supports elementwise logic (AND/OR/XOR/NAND/NOR/XNOR/NOT), modulo-2
    add/subtract, one's/two's complement, logical/circular/arithmetic shifts
    over the row-major flattened bit sequence, and popcount.

    Fixes over the previous revision:
      * the dimension check in ``__init__`` was a tuple literal (always
        truthy), so malformed data was silently accepted;
      * ``twosComplement`` never carried (``==`` was used where ``=`` was
        intended), so it returned a plain one's complement;
      * the shift methods referenced ``self.nrow``/``self.ncol`` (attributes
        that do not exist) and fed a *flat* list back into the constructor,
        and left shifts used ``insert(-1, ...)``, which inserts before the
        last element instead of appending;
      * ``__lshift__``/``__rshift__`` compared the string bit-width against
        the int ``1`` and therefore always returned ``None``.
    """

    # Accepted spellings for shift directions.
    _RIGHT = ("r", "R", "right", "Right", "RIGHT")
    _LEFT = ("l", "L", "left", "Left", "LEFT")

    def __init__(self, nrow=1, ncol=1, data=None, mode='EBM', bit='1'):
        """Build a binary matrix; raises incompaitableTypeException when
        ``data`` does not have nrow rows of ncol columns (the old default
        ``data=[1]`` crashed, so ``None`` now means a 1x1 matrix of [[1]])."""
        if data is None:
            data = [[1]]
        if len(data) != nrow or len(data[0]) != ncol:
            raise incompaitableTypeException
        self.matrix = matrixData(nrow=nrow, ncol=ncol, data=data)
        self.matrix.classType = 'BinaryMatrix'
        self.matrix.mode = mode
        setattr(self.matrix, 'bitwidth', bit)
        # Validate every element up front (raises binaryMatrixException).
        self.isBinaryMatrix()

    def __str__(self):
        header = ("Binary Matrix (" + str(self.matrix.mode) + " Mode) "
                  + "Bit-Width :" + str(self.matrix.bitwidth) + ":")
        body = "".join(str(row) + "\n" for row in self.matrix.data)
        footer = ("Dimensions :" + str(self.matrix.dimensions[0]) + "x"
                  + str(self.matrix.dimensions[1]))
        return header + "\n" + body + footer

    __repr__ = __str__

    # *------- validation --------------------------------------------------*
    def isBinaryMatrix(self):
        """
        Verify every element is 0 or 1.  Sets ``matrix.binaryMatrix`` and
        raises ``binaryMatrixException`` on the first offending element.
        """
        for row in self.matrix.data:
            for value in row:
                if value != 0 and value != 1:
                    self.matrix.binaryMatrix = False
                    raise binaryMatrixException
        self.matrix.binaryMatrix = True
        return self.matrix.binaryMatrix

    # *------- internal helpers --------------------------------------------*
    def __elementwise(self, other, op):
        """Validate both operands and apply ``op`` to corresponding elements."""
        self.isBinaryMatrix()
        other.isBinaryMatrix()
        if self.matrix.dimensions != other.matrix.dimensions:
            raise incompaitableTypeException
        data = [[op(a, b) for a, b in zip(ra, rb)]
                for ra, rb in zip(self.matrix.data, other.matrix.data)]
        return BinaryMatrix(nrow=self.matrix.nrow, ncol=self.matrix.ncol, data=data)

    def __flatten(self):
        """Row-major flat copy of the elements (cached like listify used to)."""
        flat = [v for row in self.matrix.data for v in row]
        self.matrix.listifieddata = flat
        return flat

    def __rebuild(self, flat):
        """Reshape a flat bit list back into a matrix of this one's shape."""
        ncol = self.matrix.ncol
        rows = [flat[i:i + ncol] for i in range(0, len(flat), ncol)]
        return BinaryMatrix(nrow=self.matrix.nrow, ncol=ncol, data=rows)

    # *------- modulo-2 arithmetic ------------------------------------------*
    def __add__(self, BinaryMat2):
        """Elementwise addition modulo 2 (identical to XOR)."""
        return self.__elementwise(BinaryMat2, lambda a, b: (a + b) % 2)

    def __sub__(self, BinaryMat2):
        """Elementwise subtraction modulo 2 (also identical to XOR)."""
        return self.__elementwise(BinaryMat2, lambda a, b: (a - b) % 2)

    # *------- shifts -------------------------------------------------------*
    def __lshift__(self, bits):
        # int() makes the comparison bitwidth-representation tolerant; the
        # original compared the string '1' with the int 1 and returned None.
        if int(self.matrix.bitwidth) == 1:
            return self.logicalShift(direction="left", bits=bits)

    def __rshift__(self, bits):
        if int(self.matrix.bitwidth) == 1:
            return self.logicalShift(direction="right", bits=bits)

    def logicalShift(self, direction, bits):
        """Logical shift of the flattened bits; vacated positions become 0."""
        flat = self.__flatten()
        if direction in self._RIGHT:
            for _ in range(bits):
                flat.insert(0, 0)
                flat.pop()
        elif direction in self._LEFT:
            for _ in range(bits):
                flat.append(0)
                flat.pop(0)
        return self.__rebuild(flat)

    def circularShift(self, direction, bits):
        """Rotate the flattened bits by ``bits`` positions."""
        flat = self.__flatten()
        if direction in self._RIGHT:
            for _ in range(bits):
                flat.insert(0, flat.pop())
        elif direction in self._LEFT:
            for _ in range(bits):
                flat.append(flat.pop(0))
        return self.__rebuild(flat)

    def arithmeticShift(self, direction, bits):
        """Arithmetic shift: right shifts replicate the MSB, left fills 0."""
        flat = self.__flatten()
        if direction in self._RIGHT:
            for _ in range(bits):
                flat.insert(0, flat[0])
                flat.pop()
        elif direction in self._LEFT:
            for _ in range(bits):
                flat.append(0)
                flat.pop(0)
        return self.__rebuild(flat)

    # *------- boolean algebra ----------------------------------------------*
    def __and__(self, m2):
        return self.And(m2)

    def And(self, m2):
        """Elementwise boolean AND."""
        return self.__elementwise(m2, lambda a, b: a & b)

    def __or__(self, m2):
        return self.Or(m2)

    def Or(self, m2):
        """Elementwise boolean OR."""
        return self.__elementwise(m2, lambda a, b: a | b)

    def __xor__(self, m2):
        return self.ExOr(m2)

    def ExOr(self, m2):
        """Elementwise boolean exclusive OR."""
        return self.__elementwise(m2, lambda a, b: a ^ b)

    def Nand(self, Bmatrix2):
        """Elementwise boolean NAND."""
        return self.__elementwise(Bmatrix2, lambda a, b: 1 - (a & b))

    def Nor(self, Bmatrix2):
        """Elementwise boolean NOR."""
        return self.__elementwise(Bmatrix2, lambda a, b: 1 - (a | b))

    def ExNor(self, Bmatrix2):
        """Elementwise boolean exclusive NOR."""
        return self.__elementwise(Bmatrix2, lambda a, b: 1 - (a ^ b))

    def __invert__(self):
        return self.Not()

    def Not(self):
        """Elementwise boolean NOT (invert every bit)."""
        self.isBinaryMatrix()
        data = [[1 - v for v in row] for row in self.matrix.data]
        return BinaryMatrix(nrow=self.matrix.nrow, ncol=self.matrix.ncol, data=data)

    # *------- complements --------------------------------------------------*
    def onesComplement(self):
        """One's complement: invert every bit."""
        return self.Not()

    def twosComplement(self):
        """
        Two's complement: one's complement plus one, treating the matrix as
        a row-major bit string whose LSB is the bottom-right element.
        """
        result = self.onesComplement()
        flat = [v for row in result.matrix.data for v in row]
        # Add one starting at the LSB; a carry out of the MSB is dropped.
        for idx in range(len(flat) - 1, -1, -1):
            if flat[idx] == 0:
                flat[idx] = 1
                break
            flat[idx] = 0
        ncol = result.matrix.ncol
        result.matrix.data = [flat[i:i + ncol] for i in range(0, len(flat), ncol)]
        return result

    # *------- misc ---------------------------------------------------------*
    def popcount(self):
        """Return the number of nonzero elements."""
        return sum(1 for v in self.__flatten() if v != 0)
# *------- pyrix.binarymatrix.zeroBinaryMatrix() ------------------------------*
def zeroBinaryMatrix(nrow, ncol):
    """
    Create an all-zero binary matrix of the given dimensions.

    Returns a BinaryMatrix object.
    """
    rows = [[0 for _ in range(ncol)] for _ in range(nrow)]
    return BinaryMatrix(nrow=nrow, ncol=ncol, data=rows)
# *------- pyrix.binarymatrix.unitBinaryMatrix() ------------------------------*
def unitBinaryMatrix(nrow, ncol):
    """
    Create an all-ones binary matrix of the given dimensions.

    Returns a BinaryMatrix object.
    """
    rows = [[1 for _ in range(ncol)] for _ in range(nrow)]
    return BinaryMatrix(nrow=nrow, ncol=ncol, data=rows)
# *------- pyrix.binarymatrix.identityBinaryMatrix() --------------------------*
def identityBinaryMatrix(nrow, ncol):
    """Create an identity binary matrix of the given dimensions.

    Works for square matrices only; raises incompaitableTypeException when
    nrow != ncol. Returns a BinaryMatrix object.
    """
    if nrow != ncol:
        raise incompaitableTypeException
    cells = [[1 if r == c else 0 for c in range(ncol)] for r in range(nrow)]
    return BinaryMatrix(nrow=nrow, ncol=ncol, data=cells)
# *------- pyrix.binarymatrix.randomBinaryMatrix() ----------------------------*
def randomBinaryMatrix(scale, type):
    """
    Generates a pseudo random BinaryMatrix of a given scale(small,large) and
    datatype(int).

    Returns None for unrecognized scale/type combinations (this matches the
    original implementation, which simply fell off the end of the function).
    """
    # Both original branches were identical except for the dimension range;
    # deduplicate them through a lookup table.
    dim_ranges = {"small": (1, 10), "large": (10, 100)}
    if type != "int" or scale not in dim_ranges:
        return None  # explicit instead of the original implicit None
    low, high = dim_ranges[scale]
    nrow = random.randint(low, high)
    ncol = random.randint(low, high)
    data = [[random.randint(0, 1) for _ in range(ncol)] for _ in range(nrow)]
    return BinaryMatrix(nrow=nrow, ncol=ncol, data=data)
# *------- pyrix.binarymatrix.Copy() ------------------------------------------*
def Copy(AnyObject):
    """Return a deep copy of *AnyObject* (nested objects are duplicated too)."""
    duplicate = copy.deepcopy(AnyObject)
    return duplicate
| 1.851563 | 2 |
tencentcloud/facefusion/v20181201/facefusion_client.py | PlasticMem/tencentcloud-sdk-python | 465 | 12760862 | <filename>tencentcloud/facefusion/v20181201/facefusion_client.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.facefusion.v20181201 import models
class FacefusionClient(AbstractClient):
    """Client for the Tencent Cloud Face Fusion API, version 2018-12-01."""
    _apiVersion = '2018-12-01'
    _endpoint = 'facefusion.tencentcloudapi.com'
    _service = 'facefusion'

    def _call_action(self, action, request, response_cls):
        """Serialize *request*, invoke *action* and deserialize the reply.

        Shared implementation for all API methods below (the original file
        repeated this body four times verbatim).

        :param action: API action name, e.g. "FaceFusion".
        :param request: request model exposing ``_serialize()``.
        :param response_cls: response model class to deserialize into.
        :raises TencentCloudSDKException: on any API error or local failure.
        """
        try:
            params = request._serialize()
            body = self.call(action, params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = response_cls()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # BUG FIX: Python 3 exceptions have no ``message`` attribute
                # (removed with PEP 352); the original ``e.message`` raised
                # AttributeError and masked the real error. Wrap the exception
                # type and text instead.
                raise TencentCloudSDKException(type(e).__name__, str(e))

    def DescribeMaterialList(self, request):
        """Return an activity's material data (including material status), so
        that callers can inspect material parameters via the API instead of
        the Tencent Cloud console.

        :param request: Request instance for DescribeMaterialList.
        :type request: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListRequest`
        :rtype: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListResponse`
        """
        return self._call_action(
            "DescribeMaterialList", request, models.DescribeMaterialListResponse)

    def FaceFusion(self, request):
        """Fuse an uploaded face image with a template and return the result.

        Rate limits: 1 request/s for unpublished activities, 50 requests/s for
        published ones (raise limits via the console if needed). The common
        signature method must be V3, i.e. SignatureMethod=TC3-HMAC-SHA256.

        :param request: Request instance for FaceFusion.
        :type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionRequest`
        :rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionResponse`
        """
        return self._call_action("FaceFusion", request, models.FaceFusionResponse)

    def FaceFusionLite(self, request):
        """Activity-specific lite variant; not recommended. Prefer the
        FaceFusion or FuseFace (selective fusion) APIs instead.

        :param request: Request instance for FaceFusionLite.
        :type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteRequest`
        :rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteResponse`
        """
        return self._call_action(
            "FaceFusionLite", request, models.FaceFusionLiteResponse)

    def FuseFace(self, request):
        """Single- or multi-face fusion: upload face images and get the image
        fused with the template (see the "selective face fusion" guide).

        Rate limits: 1 request/s for unpublished activities, 50 requests/s for
        published ones. The common signature method must be V3
        (SignatureMethod=TC3-HMAC-SHA256).

        :param request: Request instance for FuseFace.
        :type request: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceRequest`
        :rtype: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceResponse`
        """
        return self._call_action("FuseFace", request, models.FuseFaceResponse)
topi/python/topi/hls/__init__.py | mingwayzhang/tvm | 64 | 12760863 | # pylint: disable=redefined-builtin, wildcard-import
"""HLS specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
from .nn import *
| 0.980469 | 1 |
src/autopalette/render.py | indradhanush/autopalette | 11 | 12760864 | from typing import Union, ClassVar
import sty
from colour import Color
from autopalette.colormatch import ColorPoint, AnsiCodeType
from autopalette.palette import Ansi256Palette, Ansi16Palette, Ansi8Palette
from autopalette.utils import rgb_to_RGB255
# Type aliases used in the renderer signatures below.
OptionalColor = Union['Color', None]
# NOTE(review): ``ClassVar`` normally annotates class attributes, not values;
# ``typing.Type['BasePalette']`` is probably what was intended here — confirm
# before changing, since these aliases appear in public signatures.
OptionalPalette = ClassVar['BasePalette']
OptionalRenderer = ClassVar['Renderer']
class BaseRenderer(object):
    """Common base for terminal color renderers.

    Holds the primary palette and a fallback palette (both default to 256
    colors) and provides a shared brightness heuristic.
    """

    def __init__(self,
                 palette: OptionalPalette = None,
                 fallback: OptionalPalette = None) -> None:
        # Truthiness check (not an ``is None`` test) preserved from the
        # original implementation.
        self.palette = palette if palette else Ansi256Palette()
        self.fallback = fallback if fallback else Ansi256Palette()

    def render(self, text, fg: Color, bg: OptionalColor = None):
        """Subclasses must implement the actual escape-code rendering."""
        raise NotImplementedError()

    def is_bright(self, color: Color):
        """Heuristic: is *color* perceived as bright (HSL based)?"""
        saturation = color.get_saturation()
        luminance = color.get_luminance()
        if saturation == 0 and luminance == 1:
            return True  # pure white
        if luminance > 0.7:
            return True
        return saturation >= 0.3 and luminance >= 0.3
class Ansi256Renderer(BaseRenderer):
    """Renderer targeting the 256-color ANSI palette.

    Colors are resolved through ``self.palette``; when the palette yields no
    ANSI code, ``self.fallback`` is consulted instead.
    """

    def render(self, text, fg: Color, bg: OptionalColor = None, ansi_reset=False):
        """Wrap *text* in a 256-color foreground (and optional background)
        escape sequence; return *text* unstyled when *ansi_reset* is truthy."""
        if ansi_reset:
            return text
        fg = self.palette.match(fg, ansi=True)
        # Fall back when the primary palette has no ANSI code for this color.
        if fg.ansi == '' or fg.ansi is None:
            fg = self.fallback.match(fg.target, ansi=True)
        if bg:
            bg = self.palette.match(bg, ansi=True)
            if bg.ansi == '' or bg.ansi is None:
                bg = self.fallback.match(bg.target, ansi=True)
        return self._render(text, fg=fg, bg=bg)

    def _render(self, text, fg: ColorPoint, bg: ColorPoint = None):
        # Emit fg (and optional bg) escapes, the text, then a full style reset.
        out = ''
        out += sty.fg(fg.ansi)
        if bg:
            out += sty.bg(bg.ansi)
        out += text
        out += sty.rs.all
        return out

    def is_bright(self, color: Color):
        """Brightness check based on the matched ANSI code for the 16 base
        colors; defers to the base class HSL heuristic otherwise."""
        ansi = self.palette.match(color).ansi
        if ansi < 16:
            # Codes 0-6 and 8 are the dark base colors; all others are bright.
            if ansi in (0, 1, 2, 3, 4, 5, 6, 8):
                return False
            return True
        return super().is_bright(color)

    def bg(self, color: Color) -> str:
        """Return only the background escape sequence for *color*."""
        return sty.bg(self.palette.match(color, ansi=True).ansi)

    def fg(self, color: Color) -> str:
        """Return only the foreground escape sequence for *color*."""
        return sty.fg(self.palette.match(color, ansi=True).ansi)

    @property
    def rs(self):
        # Expose sty's reset registry (e.g. ``renderer.rs.all``).
        return sty.rs

    @property
    def ef(self):
        # Expose sty's effect registry (bold, italic, ...).
        return sty.ef
class AnsiNoColorRenderer(Ansi256Renderer):
    """Renderer for terminals without color support: emits the text verbatim."""

    def render(self, text, fg: Color, bg: OptionalColor = None, ansi_reset=False):
        # No escape codes at all — the color arguments are deliberately ignored.
        return text
class Ansi16Renderer(Ansi256Renderer):
    """Renderer targeting the 16-color ANSI palette."""

    def __init__(self,
                 palette: OptionalPalette = None,
                 fallback: OptionalPalette = None) -> None:
        # BUG FIX: the original passed the ``Ansi16Palette`` *class* instead of
        # an instance, so every later ``self.palette.match(...)`` call received
        # the color as ``self`` and failed. (The caller's ``palette`` argument
        # is intentionally overridden, as before.)
        super().__init__(palette=Ansi16Palette(), fallback=fallback)

    def render(self, text, fg: Color, bg: OptionalColor = None, ansi_reset=False):
        # todo: downsample 256 to 16 colors
        # BUG FIX: forward the caller's ``ansi_reset`` instead of hard-coding
        # False, which silently discarded the parameter.
        return super().render(text, fg=fg, bg=bg, ansi_reset=ansi_reset)
class Ansi8Renderer(Ansi256Renderer):
    """Renderer targeting the 8-color ANSI palette."""

    def __init__(self,
                 palette: OptionalPalette = None,
                 fallback: OptionalPalette = None) -> None:
        # BUG FIX: the original passed the ``Ansi8Palette`` *class* instead of
        # an instance, so every later ``self.palette.match(...)`` call received
        # the color as ``self`` and failed. (The caller's ``palette`` argument
        # is intentionally overridden, as before.)
        super().__init__(palette=Ansi8Palette(), fallback=fallback)

    def render(self, text, fg: Color, bg: OptionalColor = None, ansi_reset=False):
        # todo: downsample 256 to 8 colors
        # BUG FIX: forward the caller's ``ansi_reset`` instead of hard-coding
        # False, which silently discarded the parameter.
        return super().render(text, fg=fg, bg=bg, ansi_reset=ansi_reset)
class AnsiTruecolorRenderer(BaseRenderer):
    """Renderer for terminals with 24-bit ("truecolor") support: colors are
    emitted as raw RGB escape sequences instead of palette indices."""

    def match(self, color: Color) -> ColorPoint:
        """Wrap *color* in a ColorPoint whose ``ansi`` payload is its RGB triple.

        NOTE(review): ``render``/``fg``/``bg`` below resolve colors through
        ``self.palette.match`` rather than this method — confirm whether this
        helper was meant to replace the palette lookup.
        """
        ansi = rgb_to_RGB255(color.rgb)
        return ColorPoint(color, color, ansi=ansi)

    def render(self, text, fg: Color, bg: OptionalColor = None, ansi_reset=False):
        """Wrap *text* in truecolor fg (and optional bg) escape sequences;
        return *text* unstyled when *ansi_reset* is truthy."""
        if ansi_reset:
            return text
        fg = self.palette.match(fg)
        if bg:
            bg = self.palette.match(bg)
        return self._render(text, fg=fg, bg=bg)

    def _render(self, text, fg: ColorPoint, bg: ColorPoint = None):
        # Emit RGB escapes for fg (and optional bg), the text, then a reset.
        rgb = rgb_to_RGB255(fg.target.rgb)
        out = ''
        out += sty.fg(*rgb)
        if bg:
            bgrgb = rgb_to_RGB255(bg.target.rgb)
            out += sty.bg(*bgrgb)
        out += text
        out += sty.rs.all
        return out

    def bg(self, color: Color) -> str:
        """Return only the background escape sequence for *color*."""
        bg = self.palette.match(color)
        rgb = rgb_to_RGB255(bg.target.rgb)
        # BUG FIX: the original returned ``sty.fg(*rgb)`` here, emitting a
        # *foreground* escape from the background helper (compare
        # Ansi256Renderer.bg, which correctly uses ``sty.bg``).
        return sty.bg(*rgb)

    def fg(self, color: Color) -> str:
        """Return only the foreground escape sequence for *color*."""
        fg = self.palette.match(color)
        rgb = rgb_to_RGB255(fg.target.rgb)
        return sty.fg(*rgb)

    @property
    def rs(self):
        # Expose sty's reset registry (e.g. ``renderer.rs.all``).
        return sty.rs

    @property
    def ef(self):
        # Expose sty's effect registry (bold, italic, ...).
        return sty.ef
# Maps a terminal color-capability token (a color count or a $TERM-style
# terminal name) to the renderer class that handles that capability.
render_map = {
    # by advertised color count ('-1' means unlimited / truecolor)
    '-1': AnsiTruecolorRenderer,
    '0': AnsiNoColorRenderer,
    '8': Ansi8Renderer,
    '16': Ansi16Renderer,
    '88': Ansi256Renderer,
    '256': Ansi256Renderer,
    # by capability keyword
    'ansi': Ansi256Renderer,
    'rgb': AnsiTruecolorRenderer,
    'truecolor': AnsiTruecolorRenderer,
    '24bit': AnsiTruecolorRenderer,
    # by terminal name
    'vt100': AnsiNoColorRenderer,
    'vt200': AnsiNoColorRenderer,
    'vt220': AnsiNoColorRenderer,
    'rxvt': Ansi16Renderer,
    'rxvt-88color': Ansi256Renderer,
    'xterm': Ansi16Renderer,
    'xterm-color': Ansi16Renderer,
    'xterm-256color': Ansi256Renderer,
}
| 2.421875 | 2 |
src/tmtccmd/sendreceive/single_command_sender_receiver.py | spacefishy/tmtccmd | 2 | 12760865 | <reponame>spacefishy/tmtccmd<filename>src/tmtccmd/sendreceive/single_command_sender_receiver.py
#!/usr/bin/python3.8
"""
@file
tmtcc_config.py
@date
01.11.2019
@brief
Used to send single tcs and listen for replies after that
"""
from tmtccmd.ccsds.handler import CcsdsTmHandler
from tmtccmd.sendreceive.cmd_sender_receiver import CommandSenderReceiver
from tmtccmd.sendreceive.tm_listener import TmListener
from tmtccmd.com_if.com_interface_base import CommunicationInterface
from tmtccmd.utility.tmtc_printer import TmTcPrinter
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.tc.definitions import PusTcTupleT
logger = get_console_logger()
class SingleCommandSenderReceiver(CommandSenderReceiver):
    """
    Specific implementation of CommandSenderReceiver to send a single telecommand
    This object can be used by instantiating it and calling sendSingleTcAndReceiveTm()
    """
    def __init__(
        self,
        com_if: CommunicationInterface,
        tmtc_printer: TmTcPrinter,
        tm_listener: TmListener,
        tm_handler: CcsdsTmHandler,
        apid: int,
    ):
        """
        :param com_if: CommunicationInterface object, passed on to CommandSenderReceiver
        :param tm_listener: TmListener object which runs in the background and receives all TM
        :param tmtc_printer: TmTcPrinter object, passed on to CommandSenderReceiver
        :param tm_handler: CCSDS TM handler which processes the received packet queue
        :param apid: application process identifier used to route/filter TM packets
        """
        super().__init__(
            com_if=com_if,
            tm_listener=tm_listener,
            tmtc_printer=tmtc_printer,
            tm_handler=tm_handler,
            apid=apid,
        )
    def send_single_tc_and_receive_tm(self, pus_packet_tuple: PusTcTupleT):
        """
        Send a single telecommand passed to the class and wait for replies
        :param pus_packet_tuple: tuple of (raw TC bytes, TC packet object)
        :return: None
        """
        try:
            pus_packet_raw, pus_packet_obj = pus_packet_tuple
        except TypeError:
            logger.error("SingleCommandSenderReceiver: Invalid command input")
            return
        self._operation_pending = True
        # SEQUENCE mode: the listener collects replies belonging to this TC.
        self._tm_listener.set_listener_mode(TmListener.ListenerModes.SEQUENCE)
        self._tmtc_printer.print_telecommand(
            tc_packet_obj=pus_packet_obj, tc_packet_raw=pus_packet_raw
        )
        self._com_if.send(data=pus_packet_raw)
        self._last_tc = pus_packet_raw
        self._last_tc_obj = pus_packet_obj
        while self._operation_pending:
            # wait until reply is received
            # NOTE(review): assumes the base-class helper clears
            # self._operation_pending once a reply (or timeout) occurred;
            # otherwise this loop would never terminate — confirm.
            super()._check_for_first_reply()
        if self._reply_received:
            self._tm_listener.set_mode_op_finished()
            # Drain this APID's packet queue and hand it to the TM handler.
            packet_queue = self._tm_listener.retrieve_ccsds_tm_packet_queue(
                apid=self._apid, clear=True
            )
            self._tm_handler.handle_ccsds_packet_queue(
                apid=self._apid, packet_queue=packet_queue
            )
            logger.info("SingleCommandSenderReceiver: Reply received")
            logger.info("Listening for packages ...")
| 2.390625 | 2 |
src/evaluation/random_dataset.py | Zethson/MHCBoost | 1 | 12760866 | import logging
from src.evaluation.stats import stats_evaluation
# Module-level console logging setup: one stream handler with a timestamped
# format, attached to this validator's dedicated logger at INFO level.
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
LOG = logging.getLogger("Random Dataset Validator")
LOG.addHandler(console)
LOG.setLevel(logging.INFO)
def random_dataset_split_eval(classifier, peptides_test, classification_test, test_size_percent):
    """
    Predict on a held-out random split of the training data and report
    statistics by delegating to ``stats_evaluation``.
    :param classifier: fitted model exposing a ``predict`` method
    :param peptides_test: feature rows of the held-out split
    :param classification_test: ground-truth labels for the split
    :param test_size_percent: size of test dataset in percent/100
    :return:
    """
    LOG.info("Evaluating a random subset of size: " + str(test_size_percent) + " of the training data")
    predictions = classifier.predict(peptides_test)
    LOG.info("Successfully evaluated a random subset of the training data")
    stats_evaluation(classification_test, predictions)
| 2.765625 | 3 |
src/lib/mine/utility/test_config.py | rdw20170120/workstation | 0 | 12760867 | #!/usr/bin/env false
"""TODO: Write
"""
# Internal packages (absolute references, distributed with Python)
# External packages (absolute references, NOT distributed with Python)
# Library modules (absolute references, NOT packaged, in project)
from utility import my_assert as is_
# Project modules (relative references, NOT packaged, in project)
def test_application_name(config):
    """The configured application name is a non-empty string."""
    assert is_.nonempty_string(config.application_name)
def test_log_directory(config):
    """Log directory is an absolute path; if it exists, an absolute directory."""
    log_dir = config.log_directory
    assert is_.absolute_path(log_dir)
    if log_dir.exists():
        assert is_.absolute_directory(log_dir)
def test_log_file(config):
    """The configured log file is an absolute path."""
    assert is_.absolute_path(config.log_file)
def test_log_name(config):
    """The configured log name is a non-empty string."""
    assert is_.nonempty_string(config.log_name)
def test_log_suffix(config):
    """The configured log suffix is a non-empty string."""
    assert is_.nonempty_string(config.log_suffix)
def test_pid_file(config):
    """The configured PID file is an absolute path."""
    assert is_.absolute_path(config.pid_file)
def test_pid_suffix(config):
    """The configured PID suffix is a non-empty string."""
    assert is_.nonempty_string(config.pid_suffix)
def test_project_directory(config):
    """The configured project directory is an absolute directory."""
    assert is_.absolute_directory(config.project_directory)
def test_temporary_directory(config):
    """The configured temporary directory is an absolute directory."""
    assert is_.absolute_directory(config.temporary_directory)
"""DisabledContent
"""
| 2.578125 | 3 |
tethys_datasets/base.py | CI-WATER/django-tethys_datasets | 0 | 12760868 | from tethys_dataset_services.valid_engines import VALID_ENGINES, VALID_SPATIAL_ENGINES
class DatasetService:
    """
    Used to define dataset services for apps.
    """

    def __init__(self, name, type, endpoint, apikey=None, username=None, password=None):
        """
        Constructor

        :param name: display name of the service
        :param type: engine type key; must be one of VALID_ENGINES
        :param endpoint: API endpoint URL of the service
        :param apikey: optional API key credential
        :param username: optional username credential
        :param password: optional password credential
        :raises ValueError: if *type* is not a key of VALID_ENGINES
        """
        self.name = name

        # Validate the type against the known dataset engines.
        if type in VALID_ENGINES:
            self.type = type
            self.engine = VALID_ENGINES[type]
        else:
            # BUG FIX: dict views are not subscriptable in Python 3, so the
            # original ``VALID_ENGINES.keys()[:-1]`` / ``[0]`` / ``[-1]``
            # raised TypeError while building the error message. Materialize
            # the keys as a list first.
            valid_types = list(VALID_ENGINES)
            if len(valid_types) > 2:
                comma_separated_types = ', '.join('"{0}"'.format(t) for t in valid_types[:-1])
                last_type = '"{0}"'.format(valid_types[-1])
                valid_types_string = '{0}, and {1}'.format(comma_separated_types, last_type)
            elif len(valid_types) == 2:
                valid_types_string = '"{0}" and "{1}"'.format(valid_types[0], valid_types[1])
            else:
                valid_types_string = '"{0}"'.format(valid_types[0])

            raise ValueError('The value "{0}" is not a valid for argument "type" of DatasetService. Valid values for '
                             '"type" argument include {1}.'.format(type, valid_types_string))

        self.endpoint = endpoint
        self.apikey = apikey
        self.username = username
        self.password = password

    def __repr__(self):
        """
        String representation
        """
        return '<DatasetService: type={0}, api_endpoint={1}>'.format(self.type, self.endpoint)
class SpatialDatasetService:
    """
    Used to define spatial dataset services for apps.
    """

    def __init__(self, name, type, endpoint, apikey=None, username=None, password=None):
        """
        Constructor

        :param name: display name of the service
        :param type: engine type key; must be one of VALID_SPATIAL_ENGINES
        :param endpoint: API endpoint URL of the service
        :param apikey: optional API key credential
        :param username: optional username credential
        :param password: optional password credential
        :raises ValueError: if *type* is not a key of VALID_SPATIAL_ENGINES
        """
        self.name = name

        # Validate the type against the known spatial dataset engines.
        if type in VALID_SPATIAL_ENGINES:
            self.type = type
            self.engine = VALID_SPATIAL_ENGINES[type]
        else:
            # BUG FIX: dict views are not subscriptable in Python 3, so the
            # original ``VALID_SPATIAL_ENGINES.keys()[:-1]`` / ``[0]`` /
            # ``[-1]`` raised TypeError while building the error message.
            # Materialize the keys as a list first.
            valid_types = list(VALID_SPATIAL_ENGINES)
            if len(valid_types) > 2:
                comma_separated_types = ', '.join('"{0}"'.format(t) for t in valid_types[:-1])
                last_type = '"{0}"'.format(valid_types[-1])
                valid_types_string = '{0}, and {1}'.format(comma_separated_types, last_type)
            elif len(valid_types) == 2:
                valid_types_string = '"{0}" and "{1}"'.format(valid_types[0], valid_types[1])
            else:
                valid_types_string = '"{0}"'.format(valid_types[0])

            raise ValueError('The value "{0}" is not a valid for argument "type" of SpatialDatasetService. Valid values for '
                             '"type" argument include {1}.'.format(type, valid_types_string))

        self.endpoint = endpoint
        self.apikey = apikey
        self.username = username
        self.password = password

    def __repr__(self):
        """
        String representation
        """
        return '<SpatialDatasetService: type={0}, api_endpoint={1}>'.format(self.type, self.endpoint)
| 2.515625 | 3 |
easytorch/core/runner.py | YuhaoZeng/easytorch | 0 | 12760869 | <filename>easytorch/core/runner.py
import os
import time
import logging
from abc import ABCMeta, abstractmethod
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from .meter_pool import MeterPool
from .checkpoint import get_ckpt_dict, load_ckpt, save_ckpt, backup_last_ckpt, clear_ckpt
from .data_loader import build_data_loader, build_data_loader_ddp
from .optimizer_builder import build_optim, build_lr_scheduler
from ..utils import TimePredictor, get_logger, get_rank, is_master, master_only, setup_random_seed
class Runner(metaclass=ABCMeta):
def __init__(self, cfg: dict, use_gpu: bool = True):
# default logger
self.logger = get_logger('easytorch')
# setup random seed
# each rank has different seed in distributed mode
self.seed = cfg.get('SEED')
if self.seed is not None:
setup_random_seed(self.seed + get_rank())
# param
self.use_gpu = use_gpu
self.model_name = cfg['MODEL']['NAME']
self.ckpt_save_dir = cfg['TRAIN']['CKPT_SAVE_DIR']
self.logger.info('ckpt save dir: \'{}\''.format(self.ckpt_save_dir))
self.ckpt_save_strategy = None
self.num_epochs = None
self.start_epoch = None
self.val_interval = 1
# create model
self.model = self.build_model(cfg)
# declare optimizer and lr_scheduler
self.optim = None
self.scheduler = None
# declare data loader
self.train_data_loader = None
self.val_data_loader = None
# declare meter pool
self.meter_pool = None
# declare tensorboard_writer
self.tensorboard_writer = None
def init_logger(self, logger: logging.Logger = None, logger_name: str = None,
log_file_name: str = None, log_level: int = logging.INFO):
"""Initialize logger.
Args:
logger (logging.Logger, optional): specified logger.
logger_name (str, optional): specified name of logger.
log_file_name (str, optional): logger file name.
log_level (int, optional): log level, default is INFO.
"""
if logger is not None:
self.logger = logger
elif logger_name is not None:
if log_file_name is not None:
log_file_name = '{}_{}.log'.format(log_file_name, time.strftime("%Y%m%d%H%M%S", time.localtime()))
log_file_path = os.path.join(self.ckpt_save_dir, log_file_name)
else:
log_file_path = None
self.logger = get_logger(logger_name, log_file_path, log_level)
else:
raise TypeError('At least one of logger and logger_name is not None')
def to_running_device(self, src: torch.Tensor or torch.Module) -> torch.Tensor or torch.Module:
"""Move `src` to the running device. If `self.use_gpu` is ```True```,
the running device is GPU, else the running device is CPU.
Args:
src (torch.Tensor or torch.Module): source
Returns:
target (torch.Tensor or torch.Module)
"""
if self.use_gpu:
return src.cuda()
else:
return src.cpu()
@staticmethod
@abstractmethod
def define_model(cfg: dict) -> nn.Module:
"""It must be implement to define the model for training or inference.
Users can select different models by param in cfg.
Args:
cfg (dict): config
Returns:
model (nn.Module)
"""
pass
@staticmethod
@abstractmethod
def build_train_dataset(cfg: dict) -> Dataset:
"""It must be implement to build dataset for training.
Args:
cfg (dict): config
Returns:
train dataset (Dataset)
"""
pass
@staticmethod
def build_val_dataset(cfg: dict):
"""It can be implement to build dataset for validation (not necessary).
Args:
cfg (dict): config
Returns:
val dataset (Dataset)
"""
raise NotImplementedError()
def build_train_data_loader(self, cfg: dict) -> DataLoader:
"""Build train dataset and dataloader.
Build dataset by calling ```self.build_train_dataset```,
build dataloader by calling ```build_data_loader``` or
```build_data_loader_ddp``` when DDP is initialized
Args:
cfg (dict): config
Returns:
train data loader (DataLoader)
"""
dataset = self.build_train_dataset(cfg)
if torch.distributed.is_initialized():
return build_data_loader_ddp(dataset, cfg['TRAIN']['DATA'])
else:
return build_data_loader(dataset, cfg['TRAIN']['DATA'])
def build_val_data_loader(self, cfg: dict) -> DataLoader:
"""Build val dataset and dataloader.
Build dataset by calling ```self.build_train_dataset```,
build dataloader by calling ```build_data_loader```.
Args:
cfg (dict): config
Returns:
val data loader (DataLoader)
"""
dataset = self.build_val_dataset(cfg)
return build_data_loader(dataset, cfg['VAL']['DATA'])
def build_model(self, cfg: dict) -> nn.Module:
"""Build model.
Initialize model by calling ```self.define_model```,
Moves model to the GPU.
If DDP is initialized, initialize the DDP wrapper.
Args:
cfg (dict): config
Returns:
model (nn.Module)
"""
model = self.define_model(cfg)
model = self.to_running_device(model)
if torch.distributed.is_initialized():
model = DDP(model, device_ids=[get_rank()])
return model
def get_ckpt_path(self, epoch: int) -> str:
"""Get checkpoint path.
The format is "{ckpt_save_dir}/{model_name}_{epoch}"
Args:
epoch (int): current epoch.
Returns:
checkpoint path (str)
"""
epoch_str = str(epoch).zfill(len(str(self.num_epochs)))
ckpt_name = '{}_{}.pt'.format(self.model_name, epoch_str)
return os.path.join(self.ckpt_save_dir, ckpt_name)
def save_model(self, epoch: int):
"""Save checkpoint every epoch.
checkpoint format is {
'epoch': current epoch ([1, num_epochs]),
'model_state_dict': state_dict of model,
'optim_state_dict': state_dict of optimizer
}
Decide whether to delete the last checkpoint by the checkpoint save strategy.
Args:
epoch (int): current epoch.
"""
ckpt_dict = get_ckpt_dict(self.model, self.optim, epoch)
# backup last epoch
last_ckpt_path = self.get_ckpt_path(epoch - 1)
backup_last_ckpt(last_ckpt_path, epoch, self.ckpt_save_strategy)
# save ckpt
ckpt_path = self.get_ckpt_path(epoch)
save_ckpt(ckpt_dict, ckpt_path, self.logger)
# clear ckpt every 10 epoch or in the end
if epoch % 10 == 0 or epoch == self.num_epochs:
clear_ckpt(self.ckpt_save_dir)
def load_model_resume(self, strict: bool = True):
"""Load last checkpoint in checkpoint save dir to resume training.
Load model state dict.
Load optimizer state dict.
Load start epoch and set it to lr_scheduler.
Args:
strict (bool, optional): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
"""
try:
checkpoint_dict = load_ckpt(self.ckpt_save_dir, use_gpu=self.use_gpu, logger=self.logger)
if isinstance(self.model, DDP):
self.model.module.load_state_dict(checkpoint_dict['model_state_dict'], strict=strict)
else:
self.model.load_state_dict(checkpoint_dict['model_state_dict'], strict=strict)
self.optim.load_state_dict(checkpoint_dict['optim_state_dict'])
self.start_epoch = checkpoint_dict['epoch']
if self.scheduler is not None:
self.scheduler.last_epoch = checkpoint_dict['epoch']
self.logger.info('resume training')
except (IndexError, OSError, KeyError):
pass
def load_model(self, ckpt_path: str = None, strict: bool = True):
"""Load model state dict.
if param `ckpt_path` is None, load the last checkpoint in `self.ckpt_save_dir`,
else load checkpoint from `ckpt_path`
Args:
ckpt_path (str, optional): checkpoint path, default is None
strict (bool, optional): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
"""
try:
checkpoint_dict = load_ckpt(self.ckpt_save_dir, ckpt_path=ckpt_path, use_gpu=self.use_gpu,
logger=self.logger)
if isinstance(self.model, DDP):
self.model.module.load_state_dict(checkpoint_dict['model_state_dict'], strict=strict)
else:
self.model.load_state_dict(checkpoint_dict['model_state_dict'], strict=strict)
except (IndexError, OSError):
raise OSError('Ckpt file does not exist')
def train(self, cfg: dict):
"""Train model.
Train process:
[init_training]
for in train_epoch
[on_epoch_start]
for in train iters
[train_iters]
[on_epoch_end] ------> Epoch Val: val every n epoch
[on_validating_start]
for in val iters
val iter
[on_validating_end]
[on_training_end]
Args:
cfg (dict): config
"""
self.init_training(cfg)
# train time predictor
train_time_predictor = TimePredictor(self.start_epoch, self.num_epochs)
# training loop
for epoch_index in range(self.start_epoch, self.num_epochs):
epoch = epoch_index + 1
self.on_epoch_start(epoch)
epoch_start_time = time.time()
# start training
self.model.train()
for iter_index, data in enumerate(self.train_data_loader):
loss = self.train_iters(epoch, iter_index, data)
if loss is not None:
self.backward(loss)
# update lr_scheduler
if self.scheduler is not None:
self.scheduler.step()
epoch_end_time = time.time()
# epoch time
self.update_epoch_meter('train_time', epoch_end_time - epoch_start_time)
self.on_epoch_end(epoch)
expected_end_time = train_time_predictor.get_expected_end_time(epoch)
# estimate training finish time
if epoch < self.num_epochs:
self.logger.info('The estimated training finish time is {}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(expected_end_time))))
# log training finish time
self.logger.info('The training finished at {}'.format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
))
self.on_training_end()
def init_training(self, cfg: dict):
"""Initialize training
Args:
cfg (dict): config
"""
# init training param
self.num_epochs = cfg['TRAIN']['NUM_EPOCHS']
self.start_epoch = 0
self.ckpt_save_strategy = cfg['TRAIN'].get('CKPT_SAVE_STRATEGY')
# train data loader
self.train_data_loader = self.build_train_data_loader(cfg)
self.register_epoch_meter('train_time', 'train', '{:.2f} (s)', plt=False)
# create optim
self.optim = build_optim(cfg['TRAIN']['OPTIM'], self.model)
self.logger.info('set optim: ' + str(self.optim))
# create lr_scheduler
if hasattr(cfg['TRAIN'], 'LR_SCHEDULER'):
self.scheduler = build_lr_scheduler(cfg['TRAIN']['LR_SCHEDULER'], self.optim)
self.logger.info('set lr_scheduler: ' + str(self.scheduler))
self.register_epoch_meter('lr', 'train', '{:.2e}')
# fine tune
if hasattr(cfg['TRAIN'], 'FINETUNE_FROM'):
self.load_model(cfg['TRAIN']['FINETUNE_FROM'])
self.logger.info('start fine tuning')
# resume
self.load_model_resume()
# init tensorboard(after resume)
if is_master():
self.tensorboard_writer = SummaryWriter(
os.path.join(self.ckpt_save_dir, 'tensorboard'),
purge_step=(self.start_epoch + 1) if self.start_epoch != 0 else None
)
# init validation
if hasattr(cfg, 'VAL'):
self.init_validation(cfg)
def on_epoch_start(self, epoch: int):
"""Callback at the start of an epoch.
Args:
epoch (int): current epoch
"""
# print epoch num
self.logger.info('epoch {:d} / {:d}'.format(epoch, self.num_epochs))
# update lr meter
if self.scheduler is not None:
self.update_epoch_meter('lr', self.scheduler.get_last_lr()[0])
# set epoch for sampler in distributed mode
# see https://pytorch.org/docs/stable/data.html
sampler = self.train_data_loader.sampler
if torch.distributed.is_initialized() and isinstance(sampler, DistributedSampler) and sampler.shuffle:
sampler.set_epoch(epoch)
def on_epoch_end(self, epoch: int):
"""Callback at the end of an epoch.
Args:
epoch (int): current epoch.
"""
# print train meters
self.print_epoch_meters('train')
# tensorboard plt meters
self.plt_epoch_meters('train', epoch)
# validate
if self.val_data_loader is not None and epoch % self.val_interval == 0:
self.validate(train_epoch=epoch)
# save model
if is_master():
self.save_model(epoch)
# reset meters
self.reset_epoch_meters()
def on_training_end(self):
"""Callback at the end of training.
"""
if is_master():
# close tensorboard writer
self.tensorboard_writer.close()
@abstractmethod
def train_iters(self, epoch: int, iter_index: int, data: torch.Tensor or tuple) -> torch.Tensor:
"""It must be implement to define training detail.
If it returns `loss`, the function ```self.backward``` will be called.
Args:
epoch (int): current epoch.
iter_index (int): current iter.
data (torch.Tensor or tuple): Data provided by DataLoader
Returns:
loss (torch.Tensor)
"""
pass
def backward(self, loss: torch.Tensor):
"""Backward and update params.
Args:
loss (torch.Tensor): loss
"""
self.optim.zero_grad()
loss.backward()
self.optim.step()
@torch.no_grad()
@master_only
def validate(self, cfg: dict = None, train_epoch: int = None):
"""Validate model.
Args:
cfg (dict, optional): config
train_epoch (int, optional): current epoch if in training process.
"""
# init validation if not in training process
if train_epoch is None:
self.init_validation(cfg)
self.on_validating_start()
val_start_time = time.time()
self.model.eval()
# val loop
for iter_index, data in enumerate(self.val_data_loader):
self.val_iters(iter_index, data)
val_end_time = time.time()
self.update_epoch_meter('val_time', val_end_time - val_start_time)
# print val meters
self.print_epoch_meters('val')
if train_epoch is not None:
# tensorboard plt meters
self.plt_epoch_meters('val', train_epoch // self.val_interval)
self.on_validating_end()
@master_only
def init_validation(self, cfg: dict):
"""Initialize validation
Args:
cfg (dict): config
"""
self.val_interval = cfg['VAL'].get('INTERVAL', 1)
self.val_data_loader = self.build_val_data_loader(cfg)
self.register_epoch_meter('val_time', 'val', '{:.2f} (s)', plt=False)
@master_only
def on_validating_start(self):
    """Hook called just before the validation loop; no-op by default."""
    pass
@master_only
def on_validating_end(self):
    """Hook called right after the validation loop; no-op by default."""
    pass
def val_iters(self, iter_index: int, data: "torch.Tensor | tuple"):
    """Define the computation of one validation iteration (optional override).

    Bug fix: the original annotation ``torch.Tensor or tuple`` evaluates to
    just ``torch.Tensor``; the string annotation states the intended union
    without being evaluated.

    Args:
        iter_index (int): current validation iteration index.
        data: the batch yielded by the validation DataLoader.

    Raises:
        NotImplementedError: subclasses that use ``validate`` must override
        this method.
    """
    raise NotImplementedError()
@master_only
def register_epoch_meter(self, name, meter_type, fmt='{:f}', plt=True):
    """Register a new epoch meter, creating the meter pool lazily.

    Args:
        name: meter identifier.
        meter_type: meter group (e.g. 'train' or 'val').
        fmt: format string used when the meter is printed.
        plt: whether the meter is also plotted to tensorboard.
    """
    pool = self.meter_pool
    if pool is None:
        pool = MeterPool()
        self.meter_pool = pool
    pool.register(name, meter_type, fmt, plt)
@master_only
def update_epoch_meter(self, name, value):
    """Accumulate ``value`` into the epoch meter called ``name``."""
    self.meter_pool.update(name, value)
@master_only
def print_epoch_meters(self, meter_type):
    """Log the current values of all meters in group ``meter_type``."""
    self.meter_pool.print_meters(meter_type, self.logger)
@master_only
def plt_epoch_meters(self, meter_type, step):
    """Write all meters in group ``meter_type`` to tensorboard at ``step``."""
    self.meter_pool.plt_meters(meter_type, step, self.tensorboard_writer)
@master_only
def reset_epoch_meters(self):
    """Reset every registered meter (typically at the end of an epoch)."""
    self.meter_pool.reset()
| 2.125 | 2 |
testing/components/distributions/laplace_test.py | JeremiasKnoblauch/MXFusion | 2 | 12760870 | import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples
from mxfusion.components.distributions import Laplace
from mxfusion.util.testutils import numpy_array_reshape, plot_univariate
from mxfusion.util.testutils import MockMXNetRandomGenerator
from scipy.stats import laplace
@pytest.mark.usefixtures("set_seed")
class TestLaplaceDistribution(object):
    """Tests for the MXFusion Laplace distribution: log-pdf and sampling."""

    @pytest.mark.parametrize(
        "dtype, location, location_is_samples, scale, scale_is_samples, rv, rv_is_samples, num_samples", [
            (np.float64, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
            (np.float64, np.random.rand(3,2), False, np.random.rand(5,3,2)+0.1, True, np.random.rand(5,3,2), True, 5),
            (np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
            (np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, np.random.rand(3,2), False, 1),
            (np.float32, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, np.random.rand(5,3,2), True, 5),
        ])
    def test_log_pdf(self, dtype, location, location_is_samples, scale, scale_is_samples, rv, rv_is_samples,
                     num_samples):
        """Compare MXFusion's log_pdf against scipy.stats.laplace.logpdf."""
        has_any_samples = any([location_is_samples, scale_is_samples, rv_is_samples])
        rv_shape = rv.shape[1:] if rv_is_samples else rv.shape
        n_dim = 1 + len(rv.shape) if has_any_samples and not rv_is_samples else len(rv.shape)

        # Reference value computed with scipy on suitably reshaped arrays.
        location_np = numpy_array_reshape(location, location_is_samples, n_dim)
        scale_np = numpy_array_reshape(scale, scale_is_samples, n_dim)
        rv_np = numpy_array_reshape(rv, rv_is_samples, n_dim)
        log_pdf_np = laplace.logpdf(rv_np, location_np, scale_np)

        dist = Laplace.define_variable(shape=rv_shape, dtype=dtype).factor
        location_mx = mx.nd.array(location, dtype=dtype)
        if not location_is_samples:
            location_mx = add_sample_dimension(mx.nd, location_mx)
        scale_mx = mx.nd.array(scale, dtype=dtype)
        if not scale_is_samples:
            scale_mx = add_sample_dimension(mx.nd, scale_mx)
        rv_mx = mx.nd.array(rv, dtype=dtype)
        if not rv_is_samples:
            rv_mx = add_sample_dimension(mx.nd, rv_mx)
        variables = {dist.location.uuid: location_mx, dist.scale.uuid: scale_mx,
                     dist.random_variable.uuid: rv_mx}
        log_pdf_rt = dist.log_pdf(F=mx.nd, variables=variables)

        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert array_has_samples(mx.nd, log_pdf_rt) == has_any_samples
        if has_any_samples:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        rtol, atol = (1e-7, 1e-10) if np.issubdtype(dtype, np.float64) else (1e-4, 1e-5)
        assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)

    @pytest.mark.parametrize(
        "dtype, location, location_is_samples, scale, scale_is_samples, rv_shape, num_samples", [
            (np.float64, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, (3,2), 5),
            (np.float64, np.random.rand(3,2), False, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
            (np.float64, np.random.rand(3,2), False, np.random.rand(3,2)+0.1, False, (3,2), 5),
            (np.float64, np.random.rand(5,3,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
            (np.float32, np.random.rand(5,3,2), True, np.random.rand(3,2)+0.1, False, (3,2), 5),
        ])
    def test_draw_samples(self, dtype, location, location_is_samples, scale,
                          scale_is_samples, rv_shape, num_samples):
        """Check draw_samples against hand-computed location + noise * scale."""
        n_dim = 1 + len(rv_shape)
        location_np = numpy_array_reshape(location, location_is_samples, n_dim)
        scale_np = numpy_array_reshape(scale, scale_is_samples, n_dim)

        # A fixed noise tensor is fed through a mock RNG so the expected
        # samples can be computed in numpy.
        noise = np.random.laplace(size=(num_samples,) + rv_shape)
        expected_samples = location_np + noise * scale_np
        rand_gen = MockMXNetRandomGenerator(mx.nd.array(noise.flatten(), dtype=dtype))

        dist = Laplace.define_variable(shape=rv_shape, dtype=dtype, rand_gen=rand_gen).factor
        location_mx = mx.nd.array(location, dtype=dtype)
        if not location_is_samples:
            location_mx = add_sample_dimension(mx.nd, location_mx)
        scale_mx = mx.nd.array(scale, dtype=dtype)
        if not scale_is_samples:
            scale_mx = add_sample_dimension(mx.nd, scale_mx)
        variables = {dist.location.uuid: location_mx, dist.scale.uuid: scale_mx}
        drawn = dist.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)

        assert np.issubdtype(drawn.dtype, dtype)
        assert array_has_samples(mx.nd, drawn)
        assert get_num_samples(mx.nd, drawn) == num_samples
        rtol, atol = (1e-7, 1e-10) if np.issubdtype(dtype, np.float64) else (1e-4, 1e-5)
        assert np.allclose(expected_samples, drawn.asnumpy(), rtol=rtol, atol=atol)

    def test_draw_samples_non_mock(self, plot=False):
        """Smoke-test the real sampler and sanity-check the fitted parameters."""
        dtype = np.float32
        num_samples = 100000
        location = np.array([0.5])
        scale = np.array([2])
        rv_shape = (1,)

        location_mx = add_sample_dimension(mx.nd, mx.nd.array(location, dtype=dtype))
        scale_mx = add_sample_dimension(mx.nd, mx.nd.array(scale, dtype=dtype))

        # rand_gen=None selects the real (non-mock) random generator.
        dist = Laplace.define_variable(shape=rv_shape, rand_gen=None, dtype=dtype).factor
        variables = {dist.location.uuid: location_mx, dist.scale.uuid: scale_mx}
        drawn = dist.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples)

        assert array_has_samples(mx.nd, drawn)
        assert get_num_samples(mx.nd, drawn) == num_samples
        assert drawn.dtype == dtype

        if plot:
            plot_univariate(samples=drawn, dist=laplace, loc=location[0], scale=scale[0])

        location_est, scale_est = laplace.fit(drawn.asnumpy().ravel())
        assert np.abs(location[0] - location_est) < 1e-2
        assert np.abs(scale[0] - scale_est) < 1e-2
| 1.953125 | 2 |
scipy/stats/tests/test_fit.py | letian-w/scipy | 1 | 12760871 | <reponame>letian-w/scipy
import os
import numpy as np
import numpy.testing as npt
from numpy.testing import assert_allclose
import pytest
from scipy import stats
from scipy.optimize import differential_evolution
from .test_continuous_basic import distcont
from scipy.stats._distn_infrastructure import FitError
from scipy.stats._distr_params import distdiscrete
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
fit_sizes = [1000, 5000, 10000] # sample sizes to try
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
# Distributions whose fit is known to be unreliable for the given method;
# these are xfailed in test_cont_fit unless SCIPY_XFAIL=1 is set.
mle_failing_fits = [
'burr',
'chi2',
'gausshyper',
'genexpon',
'gengamma',
'kappa4',
'ksone',
'kstwo',
'mielke',
'ncf',
'ncx2',
'pearson3',
'powerlognorm',
'truncexpon',
'tukeylambda',
'vonmises',
'levy_stable',
'trapezoid',
'truncweibull_min',
'studentized_range',
]
mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi',
'chi2', 'crystalball', 'dgamma', 'dweibull', 'f',
'fatiguelife', 'fisk', 'foldcauchy', 'genextreme',
'gengamma', 'genhyperbolic', 'gennorm', 'genpareto',
'halfcauchy', 'invgamma', 'invweibull', 'johnsonsu',
'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l',
'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami',
'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm',
'skewcauchy', 't', 'trapezoid', 'triang',
'truncweibull_min', 'tukeylambda', 'studentized_range']
# not sure if these fail, but they caused my patience to fail
mm_slow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
'kappa4', 'kstwobign', 'recipinvgauss', 'skewnorm',
'truncexpon', 'vonmises', 'vonmises_line']
# Map fit method name -> distributions expected to fail for that method.
failing_fits = {"MM": mm_failing_fits + mm_slow_fits, "MLE": mle_failing_fits}
# Don't run the fit test on these:
skip_fit = [
'erlang', # Subclass of gamma, generates a warning.
'genhyperbolic', # too slow
]
def cases_test_cont_fit():
    """Yield (distname, shapes) pairs whose continuous `fit` should be tested.

    This tests the closeness of the estimated parameters to the true
    parameters for the fit method of continuous distributions.
    Note: slow — some distributions do not converge with sample
    sizes <= 10000.
    """
    for distname, arg in distcont:
        if distname in skip_fit:
            continue
        yield distname, arg
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg', cases_test_cont_fit())
@pytest.mark.parametrize('method', ["MLE", "MM"])
def test_cont_fit(distname, arg, method):
"""Simulate data from `distname` and check that `fit` (by `method`)
recovers parameters close to the true ones."""
if distname in failing_fits[method]:
# Skip failing fits unless overridden
try:
xfail = not int(os.environ['SCIPY_XFAIL'])
except Exception:
xfail = True
if xfail:
msg = "Fitting %s doesn't work reliably yet" % distname
msg += (" [Set environment variable SCIPY_XFAIL=1 to run this"
" test nevertheless.]")
pytest.xfail(msg)
distfn = getattr(stats, distname)
# True parameters: the distribution shapes followed by loc=0, scale=1.
truearg = np.hstack([arg, [0.0, 1.0]])
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.full(distfn.numargs+2, thresh_min)]),
0)
for fit_size in fit_sizes:
# Note that if a fit succeeds, the other fit_sizes are skipped
np.random.seed(1234)
with np.errstate(all='ignore'):
rvs = distfn.rvs(size=fit_size, *arg)
est = distfn.fit(rvs, method=method) # start with default values
diff = est - truearg
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,
thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
else:
if np.all(np.abs(diff) <= diffthreshold):
break
# for/else: reached only when no sample size produced an acceptable fit
else:
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
def _check_loc_scale_mle_fit(name, data, desired, atol=None):
d = getattr(stats, name)
actual = d.fit(data)[-2:]
assert_allclose(actual, desired, atol=atol,
err_msg='poor mle fit of (loc, scale) in %s' % name)
def test_non_default_loc_scale_mle_fit():
    """MLE of (loc, scale) for uniform and expon on a small fixed sample."""
    sample = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00])
    _check_loc_scale_mle_fit('uniform', sample, [1.01, 0.99], 1e-3)
    _check_loc_scale_mle_fit('expon', sample, [1.01, 0.73875], 1e-3)
def test_expon_fit():
    """Regression test for gh-6167: expon fit with loc fixed at 0."""
    observations = [0, 0, 0, 0, 2, 2, 2, 2]
    phat = stats.expon.fit(observations, floc=0)
    assert_allclose(phat, [0, 1.0], atol=1e-3)
def test_fit_error():
    """beta.fit on degenerate 0/1 data must raise FitError (and warn)."""
    samples = np.concatenate([np.zeros(29), np.ones(21)])
    message = "Optimization converged to parameters that are..."
    with pytest.raises(FitError, match=message), \
            pytest.warns(RuntimeWarning):
        stats.beta.fit(samples)
@pytest.mark.parametrize("dist, params",
                         [(stats.norm, (0.5, 2.5)),  # type: ignore[attr-defined] # noqa
                          (stats.binom, (10, 0.3, 2))])  # type: ignore[attr-defined] # noqa
def test_nnlf_and_related_methods(dist, params):
    """nnlf and _penalized_nnlf must equal the negative summed log-p(x)f."""
    rng = np.random.default_rng(983459824)
    # Continuous distributions expose logpdf, discrete ones logpmf.
    logpxf = dist.logpdf if hasattr(dist, 'pdf') else dist.logpmf
    x = dist.rvs(*params, size=100, random_state=rng)
    reference = -logpxf(x, *params).sum()
    assert_allclose(dist.nnlf(params, x), reference)
    assert_allclose(dist._penalized_nnlf(params, x), reference)
def cases_test_fit():
"""Yield distribution names for TestFit.test_basic_fit, marked by expected
runtime (plain / slow / xslow) or skipped when tested separately."""
# These three fail default test; check separately
skip_basic_fit = {'argus', 'foldnorm', 'truncweibull_min'}
# status of 'studentized_range', 'ksone', 'kstwo' unknown; all others pass
slow_basic_fit = {'burr12', 'johnsonsb', 'bradford', 'fisk', 'mielke',
'exponpow', 'rdist', 'norminvgauss', 'betaprime',
'powerlaw', 'pareto', 'johnsonsu', 'loglaplace',
'wrapcauchy', 'weibull_max', 'arcsine', 'binom', 'rice',
'uniform', 'f', 'invweibull', 'genpareto', 'weibull_min',
'nbinom', 'kappa3', 'lognorm', 'halfgennorm', 'pearson3',
'alpha', 't', 'crystalball', 'fatiguelife', 'nakagami',
'kstwobign', 'gompertz', 'dweibull', 'lomax', 'invgauss',
'recipinvgauss', 'chi', 'foldcauchy', 'powernorm',
'gennorm', 'skewnorm', 'randint', 'genextreme'}
xslow_basic_fit = {'studentized_range', 'ksone', 'kstwo', 'levy_stable',
'nchypergeom_fisher', 'nchypergeom_wallenius',
'gausshyper', 'genexpon', 'gengamma', 'genhyperbolic',
'geninvgauss', 'tukeylambda', 'skellam', 'ncx2',
'hypergeom', 'nhypergeom', 'zipfian', 'ncf',
'truncnorm', 'powerlognorm', 'beta',
'loguniform', 'reciprocal', 'trapezoid', 'nct',
'kappa4', 'betabinom', 'exponweib', 'genhalflogistic',
'burr', 'triang'}
# non-str entries are distribution objects used by other tests; skip them too
for dist in dict(distdiscrete + distcont):
if dist in skip_basic_fit or not isinstance(dist, str):
reason = "tested separately"
yield pytest.param(dist, marks=pytest.mark.skip(reason=reason))
elif dist in slow_basic_fit:
reason = "too slow (>= 0.25s)"
yield pytest.param(dist, marks=pytest.mark.slow(reason=reason))
elif dist in xslow_basic_fit:
reason = "too slow (>= 1.0s)"
yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason))
else:
yield dist
def assert_nllf_less_or_close(dist, data, params1, params0, rtol=1e-7, atol=0):
    """Check that `params1` gives a negative log-likelihood on `data` that is
    smaller than (or close to) the one given by `params0`."""
    nllf_candidate = dist.nnlf(params1, data)
    nllf_reference = dist.nnlf(params0, data)
    # Keep the `not (a < b)` form: it also routes NaN values into the
    # allclose check instead of silently passing.
    if not (nllf_candidate < nllf_reference):
        np.testing.assert_allclose(nllf_candidate, nllf_reference,
                                   rtol=rtol, atol=atol)
class TestFit:
"""Input-validation and basic-behaviour tests for scipy.stats.fit."""
# Shared fixture data: binomial samples plus bounds in both
# sequence and dict form, and the tolerances used by most checks.
dist = stats.binom # type: ignore[attr-defined]
seed = 654634816187
rng = np.random.default_rng(seed)
data = stats.binom.rvs(5, 0.5, size=100, random_state=rng) # type: ignore[attr-defined] # noqa
shape_bounds_a = [(1, 10), (0, 1)]
shape_bounds_d = {'n': (1, 10), 'p': (0, 1)}
atol = 5e-2
rtol = 1e-2
tols = {'atol': atol, 'rtol': rtol}
def opt(self, *args, **kwds):
"""Deterministic global optimizer used by all fits in this class."""
return differential_evolution(*args, seed=0, **kwds)
def test_dist_iv(self):
"""`dist` argument validation."""
message = "`dist` must be an instance of..."
with pytest.raises(ValueError, match=message):
stats.fit(10, self.data, self.shape_bounds_a)
def test_data_iv(self):
"""`data` argument validation."""
message = "`data` must be exactly one-dimensional."
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a)
message = "All elements of `data` must be finite numbers."
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a)
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a)
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a)
def test_bounds_iv(self):
"""`bounds` argument validation, in both dict and sequence form."""
message = "Bounds provided for the following unrecognized..."
shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "Each element of a `bounds` sequence must be a tuple..."
shape_bounds = [(1, 10, 3), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "Each element of `bounds` must be a tuple specifying..."
shape_bounds = [(1, 10, 3), (0, 1, 0.5)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
shape_bounds = [1, 0]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "A `bounds` sequence must contain at least 2 elements..."
shape_bounds = [(1, 10)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "A `bounds` sequence may not contain more than 3 elements..."
bounds = [(1, 10), (1, 10), (1, 10), (1, 10)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, bounds)
message = "There are no values for `p` on the interval..."
shape_bounds = {'n': (1, 10), 'p': (1, 0)}
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "There are no values for `n` on the interval..."
shape_bounds = [(10, 1), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "There are no integer values for `n` on the interval..."
shape_bounds = [(1.4, 1.6), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "The intersection of user-provided bounds for `n`"
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data)
shape_bounds = [(-np.inf, np.inf), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
def test_guess_iv(self):
"""`guess` argument validation: unknown names, wrong shapes, and
rounding/clipping of out-of-domain guesses."""
message = "Guesses provided for the following unrecognized..."
guess = {'n': 1, 'p': 0.5, '1': 255}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Each element of `guess` must be a scalar..."
guess = {'n': 1, 'p': 'hi'}
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
guess = [1, 'f']
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
guess = [[1, 2]]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "A `guess` sequence must contain at least 2..."
guess = [1]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "A `guess` sequence may not contain more than 3..."
guess = [1, 2, 3, 4]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `n` rounded..."
guess = {'n': 4.5, 'p': -0.5}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `loc` rounded..."
guess = [5, 0.5, 0.5]
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `p` clipped..."
guess = {'n': 5, 'p': -0.5}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `loc` clipped..."
guess = [5, 0.5, 1]
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
@pytest.mark.parametrize("dist_name", cases_test_fit())
def test_basic_fit(self, dist_name):
"""Fit each distribution to data simulated from itself and require the
fitted NLLF to be no worse than the NLLF at the true parameters."""
N = 5000
dist_data = dict(distcont + distdiscrete)
rng = np.random.default_rng(self.seed)
dist = getattr(stats, dist_name)
shapes = np.array(dist_data[dist_name])
# Bounds bracket each true shape by a factor of 10 on each side.
bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64)
bounds[:-2, 0] = shapes/10**np.sign(shapes)
bounds[:-2, 1] = shapes*10**np.sign(shapes)
bounds[-2] = (0, 10)
bounds[-1] = (0, 10)
loc = rng.uniform(*bounds[-2])
scale = rng.uniform(*bounds[-1])
ref = list(dist_data[dist_name]) + [loc, scale]
# Discrete distributions have no scale and need an integer loc.
if getattr(dist, 'pmf', False):
ref = ref[:-1]
ref[-1] = np.floor(loc)
data = dist.rvs(*ref, size=N, random_state=rng)
bounds = bounds[:-1]
if getattr(dist, 'pdf', False):
data = dist.rvs(*ref, size=N, random_state=rng)
with npt.suppress_warnings() as sup:
sup.filter(RuntimeWarning, "overflow encountered")
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_nllf_less_or_close(dist, data, res.params, ref, **self.tols)
def test_argus(self):
# Can't guarantee that all distributions will fit all data with
# arbitrary bounds. This distribution just happens to fail above.
# Try something slightly different.
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.argus
shapes = (1., 2., 3.)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = {'chi': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_nllf_less_or_close(dist, data, res.params, shapes, **self.tols)
def test_foldnorm(self):
# Can't guarantee that all distributions will fit all data with
# arbitrary bounds. This distribution just happens to fail above.
# Try something slightly different.
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.foldnorm
shapes = (1.952125337355587, 2., 3.)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = {'c': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_nllf_less_or_close(dist, data, res.params, shapes, **self.tols)
def test_truncweibull_min(self):
# Can't guarantee that all distributions will fit all data with
# arbitrary bounds. This distribution just happens to fail above.
# Try something slightly different.
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.truncweibull_min
shapes = (2.5, 0.25, 1.75, 2., 3.)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0.1, 10)]*5
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_nllf_less_or_close(dist, data, res.params, shapes, **self.tols)
def test_missing_shape_bounds(self):
# some distributions have a small domain w.r.t. a parameter, e.g.
# $p \in [0, 1]$ for binomial distribution
# User does not need to provide these because the intersection of the
# user's bounds (none) and the distribution's domain is finite
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.binom
n, p, loc = 10, 0.65, 0
data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
shape_bounds = {'n': np.array([0, 20])} # check arrays are OK, too
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params, (n, p, loc), **self.tols)
dist = stats.bernoulli
p, loc = 0.314159, 0
data = dist.rvs(p, loc=loc, size=N, random_state=rng)
res = stats.fit(dist, data, optimizer=self.opt)
assert_allclose(res.params, (p, loc), **self.tols)
def test_fit_only_loc_scale(self):
"""Fit with only loc, only scale, and both free."""
# fit only loc
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.norm
loc, scale = 1.5, 1
data = dist.rvs(loc=loc, size=N, random_state=rng)
loc_bounds = (0, 5)
bounds = {'loc': loc_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
# fit only scale
loc, scale = 0, 2.5
data = dist.rvs(scale=scale, size=N, random_state=rng)
scale_bounds = (0, 5)
bounds = {'scale': scale_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
# fit only loc and scale
dist = stats.norm
loc, scale = 1.5, 2.5
data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
bounds = {'loc': loc_bounds, 'scale': scale_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
def test_everything_fixed(self):
"""Degenerate bounds fix every parameter to a single value."""
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.norm
loc, scale = 1.5, 2.5
data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
# loc, scale fixed to 0, 1 by default
res = stats.fit(dist, data)
assert_allclose(res.params, (0, 1), **self.tols)
# loc, scale explicitly fixed
bounds = {'loc': (loc, loc), 'scale': (scale, scale)}
res = stats.fit(dist, data, bounds)
assert_allclose(res.params, (loc, scale), **self.tols)
# `n` gets fixed during polishing
dist = stats.binom
n, p, loc = 10, 0.65, 0
data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)}
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params, (n, p, loc), **self.tols)
def test_failure(self):
"""A zero-likelihood fit must report success=False with a message."""
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.nbinom
shapes = (5, 0.5)
data = dist.rvs(*shapes, size=N, random_state=rng)
assert data.min() == 0
# With lower bounds on location at 0.5, likelihood is zero
bounds = [(0, 30), (0, 1), (0.5, 10)]
res = stats.fit(dist, data, bounds)
message = "Optimization converged to parameter values that are"
assert res.message.startswith(message)
assert res.success is False
@pytest.mark.xslow
def test_guess(self):
# Test that guess helps DE find the desired solution
N = 2000
rng = np.random.default_rng(self.seed)
dist = stats.nhypergeom
params = (20, 7, 12, 0)
bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)]
data = dist.rvs(*params, size=N, random_state=rng)
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert not np.allclose(res.params, params, **self.tols)
res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt)
assert_allclose(res.params, params, **self.tols)
| 1.789063 | 2 |
demo/filled_errors.py | spyke/spyke | 22 | 12760872 | <gh_stars>10-100
"""Demo plotting filled error ranges around lines"""
from __future__ import division
import numpy as np
import matplotlib as mpl
import matplotlib.mlab as mlab
import pylab as pl
yoffs = np.arange(10)
nplots = len(yoffs)
x = np.arange(0, 1, 0.1)
npoints = len(x)
y = np.zeros((nplots, len(x)))
stdy = np.zeros((nplots, len(x)))
verts = np.zeros((nplots, 2*npoints, 2)) # each timepoint has a +ve and a -ve value
for ploti, yoff in enumerate(yoffs):
y[ploti] = np.random.random(len(x)) / 2 + 0.25 + yoff
stdy[ploti] = 0.2 + np.random.random(len(x)) * 0.2
vert = mlab.poly_between(x, y[ploti]-stdy[ploti], y[ploti]+stdy[ploti])
vert = np.asarray(vert).T
verts[ploti] = vert
# can also use axes.fill() instead of a poly collection, or directly use axes.fill_between()
pcol = mpl.collections.PolyCollection(verts, facecolors='r', edgecolors='none', alpha=0.2)
a = pl.gca()
#pcol = a.fill_between(x, y+stdy, y-stdy, facecolors='r', edgecolors='none', alpha=0.2)
a.add_collection(pcol)
for ploti in range(nplots):
a.plot(x, y[ploti], 'r-')
a.set_xlim((0, 1))
| 2.171875 | 2 |
home/models.py | R-Wolf/CFD_A_library | 4 | 12760873 | <gh_stars>1-10
from django.db import models
class Home(models.Model):
    """Model holding two timestamps plus twenty nullable integer ids (id0-id19)."""

    # Timestamps are managed in code only (editable=False keeps them out of
    # forms/admin); `date` is required, `book_date` may be NULL.
    date = models.DateTimeField(editable=False)
    book_date = models.DateTimeField(editable=False, null=True)

    # NOTE(review): the meaning of id0..id19 is not visible here — presumably
    # per-slot booking ids; confirm against the views using this model.
    # They are kept as explicit flat columns so the existing schema and
    # migrations stay unchanged.
    id0 = models.IntegerField(editable=False, null=True)
    id1 = models.IntegerField(editable=False, null=True)
    id2 = models.IntegerField(editable=False, null=True)
    id3 = models.IntegerField(editable=False, null=True)
    id4 = models.IntegerField(editable=False, null=True)
    id5 = models.IntegerField(editable=False, null=True)
    id6 = models.IntegerField(editable=False, null=True)
    id7 = models.IntegerField(editable=False, null=True)
    id8 = models.IntegerField(editable=False, null=True)
    id9 = models.IntegerField(editable=False, null=True)
    id10 = models.IntegerField(editable=False, null=True)
    id11 = models.IntegerField(editable=False, null=True)
    id12 = models.IntegerField(editable=False, null=True)
    id13 = models.IntegerField(editable=False, null=True)
    id14 = models.IntegerField(editable=False, null=True)
    id15 = models.IntegerField(editable=False, null=True)
    id16 = models.IntegerField(editable=False, null=True)
    id17 = models.IntegerField(editable=False, null=True)
    id18 = models.IntegerField(editable=False, null=True)
    id19 = models.IntegerField(editable=False, null=True)
RoomIOT.py | kunz07/fyp2017 | 7 | 12760874 | <reponame>kunz07/fyp2017
# FYP2017
# Program to send room temperature and humidity to ThingSpeak
# Author: <NAME>
# License: Public Domain
import time
import serial
import sys
import urllib.request
import urllib.parse
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
# NOTE(review): the port is opened at import time, so any module importing
# this file grabs the serial device immediately.
ser = serial.Serial(PORT, BAUD_RATE)
def sendData():
    """Request one humidity/temperature reading over serial and post it
    to the ThingSpeak update endpoint.

    The device replies to the 's' command with a fixed-width line:
    the first five characters are the humidity, the rest the temperature.
    """
    # Cycle the port to discard any stale buffered data.  (The original
    # also called ser.isOpen() as a bare statement, which was a no-op.)
    if ser.isOpen():
        ser.close()
    ser.open()
    ser.write('s'.encode())
    time.sleep(2)  # give the sensor time to respond
    response = ser.readline().strip().decode()
    hum = float(response[:5])
    temp = float(response[5:])
    # Read the ThingSpeak write API key.  The original leaked this file
    # handle; additionally a trailing newline in the key file would have
    # been urlencoded into the request, so the key is stripped.
    with open('TS_APIkey.txt', 'r') as key_file:
        api_key = key_file.read().strip()
    params = urllib.parse.urlencode({'key': api_key,
                                     'field4': temp,
                                     'field5': hum})
    params = params.encode('utf-8')
    # The response body is unused; the context manager guarantees close.
    with urllib.request.urlopen("https://api.thingspeak.com/update",
                                data=params):
        pass
if __name__ == "__main__":
while True:
sendData()
time.sleep(300)
| 2.796875 | 3 |
qiskit_optimization/runtime/qaoa_program.py | X-Libor/qiskit-optimization | 0 | 12760875 | <gh_stars>0
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Qiskit Optimization QAOA Quantum Program."""
from typing import List, Callable, Optional, Any, Dict, Union
import numpy as np
from qiskit import QuantumCircuit
from qiskit.algorithms import MinimumEigensolverResult
from qiskit.circuit.library import QAOAAnsatz
from qiskit.opflow import OperatorBase
from qiskit.providers import Provider
from qiskit.providers.backend import Backend
from qiskit_optimization.exceptions import QiskitOptimizationError
from .vqe_program import VQEProgram
class QAOAProgram(VQEProgram):
"""The Qiskit Optimization QAOA Quantum Program."""
def __init__(
self,
optimizer: Optional[Dict[str, Any]] = None,
reps: int = 1,
initial_state: Optional[QuantumCircuit] = None,
mixer: Union[QuantumCircuit, OperatorBase] = None,
initial_point: Optional[np.ndarray] = None,
provider: Optional[Provider] = None,
backend: Optional[Backend] = None,
shots: int = 1024,
measurement_error_mitigation: bool = False,
callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None,
store_intermediate: bool = False,
) -> None:
"""
Args:
optimizer: A dictionary specifying a classical optimizer.
Currently only SPSA and QN-SPSA are supported. Per default, SPSA is used.
The dictionary must contain a key ``name`` for the name of the optimizer and may
contain additional keys for the settings.
E.g. ``{'name': 'SPSA', 'maxiter': 100}``.
reps: the integer parameter :math:`p` as specified in https://arxiv.org/abs/1411.4028,
Has a minimum valid value of 1.
initial_state: An optional initial state to prepend the QAOA circuit with
mixer: the mixer Hamiltonian to evolve with or a custom quantum circuit. Allows support
of optimizations in constrained subspaces as per https://arxiv.org/abs/1709.03489
as well as warm-starting the optimization as introduced
in http://arxiv.org/abs/2009.10095.
initial_point: An optional initial point (i.e. initial parameter values)
for the optimizer. If ``None`` a random vector is used.
provider: The provider.
backend: The backend to run the circuits on.
shots: The number of shots to be used
measurement_error_mitigation: Whether or not to use measurement error mitigation.
callback: a callback that can access the intermediate data during the optimization.
Four parameter values are passed to the callback as follows during each evaluation
by the optimizer for its current set of parameters as it works towards the minimum.
These are: the evaluation count, the optimizer parameters for the
ansatz, the evaluated mean and the evaluated standard deviation.
store_intermediate: Whether or not to store intermediate values of the optimization
steps. Per default False.
"""
super().__init__(
ansatz=None,
optimizer=optimizer,
initial_point=initial_point,
provider=provider,
backend=backend,
shots=shots,
measurement_error_mitigation=measurement_error_mitigation,
callback=callback,
store_intermediate=store_intermediate,
)
self._initial_state = initial_state
self._mixer = mixer
self._reps = reps
@property
def ansatz(self) -> Optional[QuantumCircuit]:
return self._ansatz
@ansatz.setter
def ansatz(self, ansatz: QuantumCircuit) -> None:
raise QiskitOptimizationError(
"Cannot set the ansatz for QAOA, it is directly inferred from "
"the problem Hamiltonian."
)
@property
def initial_state(self) -> Optional[QuantumCircuit]:
"""
Returns:
Returns the initial state.
"""
return self._initial_state
@initial_state.setter
def initial_state(self, initial_state: Optional[QuantumCircuit]) -> None:
"""
Args:
initial_state: Initial state to set.
"""
self._initial_state = initial_state
@property
def mixer(self) -> Union[QuantumCircuit, OperatorBase]:
"""
Returns:
Returns the mixer.
"""
return self._mixer
@mixer.setter
def mixer(self, mixer: Union[QuantumCircuit, OperatorBase]) -> None:
"""
Args:
mixer: Mixer to set.
"""
self._mixer = mixer
@property
def reps(self) -> int:
"""
Returns:
Returns the reps.
"""
return self._reps
@reps.setter
def reps(self, reps: int) -> None:
"""
Args:
reps: The new number of reps.
"""
self._reps = reps
    def compute_minimum_eigenvalue(
        self,
        operator: OperatorBase,
        aux_operators: Optional[List[Optional[OperatorBase]]] = None,
    ) -> MinimumEigensolverResult:
        """Build the QAOA ansatz for ``operator`` and delegate to the base solver.

        Args:
            operator: Qubit operator of the observable to minimize.
            aux_operators: Optional additional operators to evaluate at the
                final state.

        Returns:
            The result produced by the base class's
            ``compute_minimum_eigenvalue``.
        """
        # The ansatz depends on the problem Hamiltonian, so it is (re)built
        # here rather than accepted through the ``ansatz`` setter (which raises).
        self._ansatz = QAOAAnsatz(
            operator,
            reps=self.reps,
            initial_state=self.initial_state,
            mixer_operator=self.mixer,
        )
        return super().compute_minimum_eigenvalue(operator, aux_operators)
| 2.140625 | 2 |
test/unit/display/test_tables.py | ajpagente/Fraternal | 0 | 12760876 | <reponame>ajpagente/Fraternal<gh_stars>0
import unittest
import sys, os
from display.tables import SimpleConsoleTable
from display.format_specs import RowFormatSpecification
from display.string_format import WarningFormatter
class TestSimpleConsoleTable(unittest.TestCase):
    """Unit tests for SimpleConsoleTable construction and row handling."""
    def test_header_created(self):
        """A freshly built table renders non-empty and holds only the header row.
        The header items deliberately do not match the formatter; rendered
        content itself is not validated."""
        columns = ['One', 'Two', 'Three', 'Four']
        spec = RowFormatSpecification(columns, 'Three', ['Dangerous'], WarningFormatter())
        table = SimpleConsoleTable(columns, spec)
        self.assertIsNotNone(table.get_table())
        self.assertEqual(len(table.rows), 1)
    def test_add_exact_len_row(self):
        """A row whose length matches the header is stored verbatim."""
        columns = ['One', 'Two', 'Three', 'Four']
        table = SimpleConsoleTable(columns)
        table.add_row(['a', 'b', 'c', 'd'])
        self.assertEqual(len(table.rows), 2)
        self.assertIsNotNone(table.get_table())
        self.assertEqual(table.rows[1], ['a', 'b', 'c', 'd'])
    def test_add_short_row(self):
        """A short row is padded with empty strings up to the header length."""
        columns = ['One', 'Two', 'Three', 'Four']
        table = SimpleConsoleTable(columns)
        table.add_row(['a', 'b'])
        self.assertEqual(len(table.rows), 2)
        self.assertIsNotNone(table.get_table())
        self.assertEqual(table.rows[1], ['a', 'b', '', ''])
    def test_add_too_long_row(self):
        """A row longer than the header is rejected with ValueError."""
        columns = ['One', 'Two']
        table = SimpleConsoleTable(columns)
        self.assertRaises(ValueError, table.add_row, ['a', 'b', 'c', 'd'])
        self.assertEqual(len(table.rows), 1)
        self.assertIsNotNone(table.get_table())
    def test_row_format(self):
        """Adding a row that triggers formatting leaves the table consistent.
        The formatting output itself is not validated."""
        columns = ['One', 'Two', 'Three', 'Four']
        spec = RowFormatSpecification(columns, 'Three', ['c'], WarningFormatter())
        table = SimpleConsoleTable(columns, spec)
        table.add_row(['a', 'b', 'c', 'd'])
        self.assertIsNotNone(table.get_table())
        self.assertEqual(len(table.rows), 2)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
pbsuite/banana/Polish.py | cgjosephlee/PBJelly | 2 | 12760877 | <filename>pbsuite/banana/Polish.py
#!/usr/bin/env python
import sys, re, random, argparse, textwrap, logging
import operator
from collections import defaultdict, namedtuple, Counter
from pbsuite.utils.FileHandlers import FastaFile, M5File, FastqFile
from pbsuite.utils.CommandRunner import exe
from pbsuite.utils.setupLogging import setupLogging
USAGE = """\
Takes reads.fastq and ref.fasta
maps with blasr
creates consensus
"""
def blasr(query, target, nproc=1, bestn=1, outName="map.m5"):
    """
    Map *query* reads against *target* with blasr, writing -m 5 alignments
    to *outName*.
    """
    cmd = "blasr %s %s --bestn %d --affineAlign -m 5 --nproc %d --out %s" \
          % (query, target, bestn, nproc, outName)
    # Return code and captured output are intentionally ignored, matching
    # the original behaviour.
    exe(cmd)
def realign(alignment):
    """
    realigns target, query so that every alignment should
    have the comparable characteristics in the same sequence context
    regardless of differences due to the error rate and alignment
    variation
    realignment happens inplace

    The inner pass slides each gap run in the target so the matching base
    sits at the front of the run; it is applied once as-is and once with
    target/query swapped so both sequences are normalized.
    """
    def inner(align):
        # Work on mutable copies; the originals are re-joined at the end.
        target = list(align.targetSeq)
        query = list(align.querySeq)  # NOTE(review): unused leftover local
        compSeq = list(align.compSeq)
        for pos in range(len(align.targetSeq)):
            if align.targetSeq[pos] == '-':
                # Find the full gap run plus the first base after it (if any).
                i = re.match('-+[ATCGatcg]?', align.targetSeq[pos:])
                # If the base just past the gap equals the query base at the
                # start of the gap, shift it to the front of the run and mark
                # the vacated column with '*'.
                if align.targetSeq[pos+i.end()-1] == align.querySeq[pos]:
                    target[pos] = align.targetSeq[pos+i.end()-1]
                    compSeq[pos] = '|'
                    target[pos+i.end()-1] = '-'
                    compSeq[pos+i.end()-1] = '*'
        align.targetSeq = "".join(target)
        align.compSeq = "".join(compSeq)
    inner(alignment)
    # Swap roles and normalize the other sequence the same way, then restore.
    alignment.targetSeq, alignment.querySeq = alignment.querySeq, alignment.targetSeq
    inner(alignment)
    alignment.targetSeq, alignment.querySeq = alignment.querySeq, alignment.targetSeq
def offSetSeqs(offset, align):
    """
    Return the (query, comp, target) rows of *align* as character lists,
    each left-padded with *offset* spaces.
    """
    padding = [" "] * offset
    query_row = padding + list(align.querySeq)
    comp_row = padding + list(align.compSeq)
    target_row = padding + list(align.targetSeq)
    return query_row, comp_row, target_row
def printSeqs(seqs):
    """Log the query, comparison and target rows of each triple at debug level."""
    for query_row, comp_row, target_row in seqs:
        logging.debug("".join(query_row))
        logging.debug("".join(comp_row))
        logging.debug("".join(target_row))
def insert(base, pos, ch):
    """
    Insert character *ch* at column *pos* of all three rows
    (query, comp, target) of the alignment triple *base*, in place.
    """
    for row_idx in (0, 1, 2):
        base[row_idx].insert(pos, ch)
def consensus(aligns):
    """
    expands alignment based on query, and then majority rules consensus the seqs

    Each alignment is realigned, padded out to its target start offset,
    expanded column-by-column so query insertions line up across sequences,
    and then a per-column majority vote builds the consensus string.

    Returns a ``polish_results`` namedtuple of
    (contribSeqs, contribBases, fillBases, sequence).

    NOTE(review): this relies on Python 2 semantics -- ``map``/``filter``
    must return lists for ``scores.sort()`` and ``max(map(...))`` below to
    work; under Python 3 they return iterators and this function breaks.
    """
    if len(aligns) > 500:#hard limit
        # Keep only the 500 alignments with the smallest scores.
        # NOTE(review): assumes smaller scores are better -- confirm against
        # the blasr scoring convention used by the M5 parser.
        keep = []  # NOTE(review): unused leftover variable
        scores = map(lambda x: x.score, aligns)
        scores.sort()
        minS = scores[499]
        aligns = filter(lambda x: x.score <= minS, aligns)
    seqs = []
    for i in aligns:
        realign(i)
        seqs.append(offSetSeqs(i.tstart, i))
    #logging.debug("#Original Seqs (%d)" % (len(seqs)))
    #printSeqs(seqs)
    # Expansion pass: walk each target-relative column; wherever any sequence
    # shows a target gap ('-' in its target row), pad the other sequences
    # with '_' (covered columns) or ' ' (not-yet-started columns) so all
    # sequences keep the same column coordinates.
    i = 0 #<-- target relative position
    remain = len(seqs) # number of sequences remaining
    while remain > 1:
        ins = False # Is there an insertion here
        for base in seqs:
            if i == len(base[1]):
                remain -= 1# kill it
            if i < len(base[1]) and base[2][i] == '-':
                ins = True
        if ins: # apply insertion across all non-ins bases
            for base in seqs:
                if i < len(base[1]):
                    if base[2][i] != '-' and base[2][i] != " ":
                        insert(base, i ,'_')
                    elif base[2][i] == " ":
                        insert(base, i, ' ')
        i += 1
    #logging.debug( "#Expanded Seqs" )
    #printSeqs(seqs)
    #majority vote consensus
    out = []
    contribBases = 0
    fillBases = 0
    if len(seqs) == 0:
        # NOTE(review): only logs; the max(map(...)) below would still raise
        # on an empty list -- confirm callers never pass zero alignments.
        logging.info("no sequences")
    for p in range(max(map(lambda x: len(x[0]), seqs))):
        cnt = Counter()
        #Count it
        for s in seqs:
            if p < len(s[0]):
                cnt[s[0][p]] += 1
        # Spaces are padding, never a vote.
        cnt[" "] = 0
        contribBases += sum(cnt.values())
        #Maximum count
        nuc = cnt.most_common()[0][0]
        if nuc not in ['-',"_"]:
            fillBases += 1
        out.append(nuc)
        #logging.critical(nuc)
        #nuc = max(cnt.iteritems(), key=operator.itemgetter(1))[1]
        #Who all has maximum count
        #n = []
        #logging.critical(cnt)
        #for j in cnt:
            #logging.critical(j)
            #if cnt[j] == nuc:
                #n.append(j)
        ##get random one
        #n = random.sample(n, 1)[0]
        #if n not in ["-","_"]:
            #fillBases += 1
        #out.append(n)
    consen = "".join(out)
    logging.debug("# expanded consensus (%d nuc votes) <%d fill bases>" % (contribBases, fillBases))
    #logging.debug(consen)
    # Strip padding/gap markers to obtain the final consensus sequence.
    consen = consen.replace('_','').replace('-','').replace(' ','')
    results = namedtuple("polish_results", "contribSeqs contribBases fillBases sequence")
    return results(len(aligns), contribBases, fillBases, consen)
def parseArgs():
    """Parse command-line arguments, set up logging, and validate mode flags."""
    parser = argparse.ArgumentParser(description=USAGE,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("reads", metavar="reads", type=str,
                        help="Input reads .fasta or .fastq")
    parser.add_argument("-t", "--target", type=str,
                        help="Target sequence name")
    parser.add_argument("-T", "--Target", type=str,
                        help="Fasta file containing target sequence")
    parser.add_argument("-s", "--super", dest="super", action="store_true",
                        help="Treat each read as the target once")
    parser.add_argument("-m", "--maxtail", type=int, default=sys.maxsize,
                        help="Max number of bases allowed to be in tail (inf)")
    parser.add_argument("-n", "--nproc", dest="nproc", default=1, type=int,
                        help="Number of processors to use with blasr (1)")
    parser.add_argument("-o", "--outname", dest="outname", default="polish.out",
                        type=str,
                        help="Base name for output files (polish.out)")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    setupLogging(args.debug)
    # --super, --target and --Target are mutually exclusive: reject any pair
    # (equivalent to the original compound boolean check).
    modes = [args.target is not None, args.Target is not None, args.super]
    if sum(modes) > 1:
        print("Error! only specify one of --super or --target or --Target")
        exit(1)
    return args
class NullDevice():
    """A file-like sink whose ``write`` silently discards its input."""
    def write(self, s):
        # Intentionally a no-op.
        pass
if __name__ == '__main__':
    args = parseArgs()
    alignFile = args.outname + ".m5"
    consensusFile = args.outname + ".fasta"
    if args.target is not None:  # polish around a single read selected by name
        # Extract the named read into its own fasta so blasr can use it as
        # the mapping target.
        fasta = FastaFile(args.reads)
        tempOut = open("temp.fasta", 'w')
        tempOut.write(">%s\n%s\n" % (args.target, fasta[args.target]))
        # BUGFIX: the file must be closed (flushed) before blasr reads it;
        # the original also left a dangling no-op `tempOut.write` statement.
        tempOut.close()
        blasr(args.reads, tempOut.name, nproc=args.nproc, outName=alignFile)
        aligns = M5File(alignFile)
        results = consensus(aligns)
        fout = open(consensusFile, 'w')
        # BUGFIX: the original format string had two placeholders for three
        # values (TypeError); write the sequence on its own line as the
        # --Target branch already did.
        fout.write(">pbjpolish_%d_vote_%d_len\n%s\n" % (results.contribBases,
                   results.fillBases, results.sequence))
        fout.close()
    elif args.Target is not None:  # polish against a user-supplied target fasta
        blasr(args.reads, args.Target, nproc=args.nproc, outName=alignFile)
        aligns = M5File(alignFile)
        results = consensus(aligns)
        fout = open(consensusFile, 'w')
        fout.write(">pbjpolish_%d_vote_%d_len\n%s\n" % (results.contribBases,
                   results.fillBases, results.sequence))
        fout.close()
    elif args.super:  # every read takes a turn as the target
        tempfile = open("temp.fasta", 'w')
        if args.reads.endswith(".fasta"):
            seqs = FastaFile(args.reads)
            for s in seqs:
                tempfile.write(">%s\n%s\n" % (s, seqs[s]))
        elif args.reads.endswith(".fastq"):
            seqs = FastqFile(args.reads)
            for s in seqs:
                tempfile.write(">%s\n%s\n" % (s, seqs[s].seq))
        # BUGFIX: close (flush) before blasr maps against the file.
        tempfile.close()
        blasr(args.reads, tempfile.name, nproc=args.nproc, bestn=len(seqs),
              outName=alignFile)
        aligns = M5File(alignFile)
        # Group alignments per target read and build one consensus per group.
        groups = defaultdict(list)
        for a in aligns:
            groups[a.tname].append(a)
        fout = open(consensusFile, 'w')
        for g in groups:
            # BUGFIX: the original called consensus(aligns) for every group,
            # ignoring the grouping entirely.
            results = consensus(groups[g])
            fout.write(">pbjpolish_%d_vote_%d_len\n%s\n" % (results.contribBases,
                       results.fillBases, results.sequence))
        fout.close()
leetcode_701_2.py | xulu199705/LeetCode | 0 | 12760878 | from typing import List
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x       # node payload
        self.left = None   # left child, None when absent
        self.right = None  # right child, None when absent
class Solution:
    """LeetCode 701: insert a value into a binary search tree."""
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Iteratively descend from the root and attach a new leaf holding
        *val* at the first empty slot; returns the (possibly new) root."""
        if root is None:
            return TreeNode(val)
        cur = root
        while True:
            if val < cur.val:
                if cur.left is None:
                    cur.left = TreeNode(val)
                    return root
                cur = cur.left
            else:
                if cur.right is None:
                    cur.right = TreeNode(val)
                    return root
                cur = cur.right
| 3.9375 | 4 |
emojiwatch/apps.py | posita/emojiwatch-django | 0 | 12760879 | # -*- encoding: utf-8 -*-
# ======================================================================
"""
Copyright and other protections apply. Please see the accompanying
:doc:`LICENSE <LICENSE>` and :doc:`CREDITS <CREDITS>` file(s) for rights
and restrictions governing use of this software. All rights not
expressly waived or licensed are reserved. If those files are missing or
appear to be modified from their originals, then please contact the
author before viewing or using this software in any capacity.
"""
# ======================================================================
from __future__ import absolute_import, division, print_function
TYPE_CHECKING = False # from typing import TYPE_CHECKING
if TYPE_CHECKING:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
from builtins import * # noqa: F401,F403 # pylint: disable=redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
from future.builtins.disabled import * # noqa: F401,F403 # pylint: disable=no-name-in-module,redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
from future.standard_library import install_aliases
install_aliases()
# ---- Imports ---------------------------------------------------------
from gettext import gettext
import django.apps as d_apps
from . import (
LOGGER,
SLACK_VERIFICATION_TOKEN,
)
# ---- Classes ---------------------------------------------------------
# ======================================================================
class EmojiwatchConfig(d_apps.AppConfig):
    """Django application configuration for the emojiwatch app."""
    # ---- Data --------------------------------------------------------
    name = 'emojiwatch'
    verbose_name = gettext('Emojiwatch')
    # ---- Overrides ---------------------------------------------------
    def ready(self):
        # type: (...) -> None
        """Log a critical message at startup when the Slack verification
        token setting is missing, so misconfiguration is caught early."""
        super().ready()  # type: ignore # py2
        if not SLACK_VERIFICATION_TOKEN:
            LOGGER.critical("EMOJIWATCH['slack_verification_token'] setting is missing")
| 1.601563 | 2 |
aims/management/commands/load_sectors.py | matmaxgeds/somaliaims-demo | 0 | 12760880 | <filename>aims/management/commands/load_sectors.py
from django.core.management.base import BaseCommand
from management.models import Sector
from openpyxl import load_workbook
class Command(BaseCommand):
    """Import Sector rows from the 'Sectors' sheet of an xlsx workbook."""
    help = 'Imports sector information from xlsx. The xlsx file is given as a cmd line argument'
    # Fixed cell range of the sector table within the 'Sectors' sheet.
    SECTOR_RANGE = 'B8:C44'
    def add_arguments(self, parser):
        # Single positional argument: path to the workbook to import.
        parser.add_argument('file_path')
    def handle(self, file_path, *args, **options):
        """Parse 'code: name' cells and create missing Sector records."""
        wb = load_workbook(file_path, use_iterators=True)
        sheet = wb.get_sheet_by_name('Sectors')
        for row in sheet.iter_rows(self.SECTOR_RANGE):
            count = 0
            for cell in row:
                # Even counts are treated as "code: name" cells.
                if count == 0 or 0 == (count % 2):
                    try:
                        full_name = cell.value
                        values = full_name.rpartition(': ')
                        code = values[0]  # NOTE(review): parsed but never used
                        name = values[2].strip()
                        print(name)
                    except TypeError:  # NOTE(review): intended to skip empty cells; the original comment about re.search was stale
                        code = ''
                    count += 1
                # NOTE(review): after count += 1 the SAME cell also feeds the
                # description below, so description comes from the name cell,
                # not the adjacent column -- verify against the workbook layout.
                if 0 != (count % 2):
                    description = cell.value.rpartition(': ')[2]
                    try:
                        Sector.objects.get(name=name)
                    except Exception:
                        # Sector does not exist yet; create it.
                        if name:
                            new_sector = Sector(name=name, description=description)
                            new_sector.save()
| 2.40625 | 2 |
lang/py/cookbook/v2/source/cb2_10_12_sol_2.py | ch1huizong/learning | 0 | 12760881 | <filename>lang/py/cookbook/v2/source/cb2_10_12_sol_2.py
from ctypes import oledll
# Load a COM DLL by path; ctypes' oledll loader checks HRESULT return values
# and raises on failure (Windows only).
dll = oledll[r'C:\Path\To\Some.DLL']
# Register and then unregister the COM server exported by the DLL.
dll.DllRegisterServer()
dll.DllUnregisterServer()
| 1.242188 | 1 |
oms_cms/backend/menu/management/commands/addmenu.py | Hamel007/oms_cms | 0 | 12760882 | from django.core.management.base import BaseCommand
from oms_cms.backend.menu.models import Menu, MenuItem
class Command(BaseCommand):
    """Management command that seeds two default menus with their items."""
    help = 'Add menu'
    def handle(self, *args, **options):
        # NOTE(review): lang_id=1 is hard-coded -- presumably the default
        # language primary key; confirm that record exists before running.
        menu = Menu.objects.create(name="Верхнее")
        MenuItem.objects.create(title="Главная", name="home", menu=menu, lang_id=1)
        MenuItem.objects.create(title="Новости", name="news", menu=menu, lang_id=1)
        menu = Menu.objects.create(name="Верхнее 2")
        MenuItem.objects.create(title="Контакты", name="contact", menu=menu, lang_id=1)
        self.stdout.write('Success menu')
| 1.914063 | 2 |
subjects/apps.py | soumith2105/vasv-stdin-backend | 0 | 12760883 | <reponame>soumith2105/vasv-stdin-backend
from django.apps import AppConfig
class SubjectsConfig(AppConfig):
    """Django application configuration for the ``subjects`` app."""
    name = "subjects"
| 1.234375 | 1 |
python/testData/inspections/PyTypeCheckerInspection/ParametrizedBuiltinTypeAndTypingTypeAreEquivalent.py | alexey-anufriev/intellij-community | 2 | 12760884 | <gh_stars>1-10
from typing import Type, TypeVar
def expects_typing_Type(x: Type[str]):
expects_builtin_type(x)
expects_builtin_type(<warning descr="Expected type 'Type[str]', got 'Type[int]' instead">int</warning>)
def expects_builtin_type(x: type[str]):
expects_typing_Type(x)
expects_typing_Type(<warning descr="Expected type 'Type[str]', got 'Type[int]' instead">int</warning>)
T = TypeVar('T', bound=str)
def expects_generic_builtin_type(x: type[T]):
expects_generic_typing_Type(x)
expects_generic_typing_Type(<warning descr="Expected type 'Type[T]', got 'Type[int]' instead">int</warning>)
def expects_generic_typing_Type(x: Type[T]):
expects_generic_builtin_type(x)
expects_generic_builtin_type(<warning descr="Expected type 'Type[T]', got 'Type[int]' instead">int</warning>)
| 2.765625 | 3 |
aletheia/mechanism_engine/__init__.py | Brain-in-Vat/Aletheia | 0 | 12760885 | <filename>aletheia/mechanism_engine/__init__.py
from mesa import Model
| 1.046875 | 1 |
src/robotide/lib/robot/running/randomizer.py | ludovicurbain/SWIFT-RIDE | 775 | 12760886 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import Random
from robotide.lib.robot.model import SuiteVisitor
class Randomizer(SuiteVisitor):
    """Suite visitor that shuffles child suites and/or tests in place."""

    def __init__(self, randomize_suites=True, randomize_tests=True, seed=None):
        self.randomize_suites = randomize_suites
        self.randomize_tests = randomize_tests
        self.seed = seed
        # Cannot use just Random(seed) due to
        # https://ironpython.codeplex.com/workitem/35155
        if seed is None:
            self._shuffle = Random().shuffle
        else:
            self._shuffle = Random(seed).shuffle

    def start_suite(self, suite):
        """Shuffle the suite's children; record the seed on the root suite."""
        if not (self.randomize_suites or self.randomize_tests):
            return False
        if self.randomize_suites:
            self._shuffle(suite.suites)
        if self.randomize_tests:
            self._shuffle(suite.tests)
        if not suite.parent:
            suite.metadata['Randomized'] = self._get_message()

    def _get_message(self):
        # Human-readable description of what was shuffled, for suite metadata.
        labels = {(True, True): 'Suites and tests',
                  (True, False): 'Suites',
                  (False, True): 'Tests'}
        what = labels[(self.randomize_suites, self.randomize_tests)]
        return '%s (seed %s)' % (what, self.seed)

    def visit_test(self, test):
        # Tests need no per-test handling.
        pass

    def visit_keyword(self, kw):
        # Keywords need no handling.
        pass
| 2.09375 | 2 |
dxlclient/test/test_dxlclient.py | rahul1809/opendxl-client-python | 1 | 12760887 | # -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2014 McAfee Inc. - All Rights Reserved.
################################################################################
# Run with python -m unittest dxlclient.test.test_dxlclient
import unittest
import time
import threading
from base_test import BaseClientTest
import io
from nose.plugins.attrib import attr
from nose_parameterized import parameterized
from mock import Mock, patch
from textwrap import dedent
import __builtin__
import dxlclient._global_settings
from dxlclient import Request
from dxlclient import Response
from dxlclient import Event
from dxlclient import ErrorResponse
from dxlclient import DxlClient
from dxlclient import DxlClientConfig
from dxlclient import Broker
from dxlclient import UuidGenerator
from dxlclient import EventCallback
from dxlclient import RequestCallback
from dxlclient import ResponseCallback
from dxlclient import DxlException
from dxlclient import BrokerListError
from dxlclient._global_settings import *
CONFIG_DATA_NO_CERTS_SECTION = """
[no_certs]
BrokerCertChain: certchain.pem
CertFile: certfile.pem
PrivateKey: privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
CONFIG_DATA_NO_CA_OPTION = """
[Certs]
CertFile: certfile.pem
PrivateKey: privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
CONFIG_DATA_NO_CERT_OPTION = """
[Certs]
BrokerCertChain: certchain.pem
PrivateKey: privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
CONFIG_DATA_NO_PK_OPTION = """
[Certs]
BrokerCertChain: certchain.pem
CertFile: certfile.pem
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
CONFIG_DATA_NO_BROKERS_SECTION = """
[Certs]
BrokerCertChain: certchain.pem
CertFile: certfile.pem
PrivateKey: privatekey.pk
22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
CONFIG_DATA_NO_BROKERS_OPTION = """
[Certs]
BrokerCertChain: certchain.pem
CertFile: certfile.pem
PrivateKey: privatekey.pk
[Brokers]
"""
class DxlClientConfigTest(unittest.TestCase):
    """Unit tests for DxlClientConfig: validation, broker selection, parsing."""
    @parameterized.expand([
        (None,),
        ("",)
    ])
    def test_config_throws_value_error_for_empty_ca_bundle(self, ca_bundle):
        """A missing or empty CA bundle is rejected at construction time."""
        self.assertRaises(ValueError, DxlClientConfig, broker_ca_bundle=ca_bundle,
                          cert_file=get_cert_file_pem(), private_key=get_dxl_private_key(), brokers=[])
    @parameterized.expand([
        (None,),
        ("",)
    ])
    def test_config_throws_value_error_for_empty_cert_file(self, cert_file):
        """A missing or empty client certificate is rejected at construction time."""
        self.assertRaises(ValueError, DxlClientConfig,
                          cert_file=cert_file, broker_ca_bundle=get_ca_bundle_pem(), private_key=get_dxl_private_key(),
                          brokers=[])
    def test_get_fastest_broker_gets_the_fastest(self):
        """_get_fastest_broker returns the broker whose connect completes first."""
        semaphore = threading.Semaphore(0)
        # Mock brokers connect speed: the slow one blocks until the fast one
        # releases the semaphore, then sleeps a little longer.
        fast_broker = Mock()
        slow_broker = Mock()
        def connect_to_broker_slow():
            import time
            semaphore.acquire()
            time.sleep(0.1)
            return
        def connect_to_broker_fast():
            semaphore.release()
            return
        slow_broker._connect_to_broker = connect_to_broker_slow
        fast_broker._connect_to_broker = connect_to_broker_fast
        # Create config and add brokers
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        config.brokers.append(fast_broker)
        config.brokers.append(slow_broker)
        # Check that the returned is the fastest
        self.assertEqual(config._get_fastest_broker(), fast_broker)
    def test_get_sorted_broker_list_returns_empty_when_no_brokers(self):
        """Sorting an empty broker list yields an empty list."""
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        self.assertEqual(config._get_sorted_broker_list(), [])
    def test_get_sorted_broker_list_returns_all_brokers(self):
        """Every configured broker appears in the sorted list."""
        # Create config
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        # Create mocked brokers
        b1 = Mock()
        b2 = Mock()
        b1._connect_to_broker = b2._connect_to_broker = Mock(return_value=True)
        # Add them to config
        config.brokers.append(b1)
        config.brokers.append(b2)
        # Get all brokers
        l = config._get_sorted_broker_list()
        # Check all brokers are in the list
        self.assertTrue(b1 in l)
        self.assertTrue(b2 in l)
    @parameterized.expand([
        ({"BrokersList": "Actually not a brokers list"},)
    ])
    def test_get_brokers_raises_exception_from_invalid_json(self, policy):
        """A malformed broker policy raises BrokerListError."""
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        with self.assertRaises(BrokerListError):
            config._set_brokers_from_json(policy)
    def test_set_config_from_file_generates_dxl_config(self):
        """create_dxl_config_from_file parses certs and broker entries."""
        read_data = """
        [Certs]
        BrokerCertChain: certchain.pem
        CertFile: certfile.pem
        PrivateKey: privatekey.pk
        [Brokers]
        22cdcace-6e8f-11e5-29c0-005056aa56de: 22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
        """
        # Patch builtin open so the parser reads the in-memory config above.
        with patch.object(__builtin__, 'open', return_value=io.BytesIO(dedent(read_data))):
            client_config = DxlClientConfig.create_dxl_config_from_file("mock_file")
            self.assertEqual(client_config.cert_file, "certfile.pem")
            self.assertEqual(client_config.broker_ca_bundle, "certchain.pem")
            self.assertEqual(client_config.private_key, "privatekey.pk")
            broker = client_config.brokers[0]
            self.assertEqual(broker.host_name, "dxl-broker-1")
            self.assertEqual(broker.ip_address, "10.218.73.206")
            self.assertEqual(broker.port, 8883)
            self.assertEqual(broker.unique_id, "22cdcace-6e8f-11e5-29c0-005056aa56de")
    def test_set_config_wrong_file_raises_exception(self):
        """Loading a nonexistent config file raises."""
        with self.assertRaises(Exception):
            DxlClientConfig.create_dxl_config_from_file("this_file_doesnt_exist.cfg")
    @parameterized.expand([
        (CONFIG_DATA_NO_CERTS_SECTION,),
        (CONFIG_DATA_NO_CA_OPTION,),
        (CONFIG_DATA_NO_CERT_OPTION,),
        (CONFIG_DATA_NO_PK_OPTION,),
    ])
    def test_missing_certs_raises_exception(self, read_data):
        """Configs lacking any certificate option raise ValueError."""
        with patch.object(__builtin__, 'open', return_value=io.BytesIO(dedent(read_data))):
            with self.assertRaises(ValueError):
                DxlClientConfig.create_dxl_config_from_file("mock_file.cfg")
    @parameterized.expand([
        (CONFIG_DATA_NO_BROKERS_SECTION,),
        (CONFIG_DATA_NO_BROKERS_OPTION,),
    ])
    def test_missing_brokers_doesnt_raise_exceptions(self, read_data):
        """Configs without brokers still load, yielding an empty broker list."""
        with patch.object(__builtin__, 'open', return_value=io.BytesIO(dedent(read_data))):
            client_config = DxlClientConfig.create_dxl_config_from_file("mock_file.cfg")
            self.assertEqual(len(client_config.brokers), 0)
class DxlClientTest(unittest.TestCase):
def setUp(self):
self.config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
cert_file=get_cert_file_pem(),
private_key=get_dxl_private_key(),
brokers=[])
mqtt_client_patch = patch('paho.mqtt.client.Client')
mqtt_client_patch.start()
self.client = DxlClient(self.config)
self.client._request_manager.wait_for_response = Mock(return_value=Response(request=None))
self.test_channel = '/test/channel'
def tearDown(self):
patch.stopall()
def test_client_raises_exception_on_connect_when_already_connecting(self):
self.client._client.connect.side_effect = Exception("An exception!")
class MyThread(threading.Thread):
def __init__(self, client):
super(MyThread, self).__init__()
self._client = client
def run(self):
self._client.connect()
t = MyThread(self.client)
t.setDaemon(True)
t.start()
time.sleep(2)
self.assertEqual(self.client.connected, False)
with self.assertRaises(DxlException):
self.client.connect()
# self.client.disconnect()
def test_client_raises_exception_on_connect_when_already_connected(self):
self.client._client.connect.side_effect = Exception("An exception!")
self.client._connected = Mock(return_value=True)
with self.assertRaises(DxlException):
self.client.connect()
# self.client.disconnect()
# The following test is too slow
def test_client_disconnect_doesnt_raises_exception_on_disconnect_when_disconnected(self):
self.assertEqual(self.client.connected, False)
self.client.disconnect()
self.client.disconnect()
@parameterized.expand([
# (connect + retries) * 2 = connect_count
(0, 2),
(1, 4),
(2, 6),
])
def test_client_retries_defines_how_many_times_the_client_retries_connection(self, retries, connect_count):
# Client wont' connect ;)
self.client._client.connect = Mock(side_effect=Exception('Could not connect'))
# No delay between retries (faster unit tests)
self.client.config.reconnect_delay = 0
self.client._wait_for_policy_delay = 0
broker = Broker(host_name='localhost')
broker._parse(UuidGenerator.generate_id_as_string() + ";9999;localhost;127.0.0.1")
self.client.config.brokers = [broker]
self.client.config.connect_retries = retries
with self.assertRaises(DxlException):
self.client.connect()
self.assertEqual(self.client._client.connect.call_count, connect_count)
# self.client.disconnect()
def test_client_subscribe_adds_subscription_when_not_connected(self):
self.client._client.subscribe = Mock(return_value=None)
self.assertFalse(self.client.connected)
self.client.subscribe(self.test_channel)
self.assertTrue(self.test_channel in self.client.subscriptions)
self.assertEqual(self.client._client.subscribe.call_count, 0)
def test_client_unsubscribe_removes_subscription_when_not_connected(self):
self.client._client.unsubscribe = Mock(return_value=None)
self.assertFalse(self.client.connected)
# Add subscription
self.client.subscribe(self.test_channel)
self.assertTrue(self.test_channel in self.client.subscriptions)
# Remove subscription
self.client.unsubscribe(self.test_channel)
self.assertFalse(self.test_channel in self.client.subscriptions)
def test_client_subscribe_doesnt_add_twice_same_channel(self):
# Mock client.subscribe and is_connected
self.client._client.subscribe = Mock(return_value=None)
self.client._connected = Mock(return_value=True)
# We always have the default (myself) channel
self.assertEqual(len(self.client.subscriptions), 1)
self.client.subscribe(self.test_channel)
self.assertEqual(len(self.client.subscriptions), 2)
self.client.subscribe(self.test_channel)
self.assertEqual(len(self.client.subscriptions), 2)
self.assertEqual(self.client._client.subscribe.call_count, 1)
def test_client_handle_message_with_event_calls_event_callback(self):
event_callback = EventCallback()
event_callback.on_event = Mock()
self.client.add_event_callback(self.test_channel, event_callback)
# Create and process Event
evt = Event(destination_topic=self.test_channel)._to_bytes()
self.client._handle_message(self.test_channel, evt)
# Check that callback was called
self.assertEqual(event_callback.on_event.call_count, 1)
def test_client_handle_message_with_request_calls_request_callback(self):
req_callback = RequestCallback()
req_callback.on_request = Mock()
self.client.add_request_callback(self.test_channel, req_callback)
# Create and process Request
req = Request(destination_topic=self.test_channel)._to_bytes()
self.client._handle_message(self.test_channel, req)
# Check that callback was called
self.assertEqual(req_callback.on_request.call_count, 1)
def test_client_handle_message_with_response_calls_response_callback(self):
callback = ResponseCallback()
callback.on_response = Mock()
self.client.add_response_callback(self.test_channel, callback)
# Create and process Response
msg = Response(request=None)._to_bytes()
self.client._handle_message(self.test_channel, msg)
# Check that callback was called
self.assertEqual(callback.on_response.call_count, 1)
def test_client_send_event_publishes_message_to_dxl_fabric(self):
self.client._client.publish = Mock(return_value=None)
# Create and process Request
msg = Event(destination_topic="")
self.client.send_event(msg)
# Check that callback was called
self.assertEqual(self.client._client.publish.call_count, 1)
def test_client_send_request_publishes_message_to_dxl_fabric(self):
self.client._client.publish = Mock(return_value=None)
# Create and process Request
msg = Request(destination_topic="")
self.client._send_request(msg)
# Check that callback was called
self.assertEqual(self.client._client.publish.call_count, 1)
def test_client_send_response_publishes_message_to_dxl_fabric(self):
self.client._client.publish = Mock(return_value=None)
# Create and process Request
msg = Response(request=None)
self.client.send_response(msg)
# Check that callback was called
self.assertEqual(self.client._client.publish.call_count, 1)
def test_client_handles_error_response_and_fire_response_handler(self):
self.client._fire_response = Mock(return_value=None)
# Create and process Request
msg = ErrorResponse(request=None, error_code=666, error_message="test message")
payload = msg._to_bytes()
# Handle error response message
self.client._handle_message(self.test_channel, payload)
# Check that message response was properly delivered to handler
self.assertEqual(self.client._fire_response.call_count, 1)
"""
Service unit tests
"""
def test_client_register_service_subscribes_client_to_channel(self):
channel1 = '/mcafee/service/unittest/one'
channel2 = '/mcafee/service/unittest/two'
# Create dummy service
service_info = dxlclient.service.ServiceRegistrationInfo(
service_type='/mcafee/service/unittest', client=self.client)
service_info.add_topic(channel1, RequestCallback())
service_info.add_topic(channel2, RequestCallback())
# Register service in client
self.client.register_service_async(service_info)
# Check subscribed channels
subscriptions = self.client.subscriptions
assert channel1 in subscriptions, "Client wasn't subscribed to service channel"
assert channel2 in subscriptions, "Client wasn't subscribed to service channel"
def test_client_wont_register_the_same_service_twice(self):
service_info = dxlclient.service.ServiceRegistrationInfo(
service_type='/mcafee/service/unittest', client=self.client)
# Register service in client
self.client.register_service_async(service_info)
with self.assertRaises(dxlclient.DxlException):
# Re-register service
self.client.register_service_async(service_info)
    def test_client_register_service_sends_register_request_to_broker(self):
        """Registering while 'connected' must send a registration request."""
        service_info = dxlclient.service.ServiceRegistrationInfo(
            service_type='/mcafee/service/unittest', client=self.client)
        # Stub out the network and pretend the client is connected.
        self.client._send_request = Mock(return_value=True)
        self.client._connected = Mock(return_value=True)
        # Register service in client
        self.client.register_service_async(service_info)
        # Async registration presumably runs on a background thread, hence
        # the sleep before asserting -- confirm against client internals.
        time.sleep(2)
        # Check that method has been called
        self.assertTrue(self.client._send_request.called)
    def test_client_register_service_unsubscribes_client_to_channel(self):
        """Unregistering a service must drop the subscriptions registration added."""
        channel1 = '/mcafee/service/unittest/one'
        channel2 = '/mcafee/service/unittest/two'
        # Create dummy service
        service_info = dxlclient.service.ServiceRegistrationInfo(
            service_type='/mcafee/service/unittest', client=self.client)
        service_info.add_topic(channel1, RequestCallback())
        service_info.add_topic(channel2, RequestCallback())
        # Register service in client
        self.client.register_service_async(service_info)
        # Check subscribed channels
        subscriptions = self.client.subscriptions
        assert channel1 in subscriptions, "Client wasn't subscribed to service channel"
        assert channel2 in subscriptions, "Client wasn't subscribed to service channel"
        # Unregister and verify both topics are gone again
        self.client.unregister_service_async(service_info)
        subscriptions = self.client.subscriptions
        assert channel1 not in subscriptions, "Client wasn't unsubscribed to service channel"
        assert channel2 not in subscriptions, "Client wasn't unsubscribed to service channel"
def test_client_register_service_unsuscribes_from_channel_by_guid(self):
channel1 = '/mcafee/service/unittest/one'
channel2 = '/mcafee/service/unittest/two'
# Create dummy service
service_info = dxlclient.service.ServiceRegistrationInfo(
service_type='/mcafee/service/unittest', client=self.client)
service_info.add_topic(channel1, RequestCallback())
service_info.add_topic(channel2, RequestCallback())
# Create same dummy service - different object
service_info2 = service_info = dxlclient.service.ServiceRegistrationInfo(
service_type='/mcafee/service/unittest', client=self.client)
service_info._service_id = service_info.service_id
service_info.add_topic(channel1, RequestCallback())
service_info.add_topic(channel2, RequestCallback())
# Register service in client
self.client.register_service_async(service_info)
# Check subscribed channels
subscriptions = self.client.subscriptions
assert channel1 in subscriptions, "Client wasn't subscribed to service channel"
assert channel2 in subscriptions, "Client wasn't subscribed to service channel"
self.client.unregister_service_async(service_info2)
subscriptions = self.client.subscriptions
assert channel1 not in subscriptions, "Client wasn't unsubscribed to service channel"
assert channel2 not in subscriptions, "Client wasn't unsubscribed to service channel"
@attr('system')
class DxlClientSystemClientTest(BaseClientTest):
    """Integration tests requiring a live DXL broker (tagged 'system')."""
    def test_client_connects_to_broker_and_sets_current_broker(self):
        with self.create_client() as client:
            client.connect()
            # NOTE(review): hard-coded id ties this test to a specific test
            # environment's broker configuration -- confirm it matches.
            broker_id = "unique_broker_id_1"
            self.assertTrue(client.connected)
            self.assertEqual(client.current_broker.unique_id, broker_id)
    def test_client_raises_exception_when_cannot_sync_connect_to_broker(self):
        """connect() must raise DxlException when no broker is reachable."""
        with self.create_client() as client:
            # Point the client at an address that is not serving DXL.
            broker = Broker("localhost", UuidGenerator.generate_id_as_string(), "127.0.0.1")
            client._config.brokers = [broker]
            with self.assertRaises(DxlException):
                client.connect()
    def test_client_receives_event_on_topic_only_after_subscribe(self):
        """
        The idea of this test is to send an event to a topic which we are not
        subscribed, so we shouldn't be notified. Then, we subscribe to that
        topic and send a new event, we should get that last one.
        """
        with self.create_client() as client:
            test_topic = '/test/whatever/' + client.config._client_id
            client.connect()
            time.sleep(2)
            self.assertTrue(client.connected)
            # Set request callback (use mock to easily check when it was called)
            ecallback = EventCallback()
            ecallback.on_event = Mock()
            client.add_event_callback(test_topic, ecallback, False)
            # Send event thru dxl fabric to a topic which we are *not* subscribed
            msg = Event(destination_topic=test_topic)
            client.send_event(msg)
            time.sleep(1)
            # We haven't been notified
            self.assertEqual(ecallback.on_event.call_count, 0)
            # Subscribe to topic
            client.subscribe(test_topic)
            time.sleep(1)
            # Send event thru dxl fabric again to that topic
            msg = Event(destination_topic=test_topic)
            client.send_event(msg)
            time.sleep(1)
            # Now we should have been notified of the event
            self.assertEqual(ecallback.on_event.call_count, 1)
    def test_client_receives_error_response_on_request_to_unknown_service(self):
        """
        The idea of this test is to send a sync request to an unknown service
        and get a "unable to locate service" error response.
        """
        with self.create_client() as client:
            test_topic = '/test/doesntexists/' + client.config._client_id
            client.connect()
            time.sleep(2)
            self.assertTrue(client.connected)
            # Send request thru dxl fabric to a service which doesn't exists
            msg = Request(destination_topic=test_topic)
            msg.service_id = UuidGenerator.generate_id_as_string()
            response = client.sync_request(msg, 1)
            # Check that we have an error response for our request
            self.assertTrue(isinstance(response, ErrorResponse))
            self.assertEqual(response.service_id, msg.service_id)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 1.648438 | 2 |
sdk/python/pulumi_mongodbatlas/get_private_link_endpoint_service.py | pulumi/pulumi-mongodbatlas | 9 | 12760888 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetPrivateLinkEndpointServiceResult',
'AwaitableGetPrivateLinkEndpointServiceResult',
'get_private_link_endpoint_service',
]
@pulumi.output_type
class GetPrivateLinkEndpointServiceResult:
    """
    A collection of values returned by getPrivateLinkEndpointService.
    """
    # tfgen-generated result container: one constructor kwarg and one
    # read-only property per output field, stored via pulumi.set/get.
    def __init__(__self__, aws_connection_status=None, azure_status=None, delete_requested=None, endpoint_service_id=None, error_message=None, id=None, interface_endpoint_id=None, private_endpoint_connection_name=None, private_endpoint_ip_address=None, private_endpoint_resource_id=None, private_link_id=None, project_id=None, provider_name=None):
        # Defensive isinstance checks: values come from an untyped invoke
        # result; falsy values (None/'') skip the check by construction.
        if aws_connection_status and not isinstance(aws_connection_status, str):
            raise TypeError("Expected argument 'aws_connection_status' to be a str")
        pulumi.set(__self__, "aws_connection_status", aws_connection_status)
        if azure_status and not isinstance(azure_status, str):
            raise TypeError("Expected argument 'azure_status' to be a str")
        pulumi.set(__self__, "azure_status", azure_status)
        if delete_requested and not isinstance(delete_requested, bool):
            raise TypeError("Expected argument 'delete_requested' to be a bool")
        pulumi.set(__self__, "delete_requested", delete_requested)
        if endpoint_service_id and not isinstance(endpoint_service_id, str):
            raise TypeError("Expected argument 'endpoint_service_id' to be a str")
        pulumi.set(__self__, "endpoint_service_id", endpoint_service_id)
        if error_message and not isinstance(error_message, str):
            raise TypeError("Expected argument 'error_message' to be a str")
        pulumi.set(__self__, "error_message", error_message)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if interface_endpoint_id and not isinstance(interface_endpoint_id, str):
            raise TypeError("Expected argument 'interface_endpoint_id' to be a str")
        pulumi.set(__self__, "interface_endpoint_id", interface_endpoint_id)
        if private_endpoint_connection_name and not isinstance(private_endpoint_connection_name, str):
            raise TypeError("Expected argument 'private_endpoint_connection_name' to be a str")
        pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
        if private_endpoint_ip_address and not isinstance(private_endpoint_ip_address, str):
            raise TypeError("Expected argument 'private_endpoint_ip_address' to be a str")
        pulumi.set(__self__, "private_endpoint_ip_address", private_endpoint_ip_address)
        if private_endpoint_resource_id and not isinstance(private_endpoint_resource_id, str):
            raise TypeError("Expected argument 'private_endpoint_resource_id' to be a str")
        pulumi.set(__self__, "private_endpoint_resource_id", private_endpoint_resource_id)
        if private_link_id and not isinstance(private_link_id, str):
            raise TypeError("Expected argument 'private_link_id' to be a str")
        pulumi.set(__self__, "private_link_id", private_link_id)
        if project_id and not isinstance(project_id, str):
            raise TypeError("Expected argument 'project_id' to be a str")
        pulumi.set(__self__, "project_id", project_id)
        if provider_name and not isinstance(provider_name, str):
            raise TypeError("Expected argument 'provider_name' to be a str")
        pulumi.set(__self__, "provider_name", provider_name)
    @property
    @pulumi.getter(name="awsConnectionStatus")
    def aws_connection_status(self) -> str:
        """
        Status of the interface endpoint for AWS.
        Returns one of the following values:
        """
        return pulumi.get(self, "aws_connection_status")
    @property
    @pulumi.getter(name="azureStatus")
    def azure_status(self) -> str:
        """
        Status of the interface endpoint for AZURE.
        Returns one of the following values:
        """
        return pulumi.get(self, "azure_status")
    @property
    @pulumi.getter(name="deleteRequested")
    def delete_requested(self) -> bool:
        """
        Indicates if Atlas received a request to remove the interface endpoint from the private endpoint connection.
        """
        return pulumi.get(self, "delete_requested")
    @property
    @pulumi.getter(name="endpointServiceId")
    def endpoint_service_id(self) -> str:
        return pulumi.get(self, "endpoint_service_id")
    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> str:
        """
        Error message pertaining to the interface endpoint. Returns null if there are no errors.
        """
        return pulumi.get(self, "error_message")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="interfaceEndpointId")
    def interface_endpoint_id(self) -> str:
        """
        Unique identifier of the interface endpoint.
        """
        return pulumi.get(self, "interface_endpoint_id")
    @property
    @pulumi.getter(name="privateEndpointConnectionName")
    def private_endpoint_connection_name(self) -> str:
        """
        Name of the connection for this private endpoint that Atlas generates.
        """
        return pulumi.get(self, "private_endpoint_connection_name")
    @property
    @pulumi.getter(name="privateEndpointIpAddress")
    def private_endpoint_ip_address(self) -> str:
        """
        Private IP address of the private endpoint network interface.
        """
        return pulumi.get(self, "private_endpoint_ip_address")
    @property
    @pulumi.getter(name="privateEndpointResourceId")
    def private_endpoint_resource_id(self) -> str:
        """
        Unique identifier of the private endpoint.
        """
        return pulumi.get(self, "private_endpoint_resource_id")
    @property
    @pulumi.getter(name="privateLinkId")
    def private_link_id(self) -> str:
        return pulumi.get(self, "private_link_id")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> str:
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter(name="providerName")
    def provider_name(self) -> str:
        return pulumi.get(self, "provider_name")
class AwaitableGetPrivateLinkEndpointServiceResult(GetPrivateLinkEndpointServiceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Already-resolved value: the unreachable 'yield' turns this into a
        # generator so the object is awaitable without ever suspending.
        if False:
            yield self
        return GetPrivateLinkEndpointServiceResult(
            aws_connection_status=self.aws_connection_status,
            azure_status=self.azure_status,
            delete_requested=self.delete_requested,
            endpoint_service_id=self.endpoint_service_id,
            error_message=self.error_message,
            id=self.id,
            interface_endpoint_id=self.interface_endpoint_id,
            private_endpoint_connection_name=self.private_endpoint_connection_name,
            private_endpoint_ip_address=self.private_endpoint_ip_address,
            private_endpoint_resource_id=self.private_endpoint_resource_id,
            private_link_id=self.private_link_id,
            project_id=self.project_id,
            provider_name=self.provider_name)
def get_private_link_endpoint_service(endpoint_service_id: Optional[str] = None,
                                      private_link_id: Optional[str] = None,
                                      project_id: Optional[str] = None,
                                      provider_name: Optional[str] = None,
                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkEndpointServiceResult:
    """
    `PrivateLinkEndpointService` describe a Private Endpoint Link. This represents a Private Endpoint Link Connection that wants to retrieve details in an Atlas project.
    > **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation.
    :param str endpoint_service_id: Unique identifier of the `AWS` or `AZURE` resource.
    :param str private_link_id: Unique identifier of the private endpoint service for which you want to retrieve a private endpoint.
    :param str project_id: Unique identifier for the project.
    :param str provider_name: Cloud provider for which you want to create a private endpoint. Atlas accepts `AWS` or `AZURE`.
    """
    # Marshal arguments into the camelCase names the provider RPC expects.
    __args__ = dict()
    __args__['endpointServiceId'] = endpoint_service_id
    __args__['privateLinkId'] = private_link_id
    __args__['projectId'] = project_id
    __args__['providerName'] = provider_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the SDK version so the engine selects the matching provider plugin.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('mongodbatlas:index/getPrivateLinkEndpointService:getPrivateLinkEndpointService', __args__, opts=opts, typ=GetPrivateLinkEndpointServiceResult).value
    return AwaitableGetPrivateLinkEndpointServiceResult(
        aws_connection_status=__ret__.aws_connection_status,
        azure_status=__ret__.azure_status,
        delete_requested=__ret__.delete_requested,
        endpoint_service_id=__ret__.endpoint_service_id,
        error_message=__ret__.error_message,
        id=__ret__.id,
        interface_endpoint_id=__ret__.interface_endpoint_id,
        private_endpoint_connection_name=__ret__.private_endpoint_connection_name,
        private_endpoint_ip_address=__ret__.private_endpoint_ip_address,
        private_endpoint_resource_id=__ret__.private_endpoint_resource_id,
        private_link_id=__ret__.private_link_id,
        project_id=__ret__.project_id,
        provider_name=__ret__.provider_name)
| 1.9375 | 2 |
projects/serializers.py | engineer237/Project-Track-Api | 0 | 12760889 | from dataclasses import fields
from pyexpat import model
from rest_framework import serializers
from .models import Project
from track.serializers import TrackSerializer
class ProjectSerializer(serializers.ModelSerializer):
    """DRF serializer for Project, exposing every model field via '__all__'."""
    class Meta:
        model= Project
        fields = '__all__'
training/spit_sizes_experiment.py | ethansaxenian/RosettaDecode | 0 | 12760890 | <filename>training/spit_sizes_experiment.py
from typing import Type, Any, Dict
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from shared import RANDOM_SEED, Model
from training.data_splits import DataSplitter
from training.models import MODELS
def test_split_sizes(models: Dict[Type[Model], Dict[str, Any]]) -> None:
    """Learning-curve experiment: retrain each model on shrinking fractions
    of the training split and plot mean validation accuracy +/- one std dev.

    ``models`` maps a model class to the kwargs used to construct it.
    Saves the figure to ../data/area-Accuracies.png and shows it.
    """
    splitter = DataSplitter("../data/features_data_all_bc.jsonl", seed=RANDOM_SEED)
    X, y = splitter.collect_features_data()
    X_train, X_vali, X_test, y_train, y_vali, y_test = splitter.split_train_vali_test(X, y)
    N = len(y_train)
    num_trials = 10
    # Train on 100%, 90%, ..., 10% of the training data.
    percentages = list(range(100, 0, -10))
    for model_type, params in models.items():
        scores = {}
        acc_mean = []
        acc_std = []
        print(model_type.__name__)
        for pct in percentages:
            print(f"{pct}% = {int(N * (pct / 100))} samples...")
            scores[pct] = []
            for i in range(num_trials):
                # Subsample without replacement so each trial sees distinct rows.
                X_sample, y_sample = resample(X_train, y_train, n_samples=int(N * (pct / 100)), replace=False)
                model = model_type(**params)
                if hasattr(model, "random_state"):
                    # Distinct seed per (percentage, trial) for independent runs.
                    model.random_state = RANDOM_SEED + pct + i
                model.fit(X_sample, y_sample)
                scores[pct].append(model.score(X_vali, y_vali))
            acc_mean.append(np.mean(scores[pct]))
            acc_std.append(np.std(scores[pct]))
        means = np.array(acc_mean)
        std = np.array(acc_std)
        # Solid line = mean accuracy; shaded band = +/- one standard deviation.
        plt.plot(percentages, acc_mean, "o-")
        plt.fill_between(percentages, means - std, means + std, alpha=0.2, label=model_type.__name__)
    plt.legend()
    plt.xlabel("Percent Train")
    plt.ylabel("Mean Accuracy")
    plt.xlim([0, 100])
    plt.title(f"Shaded Accuracy Plot")
    plt.savefig(f"../data/area-Accuracies.png")
    plt.show()
if __name__ == '__main__':
    # Run the learning-curve experiment over every configured model.
    test_split_sizes(MODELS)
| 2.765625 | 3 |
sumo/phonon/__init__.py | zhubonan/sumo | 1 | 12760891 | # coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
Package containing functions for loading and manipulating phonon data.
"""
| 0.71875 | 1 |
dash/app_2.py | gllrt/data2day2018 | 0 | 12760892 | <reponame>gllrt/data2day2018
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from datetime import datetime as dt, timedelta
# Define object for external CSS stylesheet
# NOTE(review): the first entry points at a Bootstrap docs page, not a CSS
# file, and 'integrity' holds a placeholder ('<KEY>') instead of a real SRI
# hash -- confirm both before deploying.
external_stylesheets = [
    'https://getbootstrap.com/docs/3.3/getting-started/',
    {
        'href': 'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css',
        'rel': 'stylesheet',
        'integrity': '<KEY>',
        'crossorigin': 'anonymous'
    }
]
# Initialize app object and add external stylesheet
app = dash.Dash(__name__,
                external_stylesheets=external_stylesheets)
# Define the layout
# Static page layout: a header plus two Bootstrap rows (inputs, then outputs).
app.layout = html.Div(children=[
    html.H2('Twitter Showcase'),
    # Create a new Row in the UI for Inputs
    html.Div([
        html.Div([
            html.Div([
                html.Div([
                    html.P('Datumsbereich:')
                ],
                    className="control-label"
                ),
                dcc.DatePickerRange(
                    id='my-date-picker-range',
                    min_date_allowed=(dt.now() - timedelta(weeks=52)).date(),
                    max_date_allowed=dt.today(),
                    initial_visible_month=dt.today(),
                    start_date=(dt.now() - timedelta(days=7)).date(),
                    end_date=dt.now().date(),
                    display_format='DD.MM.YYYY',
                )
            ]
            )
        ],
            className="col-sm-4"
        ),
        html.Div([
            html.Div([
                html.P('Hash-Tag oder Benutzer:')
            ],
                className="control-label"
            ),
            dcc.Input(
                id='hashtag-input',
                type='text',
                value='@data2day'
            )
        ],
            className="col-sm-4"
        ),
        html.Div([
            html.Div([
                html.P('Anzahl von Tweets:')
            ],
                className="control-label"
            ),
            dcc.Slider(
                id='number-tweets-slider',
                min=100,
                max=2000,
                value=500,
                step=100,
                # The filter's lambda returns '' (falsy) for even multiples of
                # 100, so only the "odd hundreds" 100, 300, ..., 1900 get marks.
                marks={i: '{}'.format(i) for i in list(filter(lambda x: '{}'.format(x) if (x/100)%2 == 1 else '', [(100*(i+1)) for i in range(20)]))}
            )
        ],
            className="col-sm-4"
        )
    ],
        className="row"
    ),
    # Create a new row for exemplary output
    html.Div([
        html.Div([
            html.Div(id='output-container-date-picker-range')
        ],
            className="col-sm-4"
        ),
        html.Div([
            html.Div(id='output-hashtag-input')
        ],
            className="col-sm-4"
        ),
        html.Div([
            # NOTE: the 'ouput-...' id typo is load-bearing -- the callback
            # below targets the same misspelled id; fix both sides or neither.
            html.Div(id='ouput-number-tweets-slider')
        ],
            className="col-sm-4"
        )
    ],
        className="row"
    )
],
    className="container-fluid"
)
# Create function based on input of date range slider
@app.callback(
    Output('output-container-date-picker-range', 'children'),
    [Input('my-date-picker-range', 'start_date'),
     Input('my-date-picker-range', 'end_date')]
)
def update_output(start_date, end_date):
    """Render the selected range as 'Aktuelle Auswahl: <start> bis <end>'.

    ``start_date``/``end_date`` arrive from the DatePickerRange as ISO
    'YYYY-MM-DD' strings, or None while unset.  Returns a prompt string when
    nothing has been selected yet.
    """
    prefix = 'Aktuelle Auswahl: '
    string_prefix = prefix
    if start_date is not None:
        start_date = dt.strptime(start_date, '%Y-%m-%d')
        start_date_string = start_date.strftime('%d. %B %Y')
        string_prefix = string_prefix + start_date_string + ' bis '
    if end_date is not None:
        end_date = dt.strptime(end_date, '%Y-%m-%d')
        end_date_string = end_date.strftime('%d. %B %Y')
        string_prefix = string_prefix + end_date_string
    # BUG FIX: the original compared len(string_prefix) against the length of
    # the unrelated English string 'You have selected: ' (19 chars); the German
    # prefix is 18 chars, so the "nothing selected" prompt was unreachable.
    if string_prefix == prefix:
        return 'Select a date to see it displayed here'
    return string_prefix
# Create function based on input of text input
@app.callback(
    Output('output-hashtag-input', 'children'),
    [Input('hashtag-input', 'value')]
)
def update_output_hashtag(input_hashtag):
    """Echo the hashtag/user text field back into its display container."""
    return f'{input_hashtag}'
# Create function based on input of integer slider
@app.callback(
    dash.dependencies.Output('ouput-number-tweets-slider', 'children'),
    [dash.dependencies.Input('number-tweets-slider', 'value')])
def update_output_number_tweets(input_number_tweets):
    """Show how many tweets will be fetched, per the slider position."""
    # NOTE: the 'ouput-...' id typo matches the layout component id;
    # do not "fix" one side without the other.
    message = 'Anzahl an abzurufenden Tweets: {}'.format(input_number_tweets)
    return message
# Host the app via Flask
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # interactive debugger to the whole network -- do not ship this to prod.
    app.run_server(host='0.0.0.0', port=8052, debug=True)
| 2.828125 | 3 |
src/p2_add_two_numbers.py | magic-akari/leetcode | 0 | 12760893 | <filename>src/p2_add_two_numbers.py
#
# @lc app=leetcode id=2 lang=python3
#
# [2] Add Two Numbers
#
from structs import ListNode
# @lc code=start
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as least-significant-first
        digit lists, returning the sum in the same representation."""
        result_head = ListNode(0)
        node = result_head
        carry = 0
        while node is not None:
            # Fold the next digit of each operand into the current node.
            if l1 is not None:
                node.val += l1.val
                l1 = l1.next
            if l2 is not None:
                node.val += l2.val
                l2 = l2.next
            # Normalize the digit and compute the carry in one step.
            carry, node.val = divmod(node.val, 10)
            # Extend the result only while digits or a carry remain.
            if l1 is not None or l2 is not None or carry:
                node.next = ListNode(carry)
            node = node.next
        return result_head
# @lc code=end
if __name__ == "__main__":
    s = Solution()
    # Smoke test: lists are least-significant digit first (2->4->3 is 342).
    # Presumably structs.ListNode overloads '**' (right-associative) as a
    # node-chaining operator -- confirm against the structs module.
    l1 = ListNode(2)**ListNode(4)**ListNode(3)
    l2 = ListNode(5)**ListNode(6)**ListNode(4)
    print(s.addTwoNumbers(l1, l2))
| 3.65625 | 4 |
data/idd_dataset.py | valeoai/SemanticPalette | 17 | 12760894 | <reponame>valeoai/SemanticPalette
import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
class IddDataset(BaseDataset):
    """India Driving Dataset loader: pairs preprocessed instance-id label
    maps with their corresponding leftImg8bit frames."""

    def get_paths(self, opt, phase="train"):
        """Return (segmentation paths, None, image paths) for the split."""
        root = opt.dataroot
        # Only 'train' and 'val' splits exist on disk; 'valid' maps to 'val'
        # and any other phase value falls back to 'train'.
        split = 'val' if phase == 'valid' else 'train'
        seg_dir = os.path.join(root, 'preprocessed', split)
        seg_paths = [p for p in make_dataset(seg_dir, recursive=True)
                     if p.endswith('_instanceIds.png')]
        img_dir = os.path.join(root, 'leftImg8bit', split)
        img_paths = [p for p in make_dataset(img_dir, recursive=True)
                     if p.endswith('_leftImg8bit.png')]
        # This dataset provides no separate semantic-label path list.
        return seg_paths, None, img_paths

    def paths_match(self, path1, path2):
        """Two paths refer to the same frame when the second-to-last
        underscore-separated token of their basenames agrees."""
        key1 = os.path.basename(path1).split('_')[-2]
        key2 = os.path.basename(path2).split('_')[-2]
        return key1 == key2
| 2.40625 | 2 |
tests/test_datasets/test_dataset_functions.py | irfannurafif/openml-keras | 0 | 12760895 | import unittest
import os
import sys
if sys.version_info[0] >= 3:
from unittest import mock
else:
import mock
from oslo_concurrency import lockutils
import scipy.sparse
import openml
from openml import OpenMLDataset
from openml.exceptions import OpenMLCacheException, PyOpenMLError
from openml.testing import TestBase
from openml.datasets.functions import (_get_cached_dataset,
_get_cached_dataset_features,
_get_cached_dataset_qualities,
_get_cached_datasets,
_get_dataset_description,
_get_dataset_arff,
_get_dataset_features,
_get_dataset_qualities)
class TestOpenMLDataset(TestBase):
    """Tests for openml.datasets: cache helpers, listing, downloading,
    publishing, and cleanup of partially downloaded datasets."""
    # Flag for the nose multiprocess plugin: tests may run in parallel.
    _multiprocess_can_split_ = True
    def setUp(self):
        super(TestOpenMLDataset, self).setUp()
    def tearDown(self):
        # Drop cached pickles so state does not leak between tests.
        self._remove_pickle_files()
        super(TestOpenMLDataset, self).tearDown()
    def _remove_pickle_files(self):
        """Delete cached dataset.pkl files for the static test datasets."""
        cache_dir = self.static_cache_dir
        for did in ['-1', '2']:
            # Hold the same inter-process lock get_dataset uses, so we never
            # delete a pickle another process is currently writing.
            with lockutils.external_lock(
                    name='datasets.functions.get_dataset:%s' % did,
                    lock_path=os.path.join(openml.config.get_cache_directory(), 'locks'),
            ):
                pickle_path = os.path.join(cache_dir, 'datasets', did,
                                           'dataset.pkl')
                # Best-effort delete.  NOTE(review): the bare except also
                # hides unexpected errors such as permission problems.
                try:
                    os.remove(pickle_path)
                except:
                    pass
    def test__list_cached_datasets(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        cached_datasets = openml.datasets.functions._list_cached_datasets()
        self.assertIsInstance(cached_datasets, list)
        self.assertEqual(len(cached_datasets), 2)
        self.assertIsInstance(cached_datasets[0], int)
    @mock.patch('openml.datasets.functions._list_cached_datasets')
    def test__get_cached_datasets(self, _list_cached_datasets_mock):
        openml.config.set_cache_directory(self.static_cache_dir)
        _list_cached_datasets_mock.return_value = [-1, 2]
        datasets = _get_cached_datasets()
        self.assertIsInstance(datasets, dict)
        self.assertEqual(len(datasets), 2)
        self.assertIsInstance(list(datasets.values())[0], OpenMLDataset)
    def test__get_cached_dataset(self, ):
        openml.config.set_cache_directory(self.static_cache_dir)
        dataset = _get_cached_dataset(2)
        features = _get_cached_dataset_features(2)
        qualities = _get_cached_dataset_qualities(2)
        self.assertIsInstance(dataset, OpenMLDataset)
        self.assertTrue(len(dataset.features) > 0)
        self.assertTrue(len(dataset.features) == len(features['oml:feature']))
        self.assertTrue(len(dataset.qualities) == len(qualities))
    def test_get_cached_dataset_description(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        description = openml.datasets.functions._get_cached_dataset_description(2)
        self.assertIsInstance(description, dict)
    def test_get_cached_dataset_description_not_cached(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex (kept for Python 2 compatibility here).
        self.assertRaisesRegexp(OpenMLCacheException, "Dataset description for "
                                "dataset id 3 not cached",
                                openml.datasets.functions._get_cached_dataset_description,
                                3)
    def test_get_cached_dataset_arff(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        description = openml.datasets.functions._get_cached_dataset_arff(
            dataset_id=2)
        self.assertIsInstance(description, str)
    def test_get_cached_dataset_arff_not_cached(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        self.assertRaisesRegexp(OpenMLCacheException, "ARFF file for "
                                "dataset id 3 not cached",
                                openml.datasets.functions._get_cached_dataset_arff,
                                3)
    def test_list_datasets(self):
        # We can only perform a smoke test here because we test on dynamic
        # data from the internet...
        datasets = openml.datasets.list_datasets()
        # 1087 as the number of datasets on openml.org
        self.assertGreaterEqual(len(datasets), 100)
        for did in datasets:
            self._check_dataset(datasets[did])
    def test_list_datasets_by_tag(self):
        datasets = openml.datasets.list_datasets(tag='study_14')
        self.assertGreaterEqual(len(datasets), 100)
        for did in datasets:
            self._check_dataset(datasets[did])
    def test_list_datasets_paginate(self):
        size = 10
        max = 100
        for i in range(0, max, size):
            datasets = openml.datasets.list_datasets(offset=i, size=size)
            self.assertGreaterEqual(size, len(datasets))
            for did in datasets:
                self._check_dataset(datasets[did])
    @unittest.skip('See https://github.com/openml/openml-python/issues/149')
    def test_check_datasets_active(self):
        active = openml.datasets.check_datasets_active([1, 17])
        self.assertTrue(active[1])
        self.assertFalse(active[17])
        self.assertRaisesRegexp(ValueError, 'Could not find dataset 79 in OpenML'
                                            ' dataset list.',
                                openml.datasets.check_datasets_active, [79])
    def test_get_datasets(self):
        # Downloading must populate description, ARFF, features and
        # qualities files in the cache for every requested dataset id.
        dids = [1, 2]
        datasets = openml.datasets.get_datasets(dids)
        self.assertEqual(len(datasets), 2)
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "2", "description.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "2", "dataset.arff")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "2", "features.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "2", "qualities.xml")))
    def test_get_dataset(self):
        dataset = openml.datasets.get_dataset(1)
        self.assertEqual(type(dataset), OpenMLDataset)
        self.assertEqual(dataset.name, 'anneal')
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
        self.assertTrue(os.path.exists(os.path.join(
            openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
        self.assertGreater(len(dataset.features), 1)
        self.assertGreater(len(dataset.qualities), 4)
    def test_get_dataset_with_string(self):
        # Datasets containing string attributes cannot be loaded as arrays.
        dataset = openml.datasets.get_dataset(101)
        self.assertRaises(PyOpenMLError, dataset._get_arff, 'arff')
        self.assertRaises(PyOpenMLError, dataset.get_data)
    def test_get_dataset_sparse(self):
        dataset = openml.datasets.get_dataset(102)
        X = dataset.get_data()
        self.assertIsInstance(X, scipy.sparse.csr_matrix)
    def test_download_rowid(self):
        # Smoke test which checks that the dataset has the row-id set correctly
        did = 44
        dataset = openml.datasets.get_dataset(did)
        self.assertEqual(dataset.row_id_attribute, 'Counter')
    def test__get_dataset_description(self):
        description = _get_dataset_description(self.workdir, 2)
        self.assertIsInstance(description, dict)
        description_xml_path = os.path.join(self.workdir,
                                            'description.xml')
        self.assertTrue(os.path.exists(description_xml_path))
    def test__getarff_path_dataset_arff(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        description = openml.datasets.functions._get_cached_dataset_description(2)
        arff_path = _get_dataset_arff(self.workdir, description)
        self.assertIsInstance(arff_path, str)
        self.assertTrue(os.path.exists(arff_path))
    def test__getarff_md5_issue(self):
        # A deliberately wrong checksum must make the download fail loudly.
        description = {
            'oml:id': 5,
            'oml:md5_checksum': 'abc',
            'oml:url': 'https://www.openml.org/data/download/61',
        }
        self.assertRaisesRegexp(
            ValueError,
            'Checksum ad484452702105cbf3d30f8deaba39a9 of downloaded dataset 5 '
            'is unequal to the checksum abc sent by the server.',
            _get_dataset_arff,
            self.workdir, description,
        )
    def test__get_dataset_features(self):
        features = _get_dataset_features(self.workdir, 2)
        self.assertIsInstance(features, dict)
        features_xml_path = os.path.join(self.workdir, 'features.xml')
        self.assertTrue(os.path.exists(features_xml_path))
    def test__get_dataset_qualities(self):
        # Only a smoke check
        qualities = _get_dataset_qualities(self.workdir, 2)
        self.assertIsInstance(qualities, list)
    def test_deletion_of_cache_dir(self):
        # Simple removal
        did_cache_dir = openml.datasets.functions.\
            _create_dataset_cache_directory(1)
        self.assertTrue(os.path.exists(did_cache_dir))
        openml.datasets.functions._remove_dataset_cache_dir(did_cache_dir)
        self.assertFalse(os.path.exists(did_cache_dir))
    # Use _get_dataset_arff to load the description, trigger an exception in the
    # test target and have a slightly higher coverage
    @mock.patch('openml.datasets.functions._get_dataset_arff')
    def test_deletion_of_cache_dir_faulty_download(self, patch):
        # A failed download must not leave a partial dataset cache behind.
        patch.side_effect = Exception('Boom!')
        self.assertRaisesRegexp(Exception, 'Boom!', openml.datasets.get_dataset,
                                1)
        datasets_cache_dir = os.path.join(self.workdir, 'datasets')
        self.assertEqual(len(os.listdir(datasets_cache_dir)), 0)
    def test_publish_dataset(self):
        # The first call only ensures the ARFF file referenced below exists
        # in the cache; its return value is immediately discarded.
        dataset = openml.datasets.get_dataset(3)
        file_path = os.path.join(openml.config.get_cache_directory(),
                                 "datasets", "3", "dataset.arff")
        dataset = OpenMLDataset(
            name="anneal", version=1, description="test",
            format="ARFF", licence="public", default_target_attribute="class", data_file=file_path)
        dataset.publish()
        self.assertIsInstance(dataset.dataset_id, int)
    def test__retrieve_class_labels(self):
        openml.config.set_cache_directory(self.static_cache_dir)
        labels = openml.datasets.get_dataset(2).retrieve_class_labels()
        self.assertEqual(labels, ['1', '2', '3', '4', '5', 'U'])
        labels = openml.datasets.get_dataset(2).retrieve_class_labels(
            target_name='product-type')
        self.assertEqual(labels, ['C', 'H', 'G'])
    def test_upload_dataset_with_url(self):
        dataset = OpenMLDataset(
            name="UploadTestWithURL", version=1, description="test",
            format="ARFF",
            url="https://www.openml.org/data/download/61/dataset_61_iris.arff")
        dataset.publish()
        self.assertIsInstance(dataset.dataset_id, int)
| 2.109375 | 2 |
scripts/sfh.py | kmcquinn/match | 0 | 12760896 | from __future__ import print_function
import argparse
import logging
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from .config import EXT
from .fileio import read_binned_sfh
from .utils import convertz, parse_pipeline, float2sci
logger = logging.getLogger()
def mh2z(num):
    """Convert a metallicity [M/H] = log10(Z / Z_sun) back to a metal
    fraction Z, assuming a solar value Z_sun = 0.02."""
    z_sun = 0.02
    z = z_sun * 10 ** num
    return z
def quadriture(x):
    """Add the elements of *x* in quadrature: sqrt(sum(x_i ** 2))."""
    sum_of_squares = np.sum(x * x)
    return sum_of_squares ** 0.5
class SFH(object):
    '''
    Load a MATCH sfh solution (calcsfh / zcombine / zcmerge output) as a
    class with attributes set by the best fits from the sfh file.
    '''
    def __init__(self, filename, hmc_file=None, meta_file=None):
        """
        Parameters
        ----------
        filename : str
            data file
        hmc_file : str
            data file from which to overwite uncertainties
        meta_file : str
            data file to only read bestfit line.
        """
        self.base, self.name = os.path.split(filename)
        self.data = read_binned_sfh(filename, hmc_file)
        if meta_file is None:
            meta_file = filename
        self.load_match_header(meta_file)

    def load_match_header(self, filename):
        '''
        assumes header is from line 0 to 6 and sets footer to be the final
        line of the file

        header formatting is important:
        Line # format requirement
        first  Ends with "= %f (%s)"
        N      is the string "Best fit:\n"
        N+1    has ',' separated strings of "%s=%f+%f-%f"
        last   is formatted "%s %f %f %f"
        '''
        def set_value_err_attr(key, attr, pattr, mattr):
            '''
            set attributes [key], [key]_perr, [key]_merr
            to attr, pattr, mattr (must be floats)
            '''
            self.__setattr__(key, float(attr))
            self.__setattr__(key + '_perr', float(pattr))
            self.__setattr__(key + '_merr', float(mattr))

        with open(filename, 'r') as infile:
            lines = infile.readlines()

        if len(lines) == 0:
            print('empty file: %s' % filename)
            self.header = []
            self.footer = []
            self.bestfit = np.nan
            self.match_out = ''
            self.data = np.array([])
            # Keep the attribute defined even for empty files so later
            # `self.flag` accesses do not raise AttributeError.
            self.flag = None
            return

        self.header = lines[0:6]
        self.footer = lines[-1]
        try:
            bestfit, fout = \
                self.header[0].replace(' ', '').split('=')[1].split('(')
            self.bestfit = float(bestfit)
            self.match_out = fout.split(')')[0]

            try:
                iline = self.header.index('Best fit:\n') + 1
            except ValueError:
                print('Need Best fit line to assign attributes')
                raise ValueError

            line = self.header[iline].strip().replace(' ', '').split(',')
            for i in line:
                key, attrs = i.split('=')
                attr, pmattr = attrs.split('+')
                pattr, mattr = pmattr.split('-')
                set_value_err_attr(key, attr, pattr, mattr)
            # the final line has totalSF
            key, attr, pattr, mattr = self.header[-1].strip().split()
            set_value_err_attr(key, attr, pattr, mattr)
        except Exception:
            # zcmerge files: the first line has totalSF
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            self.header = lines[0]
            self.footer = ['']
            try:
                key, attr, pattr, mattr = self.header.strip().split()
                set_value_err_attr(key, attr, pattr, mattr)
            except Exception:
                # no header
                pass

        self.flag = None
        if np.sum(np.diff(self.data.mh)) == 0:
            # constant metallicity solution
            self.flag = 'setz'
        elif len(np.nonzero(np.diff(self.data.mh) >= 0)[0]) == \
                len(self.data.mh) - 1:
            # monotonically non-decreasing metallicity. np.diff returns N-1
            # values, so compare against N-1; the original compared to N,
            # which made this branch unreachable (and, once reachable, it
            # must not clobber 'setz' -- hence the elif).
            self.flag = 'zinc'
        return

    def mh2z(self, num):
        """Convert [M/H] to metal mass fraction Z (Z_sun = 0.02)."""
        return 0.02 * 10 ** num

    def plot_bins(self, val='sfr', err=False, convertz=False, offset=1.):
        '''make SFH bins for plotting'''
        if isinstance(val, str):
            if err:
                valm = self.data['%s_errm' % val] * offset
                valp = self.data['%s_errp' % val] * offset
            val = self.data[val] * offset
            if convertz:
                val = mh2z(val)
                if err:
                    valm = mh2z(valm)
                    valp = mh2z(valp)
        lagei = self.data.lagei
        lagef = self.data.lagef

        # double up value
        # lagei_i, lagef_i, lagei_i+1, lagef_i+1 ...
        lages = np.ravel([(lagei[i], lagef[i]) for i in range(len(lagei))])
        vals = np.ravel([(val[i], val[i]) for i in range(len(val))])
        if err:
            valm = np.ravel([(valm[i], valm[i]) for i in range(len(val))])
            valp = np.ravel([(valp[i], valp[i]) for i in range(len(val))])
            data = (vals, valm, valp)
        else:
            data = vals
        return lages, data

    def age_plot(self, val='sfr', ax=None, plt_kw=None, errors=True,
                 convertz=False, xlabel=None, ylabel=None,
                 sfr_offset=1e3):
        """Plot SFR (or another column, e.g. 'mh') vs age.

        Parameters
        ----------
        val : str
            column of self.data to plot ('sfr' or a metallicity column).
        ax : matplotlib axes, optional
        plt_kw : dict, optional
            passed to ax.plot (and, without linestyle, to ax.errorbar).
        errors : bool
            overlay error bars where uncertainties are available.
        convertz : bool
            convert [M/H] values to Z.
        sfr_offset : float
            multiplicative offset applied to the SFR for readability.
        """
        # plt_kw defaulted to a mutable {} before; use None to avoid the
        # shared-mutable-default pitfall.
        plt_kw = plt_kw or {}
        plt_kw = dict({'lw': 3, 'color': 'black'}, **plt_kw)
        eplt_kw = plt_kw.copy()
        eplt_kw.update({'linestyle': 'None'})

        lages, sfrs = self.plot_bins(offset=sfr_offset)
        # Bin centers and per-bin (not doubled-up) values for error bars.
        # (A previous redundant plot_bins(err=True) call was removed: its
        # results were immediately overwritten below.)
        rlages = np.append(self.data['lagei'], self.data['lagef'][-1])
        rlages = rlages[:-1] + np.diff(rlages) / 2.
        rsfrs = self.data['sfr'] * sfr_offset
        rsfr_merrs = self.data['sfr_errm'] * sfr_offset
        rsfr_perrs = self.data['sfr_errp'] * sfr_offset

        lages = 10 ** (lages - 9.)
        rlages = 10 ** (rlages - 9.)

        if val != 'sfr':
            lages, vals = self.plot_bins(val=val, convertz=convertz)
            # mask values with no SF
            isfr, = np.nonzero(sfrs == 0)
            vals[isfr] = np.nan
            if self.flag != 'setz':
                rlages, (rvals, val_merrs, val_perrs) = \
                    self.plot_bins(val=val, err=True)
                # mask values with no SF
                irsfr, = np.nonzero(rsfrs == 0)
                val_merrs[irsfr] = 0.
                val_perrs[irsfr] = 0.
                if np.sum(val_merrs) == 0 or np.sum(val_perrs) == 0:
                    errors = False
            else:
                # constant-metallicity solutions carry no useful errors here
                errors = False
            if 'mh' in val:
                if ylabel is not None:
                    ylabel = r'$\rm{[M/H]}$'
                if convertz:
                    ylabel = r'$Z$'
        else:
            ylabel = r'$SFR\ %s\ (\rm{M_\odot/yr})$' % \
                     float2sci(1. / sfr_offset).replace('$', '')
            vals = sfrs
            rvals = rsfrs
            val_merrs = rsfr_merrs
            val_perrs = rsfr_perrs

        if ax is None:
            _, ax = plt.subplots()
            xlabel = r'$\log Age\ \rm{(yr)}$'

        ax.plot(lages, vals, **plt_kw)
        if errors:
            ax.errorbar(rlages, rvals, yerr=[val_merrs, val_perrs], **eplt_kw)

        if xlabel is not None:
            ax.set_xlabel(xlabel, fontsize=20)
        if ylabel is not None:
            ax.set_ylabel(ylabel, fontsize=20)
        return ax

    def plot_csfr(self, ax=None, errors=True, plt_kw=None,
                  fill_between_kw=None, xlim=None, ylim=(-0.01, 1.01),
                  data=True):
        '''cumulative sfr plot from match'''
        # Mutable-default fix: dict kwargs default to None, not {}.
        plt_kw = plt_kw or {}
        fill_between_kw = fill_between_kw or {}
        one_off = False
        if ax is None:
            fig, ax = plt.subplots(figsize=(8, 8))
            plt.subplots_adjust(right=0.95, left=0.1, bottom=0.1, top=0.95)
            ax.tick_params(direction='in')
            one_off = True

        fill_between_kw = dict({'alpha': 1, 'color': 'gray'},
                               **fill_between_kw)

        plt_kw = dict({'lw': 3}, **plt_kw)

        lages = self.data['lagei']
        csfh = self.data['csfr']
        csfh_errm = self.data['csfr_errm']
        csfh_errp = self.data['csfr_errp']

        age = 10 ** (lages - 9.)

        # Close the curve at the final age bin edge.
        age = np.append(age, 10 ** (self.data['lagef'][-1] - 9))
        csfh = np.append(csfh, 0)
        csfh_errm = np.append(csfh_errm, 0)
        csfh_errp = np.append(csfh_errp, 0)

        if errors:
            ax.fill_between(age, csfh - csfh_errm, csfh + csfh_errp,
                            **fill_between_kw)
        if data:
            ax.plot(age, csfh, **plt_kw)

        if xlim is not None:
            ax.set_xlim(xlim)
        ax.set_ylim(ylim)

        if one_off:
            ax.set_xlabel('$\\rm{Star\ Formation\ Time\ (Gyr)}$', fontsize=20)
            ax.set_ylabel('$\\rm{Culmulative\ Star\ Formation}$', fontsize=20)
            plt.legend(loc=0, frameon=False)
            if 'label' in plt_kw.keys():
                # Bug fix: the format string was '{}_csfr' with EXT passed as
                # an ignored extra argument, so the file extension was lost.
                outfile = \
                    '{}_csfr{}'.format(plt_kw['label'].replace('$', '').lower(),
                                       EXT)
            else:
                outfile = \
                    '{}_csfr{}'.format(os.path.join(self.base, self.name), EXT)
            plt.savefig(outfile)
            print('wrote {}'.format(outfile))
        return ax

    def sf_weighted_metallicity(self):
        """Return the SF-weighted mean [Fe/H] over all age bins."""
        agebins = (10 ** self.data.lagef - 10 ** self.data.lagei)
        totalsf = np.sum(self.data.sfr * agebins)
        fracsf = (self.data.sfr * agebins) / totalsf
        feh = np.array([convertz(z=0.02 * 10 ** m)[-2] for m in self.data.mh])
        return np.sum(fracsf * feh)

    def param_table(self, angst=True, agesplit=[1e9, 3e9], target='',
                    filters=['', '']):
        """Build a dict of LaTeX-formatted best-fit parameters and mass
        fractions / mean [Fe/H] in two age intervals split at `agesplit`."""
        try:
            dic = {'bestfit': self.bestfit, 'Av': self.Av, 'dmod': self.dmod}
        except Exception:
            # zcmerge files carry no best-fit header (was a bare except)
            print('No bestfit info')
            dic = {'bestfit': np.nan, 'Av': np.nan, 'dmod': np.nan}

        dic['header'] = \
            (r'Galaxy & Optical Filters & A$_V$ & $(m\!-\!M)_0$ &'
             r'$\% \frac{{\rm{{SF}}}}{{\rm{{SF_{{TOT}}}}}}$ &'
             r'$\langle \mbox{{[Fe/H]}} \rangle$ &'
             r'$\% \frac{{\rm{{SF}}}}{{\rm{{SF_{{TOT}}}}}}$ &'
             r'$\langle \mbox{{[Fe/H]}} \rangle$ & $bestfit$ \\ & & & & '
             r'\multicolumn{{2}}{{c}}{{$<{0}\rm{{Gyr}}$}} & '
             r'\multicolumn{{2}}{{c}}{{${0}-{1}\rm{{Gyr}}$}} & \\ \hline'
             '\n'.format(*agesplit))

        dic['target'] = target
        if angst:
            try:
                dic['target'], filters = parse_pipeline(self.name)
            except Exception:
                # filename does not follow the ANGST pipeline convention
                pass
        dic['filters'] = ','.join(filters)

        fyng, fyng_errp, fyng_errm = self.mass_fraction(0, agesplit[0])
        fint, fint_errp, fint_errm = self.mass_fraction(agesplit[0],
                                                        agesplit[1])

        # logZ = 0 if there is no SF, that will add error to mean Fe/H
        iyng = self.nearest_age(agesplit[0], i=False)
        iint = self.nearest_age(agesplit[1], i=False)

        iyngs, = np.nonzero(self.data.mh[:iyng + 1] != 0)
        iints, = np.nonzero(self.data.mh[:iint + 1] != 0)
        iints = list(set(iints) - set(iyngs))

        feh_yng = convertz(z=mh2z(np.mean(self.data.mh[iyngs])))[-2]
        feh_int = convertz(z=mh2z(np.mean(self.data.mh[iints])))[-2]
        feh_yng_errp = \
            convertz(z=mh2z(quadriture(self.data.mh_errp[iyngs])))[-2]
        feh_yng_errm = \
            convertz(z=mh2z(quadriture(self.data.mh_errm[iyngs])))[-2]
        feh_int_errp = \
            convertz(z=mh2z(quadriture(self.data.mh_errp[iints])))[-2]
        feh_int_errm = \
            convertz(z=mh2z(quadriture(self.data.mh_errm[iints])))[-2]

        maf = '${0: .2f}^{{+{1: .2f}}}_{{-{2: .2f}}}$'
        dic['fyng'], dic['fint'] = \
            [maf.format(v, p, m) for v, p, m in zip([fyng, fint],
                                                    [fyng_errp, fint_errp],
                                                    [fyng_errm, fint_errm])]
        dic['feh_yng'], dic['feh_int'] = \
            [maf.format(v, p, m) for v, p, m in
             zip([feh_yng, feh_int],
                 [feh_yng_errp, feh_int_errp],
                 [feh_yng_errm, feh_int_errm])]

        line = ['{target}', '{filters}', '{Av: .2f}', '{dmod: .2f}',
                '{fyng}', '{feh_yng}', '{fint}', '{feh_int}']
        dic['fmt'] = '%s \\\\ \n' % (' & '.join(line))
        return dic

    def nearest_age(self, lage, i=True):
        """Return the index of the age bin closest to `lage`.

        `lage` may be log10(yr) or yr (values > 10.15 are assumed linear
        and converted). With i=True search lagei, otherwise lagef.
        """
        if lage > 10.15:
            lage = np.log10(lage)
            logger.warning('converting input age to log age')
        age_arr = self.data.lagef
        msg = 'lagef'
        if i:
            age_arr = self.data.lagei
            msg = 'lagei'

        # min age bin size, will trigger warning if ages requested are
        # higher than the min binsize.
        tol = np.min(np.diff(age_arr))
        # find closest age bin to lage
        idx = np.argmin(np.abs(age_arr - lage))
        difi = np.abs(age_arr[idx] - lage)
        if difi > tol:
            # Bug fix: the original called .format on a *tuple* of strings
            # (a missing implicit concatenation), raising AttributeError.
            logger.warning('input {}={} not found. '
                           'Using {}'.format(msg, lage, age_arr[idx]))
        return idx

    def mass_fraction(self, lagei, lagef):
        """
        Return the fraction of total mass formed between lagei and lagef.
        lage[] units can be log yr or yr.
        Multiply by self.totalSF to obtain the mass formed.
        """
        agebins = (10 ** self.data.lagef - 10 ** self.data.lagei)
        if lagef - lagei < np.min(np.diff(self.data.lagei)):
            logger.error('Age difference smaller than bin sizes (or negative)')
            return 0, 0, 0

        # higher precision than self.totalSF
        totalsf = np.sum(self.data.sfr * agebins)

        idxi = self.nearest_age(lagei)
        # +1 is to include final bin
        idxf = self.nearest_age(lagef, i=False) + 1

        fracsfr = np.sum(self.data.sfr[idxi:idxf] *
                         agebins[idxi:idxf]) / totalsf
        fracsfr_errp = quadriture(self.data.sfr_errp[idxi:idxf] *
                                  agebins[idxi:idxf]) / totalsf
        fracsfr_errm = quadriture(self.data.sfr_errm[idxi:idxf] *
                                  agebins[idxi:idxf]) / totalsf

        return fracsfr, fracsfr_errp, fracsfr_errm

    def sfh_plot(self):
        """Two-panel SFR and [M/H] vs age plot saved next to the sfh file."""
        from matplotlib.ticker import NullFormatter
        _, (ax1, ax2) = plt.subplots(nrows=2)

        self.age_plot(ax=ax1)
        self.age_plot(val='mh', convertz=False, ax=ax2)
        ax1.xaxis.set_major_formatter(NullFormatter())
        plt.subplots_adjust(hspace=0.1)
        figname = os.path.join(self.base, self.name + EXT)
        # Save first, then report -- the original printed "wrote" before
        # the file actually existed.
        plt.savefig(figname)
        print('wrote {}'.format(figname))
        plt.close()
def main(argv):
    """
    Main function for sfh.py plot sfh output from calcsfh, zcombine, or zcmerge
    """
    parser = argparse.ArgumentParser(description="Plot match sfh")
    parser.add_argument('sfh_files', nargs='*', type=str,
                        help='ssp output(s) or formated output(s)')
    opts = parser.parse_args(argv)

    for path in opts.sfh_files:
        solution = SFH(path)
        if len(solution.data) == 0:
            # nothing parsed from this file; skip plotting
            continue
        solution.sfh_plot()
        solution.plot_csfr()
if __name__ == '__main__':
    # Script entry point: forward all CLI arguments (sfh file names) to main().
    main(sys.argv[1:])
| 2.640625 | 3 |
netbox_prometheus_sd/tests/utils.py | johanfleury/netbox-plugin-prometheus-sd | 27 | 12760897 | <reponame>johanfleury/netbox-plugin-prometheus-sd<filename>netbox_prometheus_sd/tests/utils.py
from dcim.models.devices import DeviceType, Manufacturer
from dcim.models.sites import Site
from dcim.models import Device, DeviceRole, Platform
from ipam.models import IPAddress
from tenancy.models import Tenant, TenantGroup
from virtualization.models import (
Cluster,
ClusterGroup,
ClusterType,
VirtualMachine,
)
def build_cluster():
    """Return (creating on first use) the 'DC1' cluster fixture."""
    group, _ = ClusterGroup.objects.get_or_create(name="VMware")
    cluster_type, _ = ClusterType.objects.get_or_create(name="On Prem")
    site, _ = Site.objects.get_or_create(name="Campus A", slug="campus-a")
    cluster, _ = Cluster.objects.get_or_create(
        name="DC1", group=group, type=cluster_type, site=site
    )
    return cluster
def build_tenant():
    """Return the shared 'Acme Corp.' tenant fixture."""
    tenant, _ = Tenant.objects.get_or_create(name="Acme Corp.", slug="acme")
    return tenant
def build_minimal_vm(name):
    """Return a VirtualMachine with only the mandatory fields set."""
    vm, _ = VirtualMachine.objects.get_or_create(
        name=name, cluster=build_cluster()
    )
    return vm
def build_vm_full(name):
    """Return a VirtualMachine populated with tenant, role, platform, IP and tags."""
    vm = build_minimal_vm(name=name)
    vm.tenant = build_tenant()
    role, _ = DeviceRole.objects.get_or_create(name="VM", slug="vm", vm_role=True)
    vm.role = role
    platform, _ = Platform.objects.get_or_create(
        name="Ubuntu 20.04", slug="ubuntu-20.04"
    )
    vm.platform = platform
    primary_v4, _ = IPAddress.objects.get_or_create(address="192.168.0.1/24")
    vm.primary_ip4 = primary_v4
    for tag in ("Tag1", "Tag 2"):
        vm.tags.add(tag)
    return vm
def build_minimal_device(name):
    """Return a Device fixture with only the mandatory fields populated."""
    role, _ = DeviceRole.objects.get_or_create(name="Firewall", slug="firewall")
    manufacturer, _ = Manufacturer.objects.get_or_create(
        name="Juniper", slug="juniper"
    )
    device_type, _ = DeviceType.objects.get_or_create(
        model="SRX", slug="srx", manufacturer=manufacturer
    )
    site, _ = Site.objects.get_or_create(name="Site", slug="site")
    device, _ = Device.objects.get_or_create(
        name=name, device_role=role, device_type=device_type, site=site
    )
    return device
def build_device_full(name):
    """Return a Device populated with tenant, platform, primary IPv6 and tags."""
    device = build_minimal_device(name)
    device.tenant = build_tenant()
    platform, _ = Platform.objects.get_or_create(name="Junos", slug="junos")
    device.platform = platform
    primary_v6, _ = IPAddress.objects.get_or_create(
        address="2001:db8:1701::2/64"
    )
    device.primary_ip6 = primary_v6
    for tag in ("Tag1", "Tag 2"):
        device.tags.add(tag)
    return device
def build_minimal_ip(address):
    """Return an IPAddress fixture for *address*."""
    ip, _ = IPAddress.objects.get_or_create(address=address)
    return ip
def build_full_ip(address, dns_name=""):
    """Return an IPAddress populated with tenant, DNS name and tags."""
    ip = build_minimal_ip(address=address)
    group, _ = TenantGroup.objects.get_or_create(
        name="Federation", slug="federation"
    )
    tenant, _ = Tenant.objects.get_or_create(
        name="Starfleet", slug="starfleet", group=group
    )
    ip.tenant = tenant
    ip.dns_name = dns_name
    for tag in ("Tag1", "Tag 2"):
        ip.tags.add(tag)
    return ip
| 1.828125 | 2 |
exp_cms.py | RUSH-LAB/Count-Sketch-Optimizers | 1 | 12760898 | import torch
from cupy_kernel import cupyKernel
import numpy as np
import math
kernel = '''
extern "C"
__inline__ __device__
int hash(int value, int range, int a, int b)
{
int h = a * value + b;
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h % range;
}
extern "C"
__inline__ __device__
float minimum(float a, float b, float c)
{
return fminf(fminf(a,b),c);
}
extern "C"
__inline__ __device__
float update_retrieve(float* mem,
float* result,
const float beta,
const int N,
const int D,
const long index,
const float value)
{
int a = 994443;
int b = 609478;
const int hash_idx = hash(index, N, a, b) * D + threadIdx.x;
float old_value = mem[hash_idx];
float update = (1. - beta) * (value - old_value);
atomicAdd(&mem[hash_idx], update);
return old_value + update;
}
extern "C"
__inline__ __device__
float cms_update_retrieve(float* mem,
float* result,
const float beta,
const int N,
const int W,
const int D,
const long index,
const float value)
{
float r[3];
int a[3] = {994443, 4113759, 9171025};
int b[3] = {609478, 2949676, 2171464};
for(int idx = 0; idx < 3; ++idx)
{
const int hash_idx = idx*W + hash(index, N, a[idx], b[idx]) * D + threadIdx.x;
float old_value = mem[hash_idx];
float update = (1. - beta) * (value - old_value);
atomicAdd(&mem[hash_idx], update);
r[idx] = old_value + update;
}
return minimum(r[0], r[1], r[2]);
}
extern "C"
__global__
void cms_hash_update_retrieve(const long* indices,
const float* values,
const float* beta,
float* mem,
float* result,
const int N,
const int W,
const int D)
{
if(threadIdx.x < D)
{
const int idx = blockIdx.x * D + threadIdx.x;
const float value = values[idx];
const long index = indices[blockIdx.x];
result[idx] = cms_update_retrieve(mem, result, *beta, N, W, D, index, value);
}
}
'''
class CountMinSketch:
    """GPU count-min sketch updated with an exponential moving average.

    Keeps 3 hash tables of shape (range, D) on the GPU; `update` applies an
    EMA step (1 - beta) * (value - old) at each hashed row and returns the
    minimum of the 3 estimates, computed inside the CUDA kernel above.
    """

    def __init__(self, N, D, sketch_size=0.20):
        """
        Parameters
        ----------
        N : int
            number of distinct row indices being hashed into the sketch.
        D : int
            row width; one CUDA thread per column.
        sketch_size : float
            fraction of N kept, split evenly across the 3 hash tables.
        """
        self.N = N
        self.D = D
        # Round D UP to a multiple of 32 (warp size). Bug fix: the original
        # `math.ceil(D // 32) * 32` floored first (// is already integer
        # division), so blk_size could be smaller than D and the kernel's
        # `threadIdx.x < D` guard left the last D % 32 columns unprocessed.
        self.blk_size = math.ceil(D / 32) * 32
        self.range = int(N*sketch_size/3.)
        self.width = self.range * D
        self.kernel = cupyKernel(kernel, "cms_hash_update_retrieve")
        self.cms = torch.zeros(3, self.range, D).float().cuda()
        print(N, "CMS", self.cms.size())

    def update(self, indices, values, size, beta):
        """EMA-update the rows hashed from `indices` with `values`.

        Returns the retrieved (minimum-of-3) estimates as a CUDA sparse
        tensor of shape `size`.
        """
        M, D = values.size()
        result = torch.zeros(values.size()).float().cuda()
        beta = torch.FloatTensor([beta]).cuda()
        # One block per sparse row; blk_size threads cover the D columns.
        self.kernel(grid=(M,1,1),
                    block=(self.blk_size,1,1),
                    args=[indices.data_ptr(),
                          values.data_ptr(),
                          beta.data_ptr(),
                          self.cms.data_ptr(),
                          result.data_ptr(),
                          self.range,
                          self.width,
                          self.D],
                    strm=torch.cuda.current_stream().cuda_stream)
        # NOTE(review): torch.cuda.sparse.FloatTensor is a legacy constructor;
        # modern torch would use torch.sparse_coo_tensor(..., device='cuda').
        return torch.cuda.sparse.FloatTensor(indices, result, size)

    def clean(self, alpha):
        """Decay every counter in the sketch by `alpha`, in place."""
        self.cms.mul_(alpha)
server/githubsrm/administrator/errors.py | Aradhya-Tripathi/githubsrm | 1 | 12760899 | from core.errorfactory import (
AuthenticationErrors,
AdminErrors,
MaintainerErrors,
ProjectErrors,
ContributorApprovedError,
ContributorNotFoundError,
MaintainerNotFoundError,
)
class InvalidWebhookError(AuthenticationErrors):
    """Authentication error subtype for invalid webhooks."""
    ...
class ExistingAdminError(AdminErrors):
    """Admin error subtype for an already-existing admin."""
    ...
class InvalidAdminCredentialsError(AdminErrors):
    """Admin error subtype for invalid admin credentials."""
    ...
class MaintainerApprovedError(MaintainerErrors):
    """Maintainer error subtype for an already-approved maintainer."""
    ...
class ProjectNotFoundError(ProjectErrors):
    """Project error subtype for a missing project."""
    ...
class InvalidRefreshTokenError(AuthenticationErrors):
    """Authentication error subtype for an invalid refresh token."""
    ...
class InvalidUserError(AuthenticationErrors):
    """Authentication error subtype for an invalid user."""
    ...
| 1.867188 | 2 |
scripts/count_hours.py | sciforce/phones-las | 35 | 12760900 | import argparse
import warnings
import librosa
from tqdm import tqdm
SAMPLE_RATE = 16000
def read_audio_and_text(inputs):
    """Return the duration in seconds of the audio at inputs['file_path'],
    resampled to SAMPLE_RATE and mixed down to mono."""
    path = inputs['file_path']
    waveform, rate = librosa.load(path, sr=SAMPLE_RATE, mono=True)
    return waveform.size / float(rate)
def process_line(args, line):
    """Parse one manifest line ('file<delim>language<delim>text') and return
    the audio duration in seconds, or 0 if the file cannot be read."""
    filename, language, text = line.split(args.delimiter)
    entry = {
        'file_path': filename,
        'text': text.strip(),
        'language': language,
    }
    try:
        return read_audio_and_text(entry)
    except Exception as err:
        # Best effort: report the failure and count this file as 0 seconds.
        print(str(err))
        return 0
def main(args):
    """Sum the durations of every file listed in args.input_file and print
    the total in hours."""
    total_seconds = 0
    with open(args.input_file) as manifest:
        for line in tqdm(manifest):
            total_seconds += process_line(args, line)
    hours = total_seconds / 3600
    print('Hours: ', hours)
if __name__ == '__main__':
    # Silence noisy FutureWarnings (e.g. from audio libraries) before running.
    warnings.simplefilter(action='ignore', category=FutureWarning)
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', help='File with audio paths and texts.', required=True)
    parser.add_argument('--step', help='Analysis window step in ms.', type=int, default=10)
    parser.add_argument('--start', help='Index of example to start from', type=int, default=0)
    parser.add_argument('--delimiter', help='CSV delimiter', type=str, default=',')
    main(parser.parse_args())
| 2.78125 | 3 |
CursoEmVideoPython/desafio65.py | miguelabreuss/scripts_python | 0 | 12760901 | count = 0
soma = 0
num = 0
maior = 0
menor = 999999999999999999999999
resp = ''
while resp in 'Ss':
num = int(input('Digite um número: '))
resp = str(input('Deseja continuar [S/N]: '))
soma += num
if num > maior:
maior = num
if num < menor:
menor = num
count += 1
print('O MAIOR número digitado foi {}, o MENOR foi {} e a média foi {}'.format(maior, menor, soma / count)) | 3.546875 | 4 |
project_1__tictcactoe/player_types/Player.py | mulvenstein/cs_AI | 0 | 12760902 | <filename>project_1__tictcactoe/player_types/Player.py
class Player:
    """Human tic-tac-toe player: prompts on stdin for a board index."""

    def __init__(self, char='X'):
        self.kind = 'human'  # distinguishes this player type from AI players
        self.char = char     # mark this player places on the board

    def move(self, board):
        """Prompt until the user enters an empty, in-range cell index.

        Bug fix: the original indexed board[move] *before* checking the
        range (IndexError on out-of-range input) and accepted move == 9
        on a 9-cell (0..8) board.
        """
        while True:  # repeat until a valid move is entered
            move = int(input('Your move? '))
            if 0 <= move <= 8 and board[move] != "X" and board[move] != "O":
                return move

    def available_positions(self, board):
        """Return the indices of cells still holding the empty marker."""
        return [i for i in range(0, 9) if board[i] == '█']
PYTHON-CURSO EM VIDEO/Desafio 024.py | JaumVitor/HOMEWORK-PYTHON | 0 | 12760903 | <gh_stars>0
# Desafio 24: report whether the city name starts with the word "Santo".
palavras = str(input('Qual cidade você nasceu ? ')).strip().lower().split()
# Guard against empty input, then compare the first *word* for equality.
# Bug fix: the original substring test ('santo' in cidade) wrongly matched
# cities such as "Santos".
cidade = palavras[0] if palavras else ''
print('Sua cidade começa com a palavra Santo ? {}'.format(cidade == 'santo'))
| 3.578125 | 4 |
examples/test_ftp.py | gitdachong/lasttester | 0 | 12760904 | <reponame>gitdachong/lasttester
from lasttester.components.configs.ftp import Configurer
import os

# Manual smoke test for the FTP Configurer: connect, parse the config,
# upload a directory, then close the connection.
# NOTE(review): host/username/password are hardcoded test credentials;
# move them to environment variables or a config file before wider use.
config = Configurer({
    'name':'ftp',
    'config_body':{
        'host':'192.168.127.12',
        'port':'21',
        'username':'test123',
        'password':'<PASSWORD>',
    }
})

# parse() returns (key, value) pairs; the 'ftp' entry holds the client handle.
key,value = config.parse()[0]
ftp = value.get('ftp')
print(ftp)
config.upload('/5/','/Users/scott/Downloads/test/1')
# config.upload('/6/','E:\pornhub-downloader-master\img')
# config.download('/5/','/Users/scott/Downloads/test/111/')
# config.delete('/')
config.close()
| 2.125 | 2 |
ssh2net/core/cisco_iosxr/__init__.py | carlmontanari/ssh2net | 10 | 12760905 | <gh_stars>1-10
"""ssh2net cisco iosxr driver"""
| 0.894531 | 1 |
test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py | mrehman29/cloudstack | 1 | 12760906 | <reponame>mrehman29/cloudstack
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC Internal Load Balancer functionality with Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
ApplicationLoadBalancer,
Network,
Router)
from marvin.cloudstackAPI import (listInternalLoadBalancerVMs,
stopInternalLoadBalancerVM,
startInternalLoadBalancerVM)
# Import System Modules
from nose.plugins.attrib import attr
import copy
import time
class TestNuageInternalLb(nuageTestCase):
"""Test VPC Internal LB functionality with Nuage VSP SDN plugin
"""
    @classmethod
    def setUpClass(cls):
        """Delegate one-time suite setup to the Nuage base test case."""
        super(TestNuageInternalLb, cls).setUpClass()
        return
    def setUp(self):
        """Create a fresh admin account per test and register it for cleanup."""
        # Create an account
        self.account = Account.create(self.api_client,
                                      self.test_data["account"],
                                      admin=True,
                                      domainid=self.domain.id
                                      )
        self.cleanup = [self.account]
        return
    # create_Internal_LB_Rule - Creates Internal LB rule in the given VPC network
    def create_Internal_LB_Rule(self, network, vm_array=None, services=None, source_ip=None):
        """Create an Internal LB rule in `network`, optionally assigning
        `vm_array` to it; defaults to test_data["internal_lbrule"]."""
        self.debug("Creating Internal LB rule in VPC network with ID - %s" % network.id)
        if not services:
            services = self.test_data["internal_lbrule"]
        int_lb_rule = ApplicationLoadBalancer.create(self.api_client,
                                                     services=services,
                                                     sourcenetworkid=network.id,
                                                     networkid=network.id,
                                                     sourceipaddress=source_ip
                                                     )
        self.debug("Created Internal LB rule")
        # Assigning VMs to the created Internal Load Balancer rule
        if vm_array:
            self.debug("Assigning virtual machines - %s to the created Internal LB rule" % vm_array)
            int_lb_rule.assign(self.api_client, vms=vm_array)
            self.debug("Assigned VMs to the created Internal LB rule")
        return int_lb_rule
    # validate_Internal_LB_Rule - Validates the given Internal LB rule,
    # matches the given Internal LB rule name and state against the list of Internal LB rules fetched
    def validate_Internal_LB_Rule(self, int_lb_rule, state=None, vm_array=None):
        """Validates the Internal LB Rule

        Optionally also checks the rule's state and that every VM in
        `vm_array` appears among the rule's load-balanced instances.
        """
        self.debug("Check if the Internal LB Rule is created successfully ?")
        int_lb_rules = ApplicationLoadBalancer.list(self.api_client,
                                                    id=int_lb_rule.id
                                                    )
        self.assertEqual(isinstance(int_lb_rules, list), True,
                         "List Internal LB Rule should return a valid list"
                         )
        self.assertEqual(int_lb_rule.name, int_lb_rules[0].name,
                         "Name of the Internal LB Rule should match with the returned list data"
                         )
        if state:
            self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
                             "Internal LB Rule state should be '%s'" % state
                             )
        if vm_array:
            instance_ids = [instance.id for instance in int_lb_rules[0].loadbalancerinstance]
            for vm in vm_array:
                self.assertEqual(vm.id in instance_ids, True,
                                 "Internal LB instance list should have the VM with ID - %s" % vm.id
                                 )
        self.debug("Internal LB Rule creation successfully validated for %s" % int_lb_rule.name)
    # list_InternalLbVms - Lists deployed Internal LB VM instances
    def list_InternalLbVms(self, network_id=None, source_ip=None):
        """List the account's Internal LB VMs, optionally filtered by
        network and/or by guest (source) IP address."""
        listInternalLoadBalancerVMsCmd = listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
        listInternalLoadBalancerVMsCmd.account = self.account.name
        listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
        if network_id:
            listInternalLoadBalancerVMsCmd.networkid = network_id
        internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(listInternalLoadBalancerVMsCmd)
        if source_ip:
            return [internal_lb_vm for internal_lb_vm in internal_lb_vms
                    if str(internal_lb_vm.guestipaddress) == source_ip]
        else:
            return internal_lb_vms
    # get_InternalLbVm - Returns Internal LB VM instance for the given VPC network and source ip
    def get_InternalLbVm(self, network, source_ip):
        """Return the first Internal LB VM matching `network` and `source_ip`."""
        self.debug("Finding the InternalLbVm for network with ID - %s and source IP address - %s" %
                   (network.id, source_ip))
        internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
        self.assertEqual(isinstance(internal_lb_vms, list), True,
                         "List InternalLbVms should return a valid list"
                         )
        return internal_lb_vms[0]
    # stop_InternalLbVm - Stops the given Internal LB VM instance
    def stop_InternalLbVm(self, int_lb_vm, force=None):
        """Stop the given Internal LB VM, optionally forcing the stop."""
        self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
        cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
        cmd.id = int_lb_vm.id
        if force:
            cmd.forced = force
        self.api_client.stopInternalLoadBalancerVM(cmd)
    # start_InternalLbVm - Starts the given Internal LB VM instance
    def start_InternalLbVm(self, int_lb_vm):
        """Start the given Internal LB VM instance."""
        self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id)
        cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd()
        cmd.id = int_lb_vm.id
        self.api_client.startInternalLoadBalancerVM(cmd)
    # check_InternalLbVm_state - Checks if the Internal LB VM instance of the given VPC network and source ip is in the
    # expected state form the list of fetched Internal LB VM instances
    def check_InternalLbVm_state(self, network, source_ip, state=None):
        """Assert the first matching Internal LB VM is in `state` (if given)."""
        self.debug("Check if the InternalLbVm is in state - %s" % state)
        internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
        self.assertEqual(isinstance(internal_lb_vms, list), True,
                         "List InternalLbVm should return a valid list"
                         )
        if state:
            self.assertEqual(internal_lb_vms[0].state, state,
                             "InternalLbVm is not in the expected state"
                             )
        self.debug("InternalLbVm instance - %s is in the expected state - %s" % (internal_lb_vms[0].name, state))
    # wget_from_vm_cmd - From within the given VM (ssh client),
    # fetches index.html file of web server running with the given public IP
    def wget_from_vm_cmd(self, ssh_client, ip_address, port):
        """Fetch index.html from http://ip_address:port inside the VM and
        return its contents; fails the test if the HTTP fetch fails."""
        cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + "/"
        response = self.execute_cmd(ssh_client, cmd)
        if "200 OK" not in response:
            self.fail("Failed to wget from a VM with http server IP address - %s" % ip_address)
        # Reading the wget file
        cmd = "cat index.html"
        wget_file = self.execute_cmd(ssh_client, cmd)
        # Removing the wget file so the next fetch starts clean
        cmd = "rm -r index.html"
        self.execute_cmd(ssh_client, cmd)
        return wget_file
    # verify_lb_wget_file - Verifies that the given wget file (index.html) belongs to the given Internal LB rule
    # assigned VMs (vm array)
    def verify_lb_wget_file(self, wget_file, vm_array):
        """Return the NIC IP of whichever VM in `vm_array` served `wget_file`;
        fails the test if no VM's NIC IP appears in the file."""
        wget_server_ip = None
        for vm in vm_array:
            for nic in vm.nic:
                if str(nic.ipaddress) in str(wget_file):
                    wget_server_ip = str(nic.ipaddress)
        if wget_server_ip:
            self.debug("Verified wget file from an Internal Load Balanced VM with http server IP address - %s"
                       % wget_server_ip)
        else:
            self.fail("Did not wget file from the Internal Load Balanced VMs - %s" % vm_array)
        return wget_server_ip
# validate_internallb_algorithm_traffic - Validates Internal LB algorithms by performing multiple wget traffic tests
# against the given Internal LB VM instance (source port)
def validate_internallb_algorithm_traffic(self, ssh_client, source_ip, port, vm_array, algorithm):
# Internal LB (wget) traffic tests
iterations = 2 * len(vm_array)
wget_files = []
for i in range(iterations):
wget_files.append(self.wget_from_vm_cmd(ssh_client, source_ip, port))
# Verifying Internal LB (wget) traffic tests
wget_servers_ip_list = []
for i in range(iterations):
wget_servers_ip_list.append(self.verify_lb_wget_file(wget_files[i], vm_array))
# Validating Internal LB algorithm
if algorithm == "roundrobin" or algorithm == "leastconn":
for i in range(iterations):
if wget_servers_ip_list.count(wget_servers_ip_list[i]) is not 2:
self.fail("Round Robin Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
self.debug("Successfully validated Round Robin/Least connections Internal LB algorithm - %s" %
wget_servers_ip_list)
if algorithm == "source":
for i in range(iterations):
if wget_servers_ip_list.count(wget_servers_ip_list[i]) is not iterations:
self.fail("Source Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
self.debug("Successfully validated Source Internal LB algorithm - %s" % wget_servers_ip_list)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_01_nuage_internallb_vpc_Offering(self):
        """Test Nuage VSP VPC Offering with different combinations of LB service providers
        """

        # 1. Verify that the network service providers supported by Nuage VSP for VPC Internal LB functionality are all
        #    successfully created and enabled.
        # 2. Create Nuage VSP VPC offering with LB service provider as "InternalLbVm", check if it is successfully
        #    created and enabled. Verify that the VPC creation succeeds with this VPC offering.
        # 3. Create Nuage VSP VPC offering with LB service provider as "VpcVirtualRouter", check if it is successfully
        #    created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not
        #    support provider "VpcVirtualRouter" for service LB.
        # 4. Create Nuage VSP VPC offering with LB service provider as "Netscaler", check if it is successfully
        #    created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not
        #    support provider "Netscaler" for service LB.
        # 5. Delete the created VPC offerings (cleanup).

        self.debug("Validating network service providers supported by Nuage VSP for VPC Internal LB functionality")
        providers = ["NuageVsp", "VpcVirtualRouter", "InternalLbVm"]
        for provider in providers:
            self.validate_NetworkServiceProvider(provider, state="Enabled")

        # Creating VPC offerings (all four should be created and Enabled)
        self.debug("Creating Nuage VSP VPC offering with LB service provider as InternalLbVm...")
        vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
        self.validate_VpcOffering(vpc_off_1, state="Enabled")

        self.debug("Creating Nuage VSP VPC offering with LB service provider as VpcVirtualRouter...")
        vpc_offering_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_offering_lb"])
        vpc_offering_lb["serviceProviderList"]["Lb"] = "VpcVirtualRouter"
        vpc_off_2 = self.create_VpcOffering(vpc_offering_lb)
        self.validate_VpcOffering(vpc_off_2, state="Enabled")

        self.debug("Creating Nuage VSP VPC offering with LB service provider as Netscaler...")
        vpc_offering_lb["serviceProviderList"]["Lb"] = "Netscaler"
        vpc_off_3 = self.create_VpcOffering(vpc_offering_lb)
        self.validate_VpcOffering(vpc_off_3, state="Enabled")

        self.debug("Creating Nuage VSP VPC offering without LB service...")
        vpc_off_4 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"])
        self.validate_VpcOffering(vpc_off_4, state="Enabled")

        # Creating VPCs: only InternalLbVm (and no-LB) offerings must succeed
        self.debug("Creating a VPC with LB service provider as InternalLbVm...")
        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_1, state="Enabled")

        self.debug("Creating a VPC with LB service provider as VpcVirtualRouter...")
        with self.assertRaises(Exception):
            self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
        self.debug("Nuage VSP does not support provider VpcVirtualRouter for service LB for VPCs")

        self.debug("Creating a VPC with LB service provider as Netscaler...")
        with self.assertRaises(Exception):
            self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
        self.debug("Nuage VSP does not support provider Netscaler for service LB for VPCs")

        self.debug("Creating a VPC without LB service...")
        vpc_2 = self.create_Vpc(vpc_off_4, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_2, state="Enabled")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_02_nuage_internallb_vpc_network_offering(self):
        """Test Nuage VSP VPC Network Offering with and without Internal LB service

        Validates creation rules for VPC network offerings (lbScheme,
        persistence, conserve mode) and that tier creation only succeeds
        with persistent offerings on Nuage VSP.
        """
        # 1. Create Nuage Vsp VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability
        # "lbSchemes" as "internal", check if it is successfully created and enabled. Verify that the VPC network
        # creation succeeds with this Network offering.
        # 2. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled.
        # Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non
        # persistent VPC networks.
        # 3. Recreate above Network offering with conserve mode On, check if the network offering creation failed
        # as only networks with conserve mode Off can belong to VPC.
        # 4. Create Nuage Vsp VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability
        # "lbSchemes" as "public", check if the network offering creation failed as "public" lbScheme is not
        # supported for LB Service Provider "InternalLbVm".
        # 5. Create Nuage Vsp VPC Network offering without Internal LB Service, check if it is successfully created and
        # enabled. Verify that the VPC network creation succeeds with this Network offering.
        # 6. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled.
        # Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non
        # persistent VPC networks.
        # 7. Recreate the above Network offering with conserve mode On, check if the network offering creation failed
        # as only networks with conserve mode Off can belong to VPC.
        # 8. Delete the created Network offerings (cleanup).
        # Creating VPC offering
        self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
        vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
        self.validate_VpcOffering(vpc_off, state="Enabled")
        # Creating VPC
        self.debug("Creating a VPC with Internal LB service...")
        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc, state="Enabled")
        # Creating network offerings
        self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service "
                   "Capability lbSchemes as internal...")
        net_off_1 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
        self.validate_NetworkOffering(net_off_1, state="Enabled")
        self.debug("Recreating above Network offering with ispersistent False...")
        # Deep copy so the shared test_data template is not mutated for later tests.
        vpc_net_off_lb_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
        vpc_net_off_lb_non_persistent["ispersistent"] = "False"
        # Offering creation itself succeeds; only the tier creation below is expected to fail.
        net_off_2 = self.create_NetworkOffering(vpc_net_off_lb_non_persistent)
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        self.debug("Recreating above Network offering with conserve mode On...")
        with self.assertRaises(Exception):
            self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"],
                                        conserve_mode=True)
        self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC")
        self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service "
                   "Capability lbSchemes as public...")
        network_offering_internal_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
        network_offering_internal_lb["serviceCapabilityList"]["Lb"]["lbSchemes"] = "public"
        with self.assertRaises(Exception):
            self.create_NetworkOffering(network_offering_internal_lb)
        self.debug("Network offering creation failed as public lbScheme is not supported for LB Service Provider "
                   "InternalLbVm")
        self.debug("Creating Nuage Vsp VPC Network offering without Internal LB service...")
        net_off_3 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off_3, state="Enabled")
        self.debug("Recreating above Network offering with ispersistent False...")
        vpc_net_off_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering"])
        vpc_net_off_non_persistent["ispersistent"] = "False"
        net_off_4 = self.create_NetworkOffering(vpc_net_off_non_persistent)
        self.validate_NetworkOffering(net_off_4, state="Enabled")
        self.debug("Recreating above Network offering with conserve mode On...")
        with self.assertRaises(Exception):
            self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"], conserve_mode=True)
        self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC")
        # Creating VPC networks in the VPC
        self.debug("Creating a persistent VPC network with Internal LB service...")
        internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
        self.validate_Network(internal_tier, state="Implemented")
        vr = self.get_Router(internal_tier)
        self.check_Router_state(vr, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier, vpc)
        self.verify_vsp_router(vr)
        # Non-persistent offerings are rejected at tier creation time by Nuage VSP.
        self.debug("Creating a non persistent VPC network with Internal LB service...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
        self.debug("Nuage VSP does not support non persistent VPC networks")
        self.debug("Creating a persistent VPC network without Internal LB service...")
        public_tier = self.create_Network(net_off_3, gateway='10.1.3.1', vpc=vpc)
        self.validate_Network(public_tier, state="Implemented")
        vr = self.get_Router(public_tier)
        self.check_Router_state(vr, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, public_tier, vpc)
        self.verify_vsp_router(vr)
        self.debug("Creating a non persistent VPC network without Internal LB service...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_4, gateway='10.1.4.1', vpc=vpc)
        self.debug("Nuage VSP does not support non persistent VPC networks")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_03_nuage_internallb_vpc_networks(self):
        """Test Nuage VSP VPC Networks with and without Internal LB service

        Exercises tier creation, offering upgrade in both directions, and
        tier deletion/re-creation, verifying each step against both
        CloudStack state and the VSD.
        """
        # 1. Create Nuage VSP VPC offering with Internal LB service, check if it is successfully created and enabled.
        # 2. Create Nuage VSP VPC offering without Internal LB service, check if it is successfully created and enabled.
        # 3. Create a VPC "vpc_1" with Internal LB service, check if it is successfully created and enabled.
        # 4. Create a VPC "vpc_2" without Internal LB service, check if it is successfully created and enabled.
        # 5. Create Nuage VSP VPC Network offering with Internal LB service, check if it is successfully created and
        # enabled.
        # 6. Create Nuage VSP VPC Network offering without Internal LB service, check if it is successfully created and
        # enabled.
        # 7. Create a VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added to the
        # VPC VR, and the VM is deployed successfully in the tier.
        # 8. Create one more VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added
        # to the VPC VR, and the VM is deployed successfully in the tier.
        # 9. Create a VPC network in vpc_2 with Internal LB service, check if the tier creation failed.
        # 10. Create a VPC network in vpc_1 without Internal LB service and spawn a VM, check if the tier is added to
        # the VPC VR, and the VM is deployed successfully in the tier.
        # 11. Create a VPC network in vpc_2 without Internal LB service and spawn a VM, check if the tier is added to
        # the VPC VR, and the VM is deployed successfully in the tier.
        # 12. Upgrade the VPC network with Internal LB service to one with no Internal LB service and vice-versa, check
        # if the VPC Network offering upgrade passed in both directions.
        # 13. Delete the VPC network with Internal LB service, check if the tier is successfully deleted.
        # 14. Recreate the VPC network with Internal LB service, check if the tier is successfully re-created.
        # 15. Delete all the created objects (cleanup).
        # Creating VPC offerings
        self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
        vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
        self.validate_VpcOffering(vpc_off_1, state="Enabled")
        self.debug("Creating Nuage VSP VPC offering without Internal LB service...")
        vpc_off_2 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"])
        self.validate_VpcOffering(vpc_off_2, state="Enabled")
        # Creating VPCs
        self.debug("Creating a VPC with Internal LB service...")
        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_1, state="Enabled")
        self.debug("Creating a VPC without Internal LB service...")
        vpc_2 = self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_2, state="Enabled")
        # Creating network offerings
        self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
        net_off_1 = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
        self.validate_NetworkOffering(net_off_1, state="Enabled")
        self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
        net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        # Creating VPC networks in VPCs, and deploying VMs
        self.debug("Creating a VPC network in vpc_1 with Internal LB service...")
        internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_1)
        self.validate_Network(internal_tier_1, state="Implemented")
        vr_1 = self.get_Router(internal_tier_1)
        self.check_Router_state(vr_1, state="Running")
        self.debug("Deploying a VM in network - %s" % internal_tier_1.name)
        internal_vm_1 = self.create_VM(internal_tier_1)
        self.check_VM_state(internal_vm_1, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier_1, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(internal_vm_1)
        self.debug("Creating one more VPC network in vpc_1 with Internal LB service...")
        internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1)
        self.validate_Network(internal_tier_2, state="Implemented")
        vr_1 = self.get_Router(internal_tier_2)
        self.check_Router_state(vr_1, state="Running")
        self.debug("Deploying a VM in network - %s" % internal_tier_2.name)
        internal_vm_2 = self.create_VM(internal_tier_2)
        self.check_VM_state(internal_vm_2, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(internal_vm_2)
        # vpc_2's offering lacks the Internal LB service, so this tier creation must fail.
        self.debug("Creating a VPC network in vpc_2 with Internal LB service...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_2)
        self.debug("VPC Network creation failed as vpc_2 does not support Internal Lb service")
        self.debug("Creating a VPC network in vpc_1 without Internal LB service...")
        public_tier_1 = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc_1)
        self.validate_Network(public_tier_1, state="Implemented")
        vr_1 = self.get_Router(public_tier_1)
        self.check_Router_state(vr_1, state="Running")
        self.debug("Deploying a VM in network - %s" % public_tier_1.name)
        public_vm_1 = self.create_VM(public_tier_1)
        self.check_VM_state(public_vm_1, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, public_tier_1, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(public_vm_1)
        self.debug("Creating a VPC network in vpc_2 without Internal LB service...")
        public_tier_2 = self.create_Network(net_off_2, gateway='10.1.1.1', vpc=vpc_2)
        self.validate_Network(public_tier_2, state="Implemented")
        vr_2 = self.get_Router(public_tier_2)
        self.check_Router_state(vr_2, state="Running")
        self.debug("Deploying a VM in network - %s" % public_tier_2.name)
        public_vm_2 = self.create_VM(public_tier_2)
        self.check_VM_state(public_vm_2, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, public_tier_2, vpc_2)
        self.verify_vsp_router(vr_2)
        self.verify_vsp_vm(public_vm_2)
        # Upgrading a VPC network (both directions must preserve the running VM and VR)
        self.debug("Upgrading a VPC network with Internal LB Service to one without Internal LB Service...")
        self.upgrade_Network(net_off_2, internal_tier_2)
        self.validate_Network(internal_tier_2, state="Implemented")
        vr_1 = self.get_Router(internal_tier_2)
        self.check_Router_state(vr_1, state="Running")
        self.check_VM_state(internal_vm_2, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(internal_vm_2)
        self.debug("Upgrading a VPC network without Internal LB Service to one with Internal LB Service...")
        self.upgrade_Network(net_off_1, internal_tier_2)
        self.validate_Network(internal_tier_2, state="Implemented")
        vr_1 = self.get_Router(internal_tier_2)
        self.check_Router_state(vr_1, state="Running")
        self.check_VM_state(internal_vm_2, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(internal_vm_2)
        # Deleting and re-creating a VPC network
        self.debug("Deleting a VPC network with Internal LB Service...")
        self.delete_VM(internal_vm_2)
        self.delete_Network(internal_tier_2)
        with self.assertRaises(Exception):
            self.validate_Network(internal_tier_2)
        self.debug("VPC network successfully deleted in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
        self.debug("VPC network successfully deleted in VSD")
        self.debug("Recreating a VPC network with Internal LB Service...")
        # NOTE(review): here the VM is deployed before the network validation,
        # unlike the earlier tiers — presumably to implement the re-created
        # network; confirm the ordering is intentional.
        internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1)
        internal_vm_2 = self.create_VM(internal_tier_2)
        self.validate_Network(internal_tier_2, state="Implemented")
        vr_1 = self.get_Router(internal_tier_2)
        self.check_Router_state(vr_1, state="Running")
        self.check_VM_state(internal_vm_2, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
        self.verify_vsp_router(vr_1)
        self.verify_vsp_vm(internal_vm_2)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_04_nuage_internallb_rules(self):
"""Test Nuage VSP VPC Internal LB functionality with different combinations of Internal LB rules
"""
# 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully
# created.
# 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is
# successfully created.
# 3. Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR
# range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's
# CIDR subnet.
# 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range,
# check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR
# subnet.
# 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB
# Rule creation failed as Scheme Internal is not supported by this network offering.
# 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal
# LB Rules are successfully created.
# 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address,
# check if the Internal LB Rules are successfully created.
# 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address,
# check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule.
# 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the
# Internal LB Rules.
# 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a
# VM to it.
# 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier.
# 12. Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules
self.debug("Creating an Internal LB Rule without source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
free_source_ip = int_lb_rule.sourceipaddress
self.debug("Creating an Internal LB Rule with source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule in a VPC network without Internal Lb service...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(public_tier)
self.debug("Internal LB Rule creation failed as Scheme Internal is not supported by this network offering")
self.debug("Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVms deployment and state
int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_2.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVms state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVms un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm_1)
self.debug("InternalLbVm successfully destroyed in VSD")
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with different ports but using the same Load Balancing source "
"IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with same ports and using the same Load Balacing source IP "
"Address...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule, state="Active", vm_array=[internal_vm])
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress)
self.debug("Internal LB Rule creation failed as it conflicts with the existing rule")
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Attaching a VM from a different tier to an Internal LB Rule created on a tier...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm])
self.debug("Internal LB Rule creation failed as the VM belongs to a different network")
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_05_nuage_internallb_traffic(self):
    """Test Nuage VSP VPC Internal LB functionality by performing (wget) traffic tests within a VPC
    """
    # 1. Create an Internal LB Rule "internal_lbrule" with source IP Address specified on the Internal tier, check
    #    if the Internal LB Rule is successfully created.
    # 2. Create an Internal LB Rule "internal_lbrule_http" with source IP Address (same as above) specified on the
    #    Internal tier, check if the Internal LB Rule is successfully created.
    # 3. Attach a VM to the above created Internal LB Rules, check if the InternalLbVm is successfully deployed in
    #    the Internal tier.
    # 4. Deploy two more VMs in the Internal tier, check if the VMs are successfully deployed.
    # 5. Attach the newly deployed VMs to the above created Internal LB Rules, verify the validity of the above
    #    created Internal LB Rules over three Load Balanced VMs in the Internal tier.
    # 6. Create the corresponding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible,
    #    check if the Network ACL rules are successfully added to the internal tier.
    # 7. Validate the Internal LB functionality by performing (wget) traffic tests from a VM in the Public tier to
    #    the Internal load balanced guest VMs in the Internal tier, using Static NAT functionality to access (ssh)
    #    the VM on the Public tier.
    # 8. Verify that the InternalLbVm gets destroyed when the last Internal LB rule is removed from the Internal
    #    tier.
    # 9. Repeat the above steps for one more Internal tier as well, validate the Internal LB functionality.
    # Creating a VPC offering
    self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
    vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
    self.validate_VpcOffering(vpc_off, state="Enabled")
    # Creating a VPC
    self.debug("Creating a VPC with Internal LB service...")
    vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
    self.validate_Vpc(vpc, state="Enabled")
    # Creating network offerings
    self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
    net_off_1 = self.create_NetworkOffering(
        self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
    self.validate_NetworkOffering(net_off_1, state="Enabled")
    self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
    net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
    self.validate_NetworkOffering(net_off_2, state="Enabled")
    # Creating VPC networks in the VPC, and deploying VMs
    # Two Internal-LB-enabled tiers are created so the Internal LB
    # functionality can be validated independently per tier (step 9).
    self.debug("Creating a VPC network with Internal LB service...")
    internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
    self.validate_Network(internal_tier_1, state="Implemented")
    vr = self.get_Router(internal_tier_1)
    self.check_Router_state(vr, state="Running")
    self.debug("Deploying a VM in network - %s" % internal_tier_1.name)
    internal_vm_1 = self.create_VM(internal_tier_1)
    self.check_VM_state(internal_vm_1, state="Running")
    # VSD verification
    self.verify_vsp_network(self.domain.id, internal_tier_1, vpc)
    self.verify_vsp_router(vr)
    self.verify_vsp_vm(internal_vm_1)
    self.debug("Creating one more VPC network with Internal LB service...")
    internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc)
    self.validate_Network(internal_tier_2, state="Implemented")
    vr = self.get_Router(internal_tier_2)
    self.check_Router_state(vr, state="Running")
    self.debug("Deploying a VM in network - %s" % internal_tier_2.name)
    internal_vm_2 = self.create_VM(internal_tier_2)
    self.check_VM_state(internal_vm_2, state="Running")
    # VSD verification
    self.verify_vsp_network(self.domain.id, internal_tier_2, vpc)
    self.verify_vsp_router(vr)
    self.verify_vsp_vm(internal_vm_2)
    # The public tier (no Internal LB service) hosts the traffic-source VM,
    # reachable from outside via Static NAT (set up further below).
    self.debug("Creating a VPC network without Internal LB service...")
    public_tier = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc)
    self.validate_Network(public_tier, state="Implemented")
    vr = self.get_Router(public_tier)
    self.check_Router_state(vr, state="Running")
    self.debug("Deploying a VM in network - %s" % public_tier.name)
    public_vm = self.create_VM(public_tier)
    self.check_VM_state(public_vm, state="Running")
    # VSD verification
    self.verify_vsp_network(self.domain.id, public_tier, vpc)
    self.verify_vsp_router(vr)
    self.verify_vsp_vm(public_vm)
    # Creating Internal LB Rules in the Internal tiers
    # Both rules (SSH & HTTP) share one source IP so a single InternalLbVm
    # serves both ports in this tier.
    self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
    int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier_1, vm_array=[internal_vm_1])
    self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm_1])
    int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier_1,
                                                 vm_array=[internal_vm_1],
                                                 services=self.test_data["internal_lbrule_http"],
                                                 source_ip=int_lb_rule_1.sourceipaddress
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm_1])
    # Validating InternalLbVm deployment and state
    int_lb_vm_1 = self.get_InternalLbVm(internal_tier_1, int_lb_rule_1.sourceipaddress)
    self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_1)
    # Deploying more VMs in the Internal tier
    # NOTE(review): these extra VMs are not explicitly checked with
    # check_VM_state here; presumably create_VM already asserts successful
    # deployment — confirm against the harness.
    self.debug("Deploying two more VMs in network - %s" % internal_tier_1.name)
    internal_vm_1_1 = self.create_VM(internal_tier_1)
    internal_vm_1_2 = self.create_VM(internal_tier_1)
    # VSD verification
    self.verify_vsp_vm(internal_vm_1_1)
    self.verify_vsp_vm(internal_vm_1_2)
    # Adding newly deployed VMs to the created Internal LB rules
    self.debug("Adding two more virtual machines to the created Internal LB rules...")
    int_lb_rule_1.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
    self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
                                   vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
    int_lb_rule_2.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
    self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
                                   vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
    # Validating InternalLbVm state
    self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_1)
    # Adding Network ACL rules in the Internal tier
    self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
    ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier_1)
    http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_1)
    # VSD verification
    self.verify_vsp_firewall_rule(ssh_rule)
    self.verify_vsp_firewall_rule(http_rule)
    # Creating Internal LB Rules in the Internal tier
    # Same setup repeated for the second Internal tier (step 9).
    self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
    int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier_2, vm_array=[internal_vm_2])
    self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", vm_array=[internal_vm_2])
    int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier_2,
                                                 vm_array=[internal_vm_2],
                                                 services=self.test_data["internal_lbrule_http"],
                                                 source_ip=int_lb_rule_3.sourceipaddress
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", vm_array=[internal_vm_2])
    # Validating InternalLbVm deployment and state
    int_lb_vm_2 = self.get_InternalLbVm(internal_tier_2, int_lb_rule_3.sourceipaddress)
    self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_2)
    # Deploying more VMs in the Internal tier
    self.debug("Deploying two more VMs in network - %s" % internal_tier_2.name)
    internal_vm_2_1 = self.create_VM(internal_tier_2)
    internal_vm_2_2 = self.create_VM(internal_tier_2)
    # VSD verification
    self.verify_vsp_vm(internal_vm_2_1)
    self.verify_vsp_vm(internal_vm_2_2)
    # Adding newly deployed VMs to the created Internal LB rules
    self.debug("Adding two more virtual machines to the created Internal LB rules...")
    int_lb_rule_3.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
    self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active",
                                   vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
    int_lb_rule_4.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
    self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active",
                                   vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
    # Validating InternalLbVm state
    self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_2)
    # Adding Network ACL rules in the Internal tier
    # ssh_rule/http_rule are intentionally rebound here: the tier-1 rules
    # were already verified above and are no longer referenced.
    self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
    ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier_2)
    http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_2)
    # VSD verification
    self.verify_vsp_firewall_rule(ssh_rule)
    self.verify_vsp_firewall_rule(http_rule)
    # Creating Static NAT Rule for the VM in the Public tier
    public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
    self.validate_PublicIPAddress(public_ip, public_tier)
    self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
    self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
    # VSD verification
    self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
    # Adding Network ACL rule in the Public tier
    self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
    public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
    # VSD verification
    self.verify_vsp_firewall_rule(public_ssh_rule)
    # Internal LB (wget) traffic tests
    # NOTE(review): a fresh SSH session is opened per wget run — presumably
    # wget_from_vm_cmd consumes/closes the channel; confirm in the harness.
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    wget_file_1 = self.wget_from_vm_cmd(ssh_client,
                                        int_lb_rule_1.sourceipaddress,
                                        self.test_data["http_rule"]["publicport"]
                                        )
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    wget_file_2 = self.wget_from_vm_cmd(ssh_client,
                                        int_lb_rule_3.sourceipaddress,
                                        self.test_data["http_rule"]["publicport"]
                                        )
    # Verifying Internal LB (wget) traffic tests
    # The fetched file must have been served by one of the load-balanced VMs
    # of the corresponding tier.
    self.verify_lb_wget_file(wget_file_1, [internal_vm_1, internal_vm_1_1, internal_vm_1_2])
    self.verify_lb_wget_file(wget_file_2, [internal_vm_2, internal_vm_2_1, internal_vm_2_2])
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_06_nuage_internallb_algorithms_traffic(self):
    """Test Nuage VSP VPC Internal LB functionality with different LB algorithms by performing (wget) traffic tests
    within a VPC
    """
    # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with different Internal LB algorithms:
    # 1. Round Robin
    # 2. Least connections
    # 3. Source
    # Verify the above Internal LB algorithms by performing multiple (wget) traffic tests within a VPC.
    # Creating a VPC offering
    self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
    vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
    self.validate_VpcOffering(vpc_off, state="Enabled")
    # Creating a VPC
    self.debug("Creating a VPC with Internal LB service...")
    vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
    self.validate_Vpc(vpc, state="Enabled")
    # Creating network offerings
    self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
    net_off_1 = self.create_NetworkOffering(
        self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
    self.validate_NetworkOffering(net_off_1, state="Enabled")
    self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
    net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
    self.validate_NetworkOffering(net_off_2, state="Enabled")
    # Creating VPC networks in the VPC, and deploying VMs
    self.debug("Creating a VPC network with Internal LB service...")
    internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
    self.validate_Network(internal_tier, state="Implemented")
    vr = self.get_Router(internal_tier)
    self.check_Router_state(vr, state="Running")
    self.debug("Deploying a VM in network - %s" % internal_tier.name)
    internal_vm = self.create_VM(internal_tier)
    self.check_VM_state(internal_vm, state="Running")
    # VSD verification
    self.verify_vsp_network(self.domain.id, internal_tier, vpc)
    self.verify_vsp_router(vr)
    self.verify_vsp_vm(internal_vm)
    # The public tier hosts the traffic-source VM, reachable via Static NAT.
    self.debug("Creating a VPC network without Internal LB service...")
    public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
    self.validate_Network(public_tier, state="Implemented")
    vr = self.get_Router(public_tier)
    self.check_Router_state(vr, state="Running")
    self.debug("Deploying a VM in network - %s" % public_tier.name)
    public_vm = self.create_VM(public_tier)
    self.check_VM_state(public_vm, state="Running")
    # VSD verification
    self.verify_vsp_network(self.domain.id, public_tier, vpc)
    self.verify_vsp_router(vr)
    self.verify_vsp_vm(public_vm)
    # Creating Internal LB Rules in the Internal tier with Round Robin Algorithm
    # (the default algorithm carried by the shared test_data entries).
    self.debug("Creating two Internal LB Rules (SSH & HTTP) with Round Robin Algorithm...")
    int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
    self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
    int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
                                                 vm_array=[internal_vm],
                                                 services=self.test_data["internal_lbrule_http"],
                                                 source_ip=int_lb_rule_1.sourceipaddress
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
    # Validating InternalLbVm deployment and state
    int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
    self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_1)
    # Deploying more VMs in the Internal tier
    self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
    internal_vm_1 = self.create_VM(internal_tier)
    internal_vm_2 = self.create_VM(internal_tier)
    # VSD verification
    self.verify_vsp_vm(internal_vm_1)
    self.verify_vsp_vm(internal_vm_2)
    # Adding newly deployed VMs to the created Internal LB rules
    self.debug("Adding two more virtual machines to the created Internal LB rules...")
    int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
    self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
    self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    # Validating InternalLbVm state
    self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_1)
    # FIX: the algorithm mutations below modify the SHARED test_data
    # dictionaries; without restoration they would leak "source" into every
    # subsequent test reusing these entries. Record the current values and
    # register their restoration with addCleanup so they are put back even
    # if this test fails part-way through.
    def _restore_algorithm(rule_key, value):
        # Restore the original "algorithm" entry; drop the key entirely if
        # it was absent before this test ran.
        if value is None:
            self.test_data[rule_key].pop("algorithm", None)
        else:
            self.test_data[rule_key]["algorithm"] = value
    self.addCleanup(_restore_algorithm, "internal_lbrule",
                    self.test_data["internal_lbrule"].get("algorithm"))
    self.addCleanup(_restore_algorithm, "internal_lbrule_http",
                    self.test_data["internal_lbrule_http"].get("algorithm"))
    # Creating Internal LB Rules in the Internal tier with Least connections Algorithm
    self.debug("Creating two Internal LB Rules (SSH & HTTP) with Least connections Algorithm...")
    self.test_data["internal_lbrule"]["algorithm"] = "leastconn"
    int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier,
                                                 vm_array=[internal_vm, internal_vm_1, internal_vm_2],
                                                 services=self.test_data["internal_lbrule"]
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    self.test_data["internal_lbrule_http"]["algorithm"] = "leastconn"
    int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier,
                                                 vm_array=[internal_vm, internal_vm_1, internal_vm_2],
                                                 services=self.test_data["internal_lbrule_http"],
                                                 source_ip=int_lb_rule_3.sourceipaddress
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    # Validating InternalLbVm deployment and state
    int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_3.sourceipaddress)
    self.check_InternalLbVm_state(internal_tier, int_lb_rule_3.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_2)
    # Creating Internal LB Rules in the Internal tier with Source Algorithm
    self.debug("Creating two Internal LB Rules (SSH & HTTP) with Source Algorithm...")
    self.test_data["internal_lbrule"]["algorithm"] = "source"
    int_lb_rule_5 = self.create_Internal_LB_Rule(internal_tier,
                                                 vm_array=[internal_vm, internal_vm_1, internal_vm_2],
                                                 services=self.test_data["internal_lbrule"]
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    self.test_data["internal_lbrule_http"]["algorithm"] = "source"
    int_lb_rule_6 = self.create_Internal_LB_Rule(internal_tier,
                                                 vm_array=[internal_vm, internal_vm_1, internal_vm_2],
                                                 services=self.test_data["internal_lbrule_http"],
                                                 source_ip=int_lb_rule_5.sourceipaddress
                                                 )
    self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active",
                                   vm_array=[internal_vm, internal_vm_1, internal_vm_2])
    # Validating InternalLbVm deployment and state
    int_lb_vm_3 = self.get_InternalLbVm(internal_tier, int_lb_rule_5.sourceipaddress)
    self.check_InternalLbVm_state(internal_tier, int_lb_rule_5.sourceipaddress, state="Running")
    # VSD Verification
    self.verify_vsp_LB_device(int_lb_vm_3)
    # Adding Network ACL rules in the Internal tier
    self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
    ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
    http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
    # VSD verification
    self.verify_vsp_firewall_rule(ssh_rule)
    self.verify_vsp_firewall_rule(http_rule)
    # Creating Static NAT Rule for the VM in the Public tier
    public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
    self.validate_PublicIPAddress(public_ip, public_tier)
    self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
    self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
    # VSD verification
    self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
    # Adding Network ACL rule in the Public tier
    self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
    public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
    # VSD verification
    self.verify_vsp_firewall_rule(public_ssh_rule)
    # Internal LB (wget) traffic tests with Round Robin Algorithm
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    self.validate_internallb_algorithm_traffic(ssh_client,
                                               int_lb_rule_1.sourceipaddress,
                                               self.test_data["http_rule"]["publicport"],
                                               [internal_vm, internal_vm_1, internal_vm_2],
                                               "roundrobin"
                                               )
    # Internal LB (wget) traffic tests with Least connections Algorithm
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    self.validate_internallb_algorithm_traffic(ssh_client,
                                               int_lb_rule_3.sourceipaddress,
                                               self.test_data["http_rule"]["publicport"],
                                               [internal_vm, internal_vm_1, internal_vm_2],
                                               "leastconn"
                                               )
    # Internal LB (wget) traffic tests with Source Algorithm
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    self.validate_internallb_algorithm_traffic(ssh_client,
                                               int_lb_rule_5.sourceipaddress,
                                               self.test_data["http_rule"]["publicport"],
                                               [internal_vm, internal_vm_1, internal_vm_2],
                                               "source"
                                               )
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with restarts of VPC network components by performing (wget)
traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with restarts of VPC networks (tiers):
# 1. Restart tier with InternalLbVm (cleanup = false), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 2. Restart tier with InternalLbVm (cleanup = true), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 3. Restart tier without InternalLbVm (cleanup = false), verify that this restart has no effect on the
# InternalLbVm functionality.
# 4. Restart tier without InternalLbVm (cleanup = true), verify that this restart has no effect on the
# InternalLbVm functionality.
# 5. Stop all the VMs configured with InternalLbVm, verify that the InternalLbVm gets destroyed in the Internal
# tier.
# 6. Start all the VMs configured with InternalLbVm, verify that the InternalLbVm gets deployed again in the
# Internal tier.
# 7. Restart VPC, verify that the VPC VR gets rebooted and this restart has no effect on the InternalLbVm
# functionality.
# Verify the above restarts of VPC networks (tiers) by performing (wget) traffic tests within a VPC.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Static NAT Rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsp_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = false)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier without cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=False)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier: %s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = true)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier with cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=True)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier with cleanup: "
"%s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier with cleanup")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = false)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier without cleanup...")
Network.restart(public_tier, self.api_client, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = true)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier with cleanup...")
Network.restart(public_tier, self.api_client, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Stopping VMs in the Internal tier
# wget traffic test fails as all the VMs in the Internal tier are in stopped state
self.debug("Stopping all the VMs in the Internal tier...")
internal_vm.stop(self.api_client)
internal_vm_1.stop(self.api_client)
internal_vm_2.stop(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Stopped")
self.check_VM_state(internal_vm_1, state="Stopped")
self.check_VM_state(internal_vm_2, state="Stopped")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm, stopped=True)
self.verify_vsp_vm(internal_vm_1, stopped=True)
self.verify_vsp_vm(internal_vm_2, stopped=True)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as all the VMs in the Internal tier are in stopped state")
# Starting VMs in the Internal tier
# wget traffic test succeeds as all the VMs in the Internal tier are back in running state
self.debug("Starting all the VMs in the Internal tier...")
internal_vm.start(self.api_client)
internal_vm_1.start(self.api_client)
internal_vm_2.start(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting all the VMs in the Internal tier"
": %s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm and all the VMs in the Internal tier to be fully resolved for "
"(wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting all the VMs in the Internal "
"tier")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = false)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC without cleanup...")
self.restart_Vpc(vpc, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = true)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC with cleanup...")
self.restart_Vpc(vpc, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_08_nuage_internallb_appliance_operations_traffic(self):
        """Test Nuage VSP VPC Internal LB functionality with InternalLbVm appliance operations by performing (wget)
        traffic tests within a VPC
        """
        # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with InternalLbVm appliance operations:
        # 1. Verify the InternalLbVm deployment by creating the Internal LB Rules when the VPC VR is in Stopped state,
        # VPC VR has no effect on the InternalLbVm functionality.
        # 2. Stop the InternalLbVm when the VPC VR is in Stopped State
        # 3. Start the InternalLbVm when the VPC VR is in Stopped state
        # 4. Stop the InternalLbVm when the VPC VR is in Running State
        # 5. Start the InternalLbVm when the VPC VR is in Running state
        # 6. Force stop the InternalLbVm when the VPC VR is in Running State
        # 7. Start the InternalLbVm when the VPC VR is in Running state
        # Verify the above restarts of VPC networks by performing (wget) traffic tests within a VPC.
        # Creating a VPC offering
        self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
        vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
        self.validate_VpcOffering(vpc_off, state="Enabled")
        # Creating a VPC
        self.debug("Creating a VPC with Internal LB service...")
        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc, state="Enabled")
        # Creating network offerings
        self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
        net_off_1 = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
        self.validate_NetworkOffering(net_off_1, state="Enabled")
        self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
        net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        # Creating VPC networks in the VPC, and deploying VMs
        self.debug("Creating a VPC network with Internal LB service...")
        internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
        self.validate_Network(internal_tier, state="Implemented")
        vr = self.get_Router(internal_tier)
        self.check_Router_state(vr, state="Running")
        self.debug("Deploying a VM in network - %s" % internal_tier.name)
        internal_vm = self.create_VM(internal_tier)
        self.check_VM_state(internal_vm, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, internal_tier, vpc)
        self.verify_vsp_router(vr)
        self.verify_vsp_vm(internal_vm)
        self.debug("Creating a VPC network without Internal LB service...")
        public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
        self.validate_Network(public_tier, state="Implemented")
        vr = self.get_Router(public_tier)
        self.check_Router_state(vr, state="Running")
        self.debug("Deploying a VM in network - %s" % public_tier.name)
        public_vm = self.create_VM(public_tier)
        self.check_VM_state(public_vm, state="Running")
        # VSD verification
        self.verify_vsp_network(self.domain.id, public_tier, vpc)
        self.verify_vsp_router(vr)
        self.verify_vsp_vm(public_vm)
        # Stopping the VPC VR
        # VPC VR has no effect on the InternalLbVm functionality
        Router.stop(self.api_client, id=vr.id)
        self.check_Router_state(vr, state="Stopped")
        self.validate_Network(public_tier, state="Implemented")
        self.validate_Network(internal_tier, state="Implemented")
        # VSD verification
        self.verify_vsp_router(vr, stopped=True)
        self.verify_vsp_network(self.domain.id, public_tier, vpc)
        self.verify_vsp_network(self.domain.id, internal_tier, vpc)
        # Creating Internal LB Rules in the Internal tier
        self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
        int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
        self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
        int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
                                                     vm_array=[internal_vm],
                                                     services=self.test_data["internal_lbrule_http"],
                                                     source_ip=int_lb_rule_1.sourceipaddress
                                                     )
        self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
        # Validating InternalLbVm deployment and state
        int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm)
        # Deploying more VMs in the Internal tier
        self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
        internal_vm_1 = self.create_VM(internal_tier)
        internal_vm_2 = self.create_VM(internal_tier)
        # VSD verification
        self.verify_vsp_vm(internal_vm_1)
        self.verify_vsp_vm(internal_vm_2)
        # Adding newly deployed VMs to the created Internal LB rules
        self.debug("Adding two more virtual machines to the created Internal LB rules...")
        int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
        self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
                                       vm_array=[internal_vm, internal_vm_1, internal_vm_2])
        int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
        self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
                                       vm_array=[internal_vm, internal_vm_1, internal_vm_2])
        # Validating InternalLbVm state
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm)
        # Adding Network ACL rules in the Internal tier
        self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
        ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
        http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
        # VSD verification
        self.verify_vsp_firewall_rule(ssh_rule)
        self.verify_vsp_firewall_rule(http_rule)
        # Creating Static NAT Rule for the VM in the Public tier
        public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
        self.validate_PublicIPAddress(public_ip, public_tier)
        self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
        self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
        # VSD verification
        self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
        # Adding Network ACL rule in the Public tier
        self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
        public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
        # VSD verification
        self.verify_vsp_firewall_rule(public_ssh_rule)
        # Internal LB (wget) traffic test
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        wget_file = self.wget_from_vm_cmd(ssh_client,
                                          int_lb_rule_1.sourceipaddress,
                                          self.test_data["http_rule"]["publicport"]
                                          )
        # Verifying Internal LB (wget) traffic test
        self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
        # # Stopping the InternalLbVm when the VPC VR is in Stopped state
        self.stop_InternalLbVm(int_lb_vm)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm, stopped=True)
        # Internal LB (wget) traffic test
        # With the LB appliance stopped, the wget is expected to fail.
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        with self.assertRaises(Exception):
            self.wget_from_vm_cmd(ssh_client,
                                  int_lb_rule_1.sourceipaddress,
                                  self.test_data["http_rule"]["publicport"]
                                  )
        self.debug("Failed to wget file as the InternalLbVm is in stopped state")
        # # Starting the InternalLbVm when the VPC VR is in Stopped state
        self.start_InternalLbVm(int_lb_vm)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm)
        # Internal LB (wget) traffic test
        # Retry loop: give the restarted appliance up to ~5 minutes (10 x 30s)
        # to start answering traffic before failing the test.
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        tries = 0
        while True:
            try:
                wget_file = self.wget_from_vm_cmd(ssh_client,
                                                  int_lb_rule_1.sourceipaddress,
                                                  self.test_data["http_rule"]["publicport"]
                                                  )
            except Exception as e:
                self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
                           % e)
                if tries == 10:
                    break
                self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
                time.sleep(30)
                tries += 1
                continue
            self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
            break
        # Verifying Internal LB (wget) traffic test
        self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
        # Starting the VPC VR
        # VPC VR has no effect on the InternalLbVm functionality
        Router.start(self.api_client, id=vr.id)
        self.check_Router_state(vr)
        self.validate_Network(public_tier, state="Implemented")
        self.validate_Network(internal_tier, state="Implemented")
        # VSD verification
        self.verify_vsp_router(vr)
        self.verify_vsp_network(self.domain.id, public_tier, vpc)
        self.verify_vsp_network(self.domain.id, internal_tier, vpc)
        # # Stopping the InternalLbVm when the VPC VR is in Running state
        self.stop_InternalLbVm(int_lb_vm)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm, stopped=True)
        # Internal LB (wget) traffic test
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        with self.assertRaises(Exception):
            self.wget_from_vm_cmd(ssh_client,
                                  int_lb_rule_1.sourceipaddress,
                                  self.test_data["http_rule"]["publicport"]
                                  )
        self.debug("Failed to wget file as the InternalLbVm is in stopped state")
        # # Starting the InternalLbVm when the VPC VR is in Running state
        self.start_InternalLbVm(int_lb_vm)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm)
        # Internal LB (wget) traffic test
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        tries = 0
        while True:
            try:
                wget_file = self.wget_from_vm_cmd(ssh_client,
                                                  int_lb_rule_1.sourceipaddress,
                                                  self.test_data["http_rule"]["publicport"]
                                                  )
            except Exception as e:
                self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
                           % e)
                if tries == 10:
                    break
                self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
                time.sleep(30)
                tries += 1
                continue
            self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
            break
        # Verifying Internal LB (wget) traffic test
        self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
        # # Force Stopping the InternalLbVm when the VPC VR is in Running state
        self.stop_InternalLbVm(int_lb_vm, force=True)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm, stopped=True)
        # Internal LB (wget) traffic test
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        with self.assertRaises(Exception):
            self.wget_from_vm_cmd(ssh_client,
                                  int_lb_rule_1.sourceipaddress,
                                  self.test_data["http_rule"]["publicport"]
                                  )
        self.debug("Failed to wget file as the InternalLbVm is in stopped state")
        # # Starting the InternalLbVm when the VPC VR is in Running state
        self.start_InternalLbVm(int_lb_vm)
        self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
        # VSD Verification
        self.verify_vsp_LB_device(int_lb_vm)
        # Internal LB (wget) traffic test
        ssh_client = self.ssh_into_VM(public_vm, public_ip)
        tries = 0
        while True:
            try:
                wget_file = self.wget_from_vm_cmd(ssh_client,
                                                  int_lb_rule_1.sourceipaddress,
                                                  self.test_data["http_rule"]["publicport"]
                                                  )
            except Exception as e:
                self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
                           % e)
                if tries == 10:
                    break
                self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
                time.sleep(30)
                tries += 1
                continue
            self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
            break
        # Verifying Internal LB (wget) traffic test
        self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
| 1.632813 | 2 |
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/raw/GL/SUN/convolution_border_modes.py | temelkirci/Motion_Editor | 1 | 12760907 | '''OpenGL extension SUN.convolution_border_modes
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SUN_convolution_border_modes'
_DEPRECATED = False
GL_WRAP_BORDER_SUN = constant.Constant( 'GL_WRAP_BORDER_SUN', 0x81D4 )
def glInitConvolutionBorderModesSUN():
    '''Return boolean indicating whether the GL_SUN_convolution_border_modes
    extension is available in the current GL context.'''
    return extensions.hasGLExtension( EXTENSION_NAME )
| 1.601563 | 2 |
institution/migrations/0006_auto_20180509_0858.py | mmesiti/cogs3 | 1 | 12760908 | <reponame>mmesiti/cogs3
# Generated by Django 2.0.2 on 2018-05-09 08:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('institution', '0005_institution_identity_provider'),
]
operations = [
migrations.RemoveField(
model_name='institution',
name='identity_provider_login',
),
migrations.RemoveField(
model_name='institution',
name='identity_provider_logout',
),
]
| 1.507813 | 2 |
app/blog/admin.py | elevoro/pyweb1 | 0 | 12760909 | <gh_stars>0
from django.contrib import admin
from .models import Note
# Change the date/time display format for the RUSSIAN locale only.
# To apply this site-wide, the code should live in `settings.py` instead.
from django.conf.locale.ru import formats as ru_formats
ru_formats.DATETIME_FORMAT = "d.m.Y H:i:s"
@admin.register(Note)
class NoteAdmin(admin.ModelAdmin):
    """Admin configuration for blog notes."""

    # Columns shown in the change-list view
    list_display = ('title', 'public', 'date_add', 'author', 'id', )
    # Field grouping on the edit form
    fields = ('date_add', ('title', 'public'), 'message', 'author')
    # Read-only fields on the edit form
    readonly_fields = ('date_add', )
    # Fields searched by the admin search box
    search_fields = ['title', 'message', ]
    # Right-hand sidebar filters
    list_filter = ('public', 'author', )

    def save_model(self, request, obj, form, change):
        # Default the author to the current user (if none was selected) when saving.
        # docs: https://docs.djangoproject.com/en/3.1/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_model
        if not hasattr(obj, 'author') or not obj.author:
            obj.author = request.user
        super().save_model(request, obj, form, change)
setup.py | einSelbst/WebHelpers2 | 0 | 12760910 | <reponame>einSelbst/WebHelpers2
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from webhelpers2 import __version__
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates the test run to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside the eggs aren't loaded yet.
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Runtime dependencies.
# Bug fix: the original had a stray trailing comma after the closing bracket,
# which made ``install_requires`` a 1-tuple wrapping the list, so setuptools
# received ``([...],)`` instead of a flat list of requirement strings.
install_requires = [
    "MarkupSafe>=0.9.2",
    "six>=1.4.0",
]

setup(
    name="WebHelpers2",
    version=__version__,
    description='WebHelpers2',
    long_description="""
WebHelpers2 is the successor to the widely-used WebHelpers utilities.
It contains convenience functions to make HTML tags, process text, format numbers, do basic statistics, work with collections, and more.
""",
    author='<NAME>, <NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>, <EMAIL>',
    url='https://webhelpers2.readthedocs.org/en/latest/',
    packages=find_packages(exclude=['ez_setup']),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires,
    tests_require=[
        'pytest',
    ],
    cmdclass = {'test': PyTest},
    classifiers=["Development Status :: 4 - Beta",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved :: BSD License",
                 "Programming Language :: Python",
                 "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 ],
    entry_points="""
    """,
)
| 1.96875 | 2 |
backend/api/user/resource/fluency.py | blodstone/harness | 0 | 12760911 | from datetime import datetime, timedelta
from random import shuffle
from flask import request
from flask_restful import Resource, abort
from sqlalchemy.sql.expression import func
from backend.model.project import FluencyProject
from backend.model.result import FluencyResultSchema, FluencyResult
from backend.model.project_status import ProjectStatus, ProjectStatusSchema
from backend.model import ma, db
from backend.model.summary import SummarySchema, SanitySummarySchema, \
Summary, SanitySummary, SummaryGroup, SummaryGroupList
class ResSumObj(object):
    """Lightweight pairing of a fluency result with its summary record,
    shaped for serialization by ``ResSumSchema``."""

    def __init__(self, result, summary):
        # Keep both records together so they can be nested in one payload.
        self.result, self.summary = result, summary
class ResSumSchema(ma.Schema):
    # Marshmallow schema nesting a fluency result together with its summary.
    result = ma.Nested(FluencyResultSchema)
    summary = ma.Nested(SummarySchema)
class FluencyObj(object):
    """Container bundling everything a fluency task response needs:
    the result/summary pairs, a sanity-check summary, and the project status."""

    def __init__(self, res_sums, sanity_summ, proj_status):
        self.res_sums, self.sanity_summ, self.proj_status = (
            res_sums,
            sanity_summ,
            proj_status,
        )
class FluencySchema(ma.Schema):
    # Top-level marshmallow schema for the payload returned by FluencyResource.get.
    res_sums = ma.Nested(ResSumSchema, many=True)
    sanity_summ = ma.Nested(SanitySummarySchema)
    proj_status = ma.Nested(ProjectStatusSchema)
class FluencyResource(Resource):
    """REST endpoint that serves fluency-rating tasks and stores submitted ratings."""

    def post(self):
        """Persist submitted fluency scores and update the owning project status.

        When the submission is flagged invalid, fresh (blank) results are
        created for the same summaries and the submitted ones are marked
        invalid, so the task can be redone.
        """
        data = request.get_json()
        old_results = []
        for result in data['results']:
            old_result = FluencyResult.query.get(result['id'])
            old_result.fluency = result['fluency']
            old_results.append(old_result)
        proj_status = ProjectStatus.query.get(data['proj_status']['id'])
        proj_status.validity = data['proj_status']['validity']
        proj_status.is_finished = data['proj_status']['is_finished']
        proj_status.is_active = data['proj_status']['is_active']
        proj_status.good_summ_score = data['proj_status']['good_summ_score']
        proj_status.mediocre_summ_score = data['proj_status']['mediocre_summ_score']
        proj_status.bad_summ_score = data['proj_status']['bad_summ_score']
        proj_status.sanity_summ_id = data['proj_status']['sanity_summ_id']
        if not proj_status.validity:
            # Recreate results
            results = []
            for result in data['results']:
                new_result = FluencyResult(
                    summary_id=result['summary_id'],
                    proj_status_id=result['proj_status_id'])
                results.append(new_result)
            db.session.bulk_save_objects(results)
            # Invalidate old results
            for old_result in old_results:
                old_result.is_invalid = True
        db.session.commit()

    def get(self, project_id):
        """Hand out one fluency task for *project_id*.

        Returns the pending results with their summaries, a random sanity
        summary, and the chosen project status, which is marked active with a
        refreshed expiry before being sent.
        """
        project = FluencyProject.query.get(project_id)
        if project is None:
            return abort(404, message=f"Fluency project {project_id} not found")
        else:
            # Get one unfinished project_status
            current_time = datetime.utcnow()
            proj_status = ProjectStatus.query\
                .filter_by(fluency_proj_id=project.id,
                           is_finished=False, is_active=False)\
                .order_by(func.rand())\
                .first()
            if proj_status is None:
                # Fall back to active-but-expired statuses (abandoned sessions).
                proj_status = ProjectStatus.query \
                    .filter_by(fluency_proj_id=project.id, is_finished=False)\
                    .filter(ProjectStatus.expired_in < current_time)\
                    .order_by(func.rand())\
                    .first()
            if proj_status is None:
                return abort(404, message=f"No project status is opened.")
            # Get related results
            results = FluencyResult.query\
                .filter_by(proj_status_id=proj_status.id, is_invalid=False)\
                .all()
            res_sums = []
            for result in results:
                summary = Summary.query.get(result.summary_id)
                res_sums.append(ResSumObj(result=result, summary=summary))
            # Get random sanity summaries
            # The function rand() is specific to MySql only (https://stackoverflow.com/q/60805)
            sanity_summ = SanitySummary.query.order_by(func.rand()).first()
            fluency = FluencyObj(
                res_sums=res_sums,
                sanity_summ=sanity_summ,
                proj_status=proj_status)
            # Change project status attribute before sending
            proj_status.is_active = True
            proj_status.expired_in = datetime.utcnow() + timedelta(minutes=project.expire_duration)
            db.session.commit()
            return FluencySchema().dump(fluency)
| 2.109375 | 2 |
mordor_magic/mordor_app/views.py | Far4Ru/mordor-magic-2 | 0 | 12760912 | from datetime import datetime
from itertools import chain
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import User, Character
from .serializers import *
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
@permission_classes([IsAuthenticated])
class CharacterCreateAPIView(generics.CreateAPIView):
    # POST: create a new character.
    serializer_class = CharacterCreateSerializer
    queryset = Character.objects.all()
@permission_classes([IsAuthenticated])
class CharacterListAPIView(generics.ListAPIView):
    # GET: list all characters using the public serializer.
    serializer_class = CharacterPublicSerializer
    model = Character
    queryset = Character.objects.all()
@permission_classes([IsAuthenticated])
class UserListAPIView(generics.ListAPIView):
    # GET: list all users.
    serializer_class = UserSerializer
    queryset = User.objects.all()
@permission_classes([IsAuthenticated])
class EventListAPIView(generics.ListAPIView):
    """List events matching a given ``date`` query parameter
    (format ``%d.%m.%Y %H:%M:%S``), including weekly periodic events.
    Without a ``date`` parameter, all events are returned."""
    serializer_class = EventSerializer
    queryset = Event.objects.all()
    model = Event

    def get_queryset(self):
        date = self.request.GET.get('date')
        if date:
            try:
                date_time_obj = datetime.strptime(date, '%d.%m.%Y %H:%M:%S')
                # Daily events always match.
                queryset = self.queryset.filter(period="d")
                # Bug fix: Django's ``__iso_week_day`` lookup expects ISO
                # weekday numbers (1=Monday..7=Sunday), i.e. ``isoweekday()``.
                # The original used ``weekday()`` (0..6) for the bi-weekly
                # branch, which matched the wrong day of the week.
                queryset_week2 = self.queryset.filter(period="w")\
                    .filter(period_across=2)\
                    .filter(period_parity=date_time_obj.timetuple().tm_yday // 7 % 2)\
                    .filter(start_date__iso_week_day=date_time_obj.isoweekday())
                queryset_week1 = self.queryset.filter(period="w")\
                    .filter(period_across=1)\
                    .filter(start_date__iso_week_day=date_time_obj.isoweekday())
                queryset = list(chain(queryset, queryset_week1, queryset_week2))
            except ValueError:
                # Unparseable date -> empty result rather than a 500.
                queryset = self.model.objects.none()
            return queryset
        return self.model.objects.all()
@permission_classes([IsAuthenticated])
class UserAPIView(generics.ListAPIView):
    # GET: show a single user (``username`` query param, defaulting to the
    # authenticated user). PATCH: partially update the requester's profile.
    model = User
    serializer_class = UserSerializer
    queryset = model.objects.all()

    def get_queryset(self):
        """Return the user matching ``username`` (falls back to the requester)."""
        username = self.request.GET.get('username')
        if not username:
            username = self.request.user.username
        if username:
            try:
                queryset = self.queryset.filter(username=username)
            except ValueError:
                queryset = self.model.objects.none()
            return queryset
        # NOTE(review): effectively unreachable — username always falls back
        # to the authenticated user's name above.
        return self.model.objects.all()

    @staticmethod
    def patch(request):
        """Partially update the authenticated user's own profile."""
        user = request.user
        serializer = UserUpdateSerializer(user, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(status=status.HTTP_201_CREATED, data=serializer.data)
        return Response(status=status.HTTP_400_BAD_REQUEST, data="wrong parameters")
@permission_classes([IsAuthenticated])
class UserCharactersAPIView(generics.ListAPIView):
    """List a user's characters, looked up by the ``username`` query parameter."""
    serializer_class = UserCharactersSerializer
    queryset = User.objects.all()

    def get_queryset(self):
        """Return users matching ``username``, or an empty queryset.

        Bug fix: the original returned ``self.User.objects.none()`` — the view
        instance has no ``User`` attribute, so both fallback paths raised
        AttributeError instead of returning an empty queryset.
        """
        username = self.request.GET.get('username')
        if username:
            try:
                return self.queryset.filter(username=username)
            except ValueError:
                return User.objects.none()
        return User.objects.none()
@permission_classes([IsAuthenticated])
class CharacterEventsAPIView(generics.ListAPIView):
    """List a character's events, looked up by the ``nickname`` query parameter."""
    serializer_class = CharacterEventsSerializer
    queryset = Character.objects.all()

    def get_queryset(self):
        """Return characters matching ``nickname``, or an empty queryset.

        Bug fix: the original returned ``self.Character.objects.none()`` — the
        view instance has no ``Character`` attribute, so both fallback paths
        raised AttributeError instead of returning an empty queryset.
        """
        nickname = self.request.GET.get('nickname')
        if nickname:
            try:
                return self.queryset.filter(nickname=nickname)
            except ValueError:
                return Character.objects.none()
        return Character.objects.none()
@permission_classes([IsAuthenticated])
class UserCharacterEventsAPIView(generics.ListAPIView):
    # GET: ownership records for the requester's characters that have an
    # event on the given ``date`` query parameter.
    serializer_class = CharacterOwnerSerializer
    queryset = CharacterOwner.objects.all()

    def list(self, request):
        date = self.request.GET.get('date')
        # Events on the requested date -> their characters -> the requester's
        # ownership records for those characters.
        inner_character_event = CharacterEvent.objects.filter(date=date)
        inner_character = Character.objects.filter(character_events__in=inner_character_event)
        queryset = CharacterOwner.objects.filter(owner=request.user).filter(character__in=inner_character)
        serializer = UserCharacterEventsSerializer(queryset, many=True)
        return Response(serializer.data)
@permission_classes([IsAuthenticated])
class CharacterEventsCountAPIView(generics.ListAPIView):
    """Event counts for a character, looked up by the ``nickname`` query parameter."""
    serializer_class = CharacterEventsCountSerializer
    queryset = Character.objects.all()

    def get_queryset(self):
        """Return characters matching ``nickname``, or an empty queryset.

        Bug fix: the original returned ``self.Character.objects.none()`` — the
        view instance has no ``Character`` attribute, so both fallback paths
        raised AttributeError instead of returning an empty queryset.
        """
        nickname = self.request.GET.get('nickname')
        if nickname:
            try:
                return self.queryset.filter(nickname=nickname)
            except ValueError:
                return Character.objects.none()
        return Character.objects.none()
@permission_classes([IsAuthenticated])
class CharacterOwnersListAPIView(generics.ListAPIView):
    # GET: list all character-ownership records.
    serializer_class = CharacterOwnerSerializer
    queryset = CharacterOwner.objects.all()
@permission_classes([IsAuthenticated])
class CharacterEventsListAPIView(generics.ListAPIView):
    # GET: list all character-event records.
    serializer_class = CharacterEventSerializer
    queryset = CharacterEvent.objects.all()
| 2.140625 | 2 |
python-opencv/blog7-blur/demo-filter2D.py | meteor1993/python-learning | 83 | 12760913 | <gh_stars>10-100
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Read the (noisy) sample image; convert BGR -> RGB so matplotlib colors are correct.
img = cv.imread("maliao_noise.jpg", cv.IMREAD_UNCHANGED)
rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
# 5x5 normalized box (averaging) kernel applied via filter2D.
kernel = np.ones((5,5),np.float32)/25
dst = cv.filter2D(rgb_img, -1, kernel)
titles = ['Source Image', 'filter2D Image']
images = [rgb_img, dst]
# Show original and filtered images side by side without axis ticks.
for i in range(2):
    plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
polyglot/commands.py | riccardoFasan/deeply | 2 | 12760914 | <reponame>riccardoFasan/deeply
import asyncio
from typing import Any, Callable, Optional
from abc import ABC, abstractmethod
import colorama
import deepl
import polyglot
from polyglot.utils import (
DownloadedDocumentStream,
get_color_by_percentage,
get_truncated_text,
)
from polyglot.errors import DeeplError
def handle_error(function: Callable) -> Callable:
    """Decorator for ``DeeplCommand.execute`` methods: intercepts DeepL API
    exceptions and routes them through ``DeeplError``.

    NOTE(review): the ``DeeplError`` instance is constructed but neither
    raised nor returned -- presumably DeeplError reports (or exits) in its
    constructor; confirm, otherwise failures are silently swallowed and the
    wrapper returns None.
    """
    def function_wrapper(instance: DeeplCommand):
        try:
            return function(instance)
        except deepl.DeepLException as error:
            DeeplError(error)
    return function_wrapper
class DeeplCommand(ABC):
    """Abstract base for commands that talk to the DeepL API.

    Subclasses implement :meth:`execute`.
    """
    _license: str                # DeepL API authentication key
    _translator: deepl.Translator  # client bound to ``_license``
    def __init__(self, license: str) -> None:
        self._license = license
        self._translator = deepl.Translator(self._license)
    @abstractmethod
    def execute(self) -> Any:
        """Run the command; return value depends on the concrete subclass."""
        pass
class TranslateCommand(DeeplCommand, ABC):
    """Abstract base for commands that translate some content via DeepL."""

    _content: Any
    _target_lang: str
    _source_lang: str

    def __init__(self, license: str, content: Any, target_lang: str, source_lang: str) -> None:
        super().__init__(license)
        self._content = content
        # DeepL deprecated plain "EN" as a target language; map it to "EN-US".
        self._target_lang = 'EN-US' if target_lang == 'EN' else target_lang
        self._source_lang = source_lang

    @abstractmethod
    def execute(self) -> Any:
        """Perform the translation; return value depends on the subclass."""
        pass
class PrintUsageInfo(DeeplCommand):
    """Print version info plus the DeepL account's character quota/usage."""
    @handle_error
    def execute(self) -> None:
        """Fetch usage from DeepL and pretty-print it to stdout."""
        usage: deepl.Usage = self._translator.get_usage()
        # Both limit and count may be None depending on the account plan.
        limit: Optional[int] = usage.character.limit
        count: Optional[int] = usage.character.count
        print(
            f"\nPolyglot version: {polyglot.__version__}\nDeepL version: {deepl.__version__}\nAPI key: {self._license}"
        )
        if limit is not None:
            print(f"Characters limit: {limit}")
        if count is not None:
            count_text: str = f"Used Characters: {count}"
            if limit is not None:
                # Colorize the percentage (green/yellow/red by consumption).
                percentage: int = round((count / limit) * 100)
                print_color: str = get_color_by_percentage(percentage)
                count_text += f" {print_color}({percentage}%)"
            print(count_text)
class PrintSupportedLanguages(DeeplCommand):
    """Print every source and target language supported by DeepL."""

    @handle_error
    def execute(self) -> None:
        """List languages on stdout, flagging formality-aware targets."""
        print("\nAvailable source languages:")
        for source in self._translator.get_source_languages():
            print(f"{source.name} ({source.code})")

        print("\nAvailable target languages:")
        for target in self._translator.get_target_languages():
            description: str = f"{target.name} ({target.code})"
            if target.supports_formality:
                description += " - formality supported"
            print(description)
class TranslateText(TranslateCommand):
    """Translate a text snippet and echo a truncated before/after preview."""

    # Maximum characters shown when echoing source/translated text.
    __LEN_LIMIT: int = 150

    @handle_error
    def execute(self) -> str:
        """Translate ``self._content``; return the translation, '' on failure."""
        truncated_text: str = get_truncated_text(self._content, self.__LEN_LIMIT)
        response: Any = self._translator.translate_text(
            [self._content],
            target_lang=self._target_lang,
            source_lang=self._source_lang,
        )
        try:
            translation: str = response[0].text
            truncated_translation: str = get_truncated_text(
                translation, self.__LEN_LIMIT
            )
            print(f'"{truncated_text}" => "{truncated_translation}"')
            return translation
        # BUG FIX: ``response`` is a list, so a missing result raises
        # IndexError -- the original ``except KeyError`` never fired.
        except (IndexError, KeyError):
            # (also fixed message typo "traslation" -> "translation")
            print(
                f'{colorama.Fore.YELLOW}\nNo translation found for "{truncated_text}"!\n'
            )
            return ""
class TranslateDocumentCommand(TranslateCommand):
    """Upload a document to DeepL, poll until translated, return the stream.

    NOTE(review): ``__get_document`` re-polls recursively with no delay
    between requests -- a tight loop against the API (and unbounded
    recursion for long jobs); consider an ``await asyncio.sleep`` between
    polls. Left unchanged here.
    """
    __document: DownloadedDocumentStream  # set once translation completes
    __remaining: int = 0  # last reported seconds-remaining (for de-duped logging)
    @handle_error
    def execute(self) -> DownloadedDocumentStream:
        """Upload, poll to completion, and return the downloaded stream."""
        document_data: deepl.DocumentHandle = self.__send_document()
        asyncio.run(self.__get_document(document_data))
        return self.__document
    def __send_document(self) -> deepl.DocumentHandle:
        """Upload the file at ``self._content`` and return its handle."""
        with open(self._content, "rb") as document:
            return self._translator.translate_document_upload(
                document,
                target_lang=self._target_lang,
                source_lang=self._source_lang,
                filename=self._content,
            )
    async def __get_document(self, document_handle: deepl.DocumentHandle) -> None:
        """Poll DeepL until the document is done, then download it."""
        status: deepl.DocumentStatus = self.__check_document_status(document_handle)
        if status.ok and status.done:
            print(
                f"Translation completed. Billed characters: {status.billed_characters}."
            )
            self.__document = self.__download_translated_document(document_handle)
            self.__remaining = 0
            return
        # * sometimes there are no seconds even if it's still translating
        if (
            status.seconds_remaining is not None
            and self.__remaining != status.seconds_remaining
        ):
            # Only print when the estimate changed, to avoid log spam.
            self.__remaining = status.seconds_remaining
            print(f"Remaining {status.seconds_remaining} seconds...")
        await self.__get_document(document_handle)
    def __check_document_status(
        self, document_handle: deepl.DocumentHandle
    ) -> deepl.DocumentStatus:
        """Return the current translation status for ``document_handle``."""
        return self._translator.translate_document_get_status(document_handle)
    def __download_translated_document(
        self, document_handle: deepl.DocumentHandle
    ) -> DownloadedDocumentStream:
        """Download the finished document and return its content iterator."""
        response: Any = self._translator.translate_document_download(document_handle)
        return response.iter_content()
| 2.34375 | 2 |
scrapy/tests/test_selector.py | hobson/scrapy | 1 | 12760915 | <reponame>hobson/scrapy<filename>scrapy/tests/test_selector.py<gh_stars>1-10
import re
import warnings
import weakref
from twisted.trial import unittest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import TextResponse, HtmlResponse, XmlResponse
from scrapy.selector import Selector
from scrapy.selector.lxmlsel import XmlXPathSelector, HtmlXPathSelector, XPathSelector
class SelectorTestCase(unittest.TestCase):
    """Behavioral tests for the unified ``Selector`` API.

    FIX: two xpath string literals in ``test_boolean_result`` had been
    corrupted to ``<EMAIL>`` (data-scrubbing artifact); restored to
    ``@name`` to match the upstream Scrapy test suite.
    """

    sscls = Selector

    def test_simple_selection(self):
        """Simple selector tests"""
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        sel = self.sscls(response)
        xl = sel.xpath('//input')
        self.assertEqual(2, len(xl))
        for x in xl:
            assert isinstance(x, self.sscls)
        self.assertEqual(sel.xpath('//input').extract(),
                         [x.extract() for x in sel.xpath('//input')])
        self.assertEqual([x.extract() for x in sel.xpath("//input[@name='a']/@name")],
                         [u'a'])
        self.assertEqual([x.extract() for x in sel.xpath("number(concat(//input[@name='a']/@value, //input[@name='b']/@value))")],
                         [u'12.0'])
        self.assertEqual(sel.xpath("concat('xpath', 'rules')").extract(),
                         [u'xpathrules'])
        self.assertEqual([x.extract() for x in sel.xpath("concat(//input[@name='a']/@value, //input[@name='b']/@value)")],
                         [u'12'])

    def test_select_unicode_query(self):
        body = u"<p><input name='\xa9' value='1'/></p>"
        response = TextResponse(url="http://example.com", body=body, encoding='utf8')
        sel = self.sscls(response)
        self.assertEqual(sel.xpath(u'//input[@name="\xa9"]/@value').extract(), [u'1'])

    def test_list_elements_type(self):
        """Test Selector returning the same type in selection methods"""
        text = '<p>test<p>'
        assert isinstance(self.sscls(text=text).xpath("//p")[0], self.sscls)
        assert isinstance(self.sscls(text=text).css("p")[0], self.sscls)

    def test_boolean_result(self):
        body = "<p><input name='a'value='1'/><input name='b'value='2'/></p>"
        response = TextResponse(url="http://example.com", body=body)
        xs = self.sscls(response)
        # FIX: restored corrupted "@name" tokens in the two xpath literals.
        self.assertEquals(xs.xpath("//input[@name='a']/@name='a'").extract(), [u'1'])
        self.assertEquals(xs.xpath("//input[@name='a']/@name='n'").extract(), [u'0'])

    def test_differences_parsing_xml_vs_html(self):
        """Test that XML and HTML Selector's behave differently"""
        # some text which is parsed differently by XML and HTML flavors
        text = '<div><img src="a.jpg"><p>Hello</div>'
        hs = self.sscls(text=text, type='html')
        self.assertEqual(hs.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></div>'])
        xs = self.sscls(text=text, type='xml')
        self.assertEqual(xs.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])

    def test_flavor_detection(self):
        text = '<div><img src="a.jpg"><p>Hello</div>'
        sel = self.sscls(XmlResponse('http://example.com', body=text))
        self.assertEqual(sel.type, 'xml')
        self.assertEqual(sel.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
        sel = self.sscls(HtmlResponse('http://example.com', body=text))
        self.assertEqual(sel.type, 'html')
        self.assertEqual(sel.xpath("//div").extract(),
                         [u'<div><img src="a.jpg"><p>Hello</p></div>'])

    def test_nested_selectors(self):
        """Nested selector tests"""
        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        divtwo = x.xpath('//div[@class="two"]')
        self.assertEqual(divtwo.xpath("//li").extract(),
                         ["<li>one</li>", "<li>two</li>", "<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath("./ul/li").extract(),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath(".//li").extract(),
                         ["<li>four</li>", "<li>five</li>", "<li>six</li>"])
        self.assertEqual(divtwo.xpath("./li").extract(), [])

    def test_mixed_nested_selectors(self):
        body = '''<body>
                    <div id=1>not<span>me</span></div>
                    <div class="dos"><p>text</p><a href='#'>foo</a></div>
               </body>'''
        sel = self.sscls(text=body)
        self.assertEqual(sel.xpath('//div[@id="1"]').css('span::text').extract(), [u'me'])
        self.assertEqual(sel.css('#1').xpath('./span/text()').extract(), [u'me'])

    def test_dont_strip(self):
        sel = self.sscls(text='<div>fff: <a href="#">zzz</a></div>')
        self.assertEqual(sel.xpath("//text()").extract(), [u'fff: ', u'zzz'])

    def test_namespaces_simple(self):
        body = """
        <test xmlns:somens="http://scrapy.org">
           <somens:a id="foo">take this</a>
           <a id="bar">found</a>
        </test>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        x.register_namespace("somens", "http://scrapy.org")
        self.assertEqual(x.xpath("//somens:a/text()").extract(),
                         [u'take this'])

    def test_namespaces_multiple(self):
        body = """<?xml version="1.0" encoding="UTF-8"?>
<BrowseNode xmlns="http://webservices.amazon.com/AWSECommerceService/2005-10-05"
            xmlns:b="http://somens.com"
            xmlns:p="http://www.scrapy.org/product" >
    <b:Operation>hello</b:Operation>
    <TestTag b:att="value"><Other>value</Other></TestTag>
    <p:SecondTestTag><material>iron</material><price>90</price><p:name>Dried Rose</p:name></p:SecondTestTag>
</BrowseNode>
        """
        response = XmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        x.register_namespace("xmlns", "http://webservices.amazon.com/AWSECommerceService/2005-10-05")
        x.register_namespace("p", "http://www.scrapy.org/product")
        x.register_namespace("b", "http://somens.com")
        self.assertEqual(len(x.xpath("//xmlns:TestTag")), 1)
        self.assertEqual(x.xpath("//b:Operation/text()").extract()[0], 'hello')
        self.assertEqual(x.xpath("//xmlns:TestTag/@b:att").extract()[0], 'value')
        self.assertEqual(x.xpath("//p:SecondTestTag/xmlns:price/text()").extract()[0], '90')
        self.assertEqual(x.xpath("//p:SecondTestTag").xpath("./xmlns:price/text()")[0].extract(), '90')
        self.assertEqual(x.xpath("//p:SecondTestTag/xmlns:material/text()").extract()[0], 'iron')

    def test_re(self):
        body = """<div>Name: Mary
                    <ul>
                      <li>Name: John</li>
                      <li>Age: 10</li>
                      <li>Name: Paul</li>
                      <li>Age: 20</li>
                    </ul>
                    Age: 20
                  </div>"""
        response = HtmlResponse(url="http://example.com", body=body)
        x = self.sscls(response)
        name_re = re.compile("Name: (\w+)")
        self.assertEqual(x.xpath("//ul/li").re(name_re),
                         ["John", "Paul"])
        self.assertEqual(x.xpath("//ul/li").re("Age: (\d+)"),
                         ["10", "20"])

    def test_re_intl(self):
        body = """<div>Evento: cumplea\xc3\xb1os</div>"""
        response = HtmlResponse(url="http://example.com", body=body, encoding='utf-8')
        x = self.sscls(response)
        self.assertEqual(x.xpath("//div").re("Evento: (\w+)"), [u'cumplea\xf1os'])

    def test_selector_over_text(self):
        hs = self.sscls(text='<root>lala</root>')
        self.assertEqual(hs.extract(), u'<html><body><root>lala</root></body></html>')
        xs = self.sscls(text='<root>lala</root>', type='xml')
        self.assertEqual(xs.extract(), u'<root>lala</root>')
        self.assertEqual(xs.xpath('.').extract(), [u'<root>lala</root>'])

    def test_invalid_xpath(self):
        response = XmlResponse(url="http://example.com", body="<html></html>")
        x = self.sscls(response)
        xpath = "//test[@foo='bar]"
        try:
            x.xpath(xpath)
        except ValueError as e:
            assert xpath in str(e), "Exception message does not contain invalid xpath"
        except Exception:
            raise AssertionError("A invalid XPath does not raise ValueError")
        else:
            raise AssertionError("A invalid XPath does not raise an exception")

    def test_http_header_encoding_precedence(self):
        # u'\xa3'     = pound symbol in unicode
        # u'\xc2\xa3' = pound symbol in utf-8
        # u'\xa3'     = pound symbol in latin-1 (iso-8859-1)
        meta = u'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
        head = u'<head>' + meta + u'</head>'
        body_content = u'<span id="blank">\xa3</span>'
        body = u'<body>' + body_content + u'</body>'
        html = u'<html>' + head + body + u'</html>'
        encoding = 'utf-8'
        html_utf8 = html.encode(encoding)
        headers = {'Content-Type': ['text/html; charset=utf-8']}
        response = HtmlResponse(url="http://example.com", headers=headers, body=html_utf8)
        x = self.sscls(response)
        self.assertEquals(x.xpath("//span[@id='blank']/text()").extract(),
                          [u'\xa3'])

    def test_empty_bodies(self):
        # shouldn't raise errors
        r1 = TextResponse('http://www.example.com', body='')
        self.sscls(r1).xpath('//text()').extract()

    def test_null_bytes(self):
        # shouldn't raise errors
        r1 = TextResponse('http://www.example.com', \
                          body='<root>pre\x00post</root>', \
                          encoding='utf-8')
        self.sscls(r1).xpath('//text()').extract()

    def test_badly_encoded_body(self):
        # \xe9 alone isn't valid utf8 sequence
        r1 = TextResponse('http://www.example.com', \
                          body='<html><p>an Jos\xe9 de</p><html>', \
                          encoding='utf-8')
        self.sscls(r1).xpath('//text()').extract()

    def test_select_on_unevaluable_nodes(self):
        r = self.sscls(text=u'<span class="big">some text</span>')
        # Text node
        x1 = r.xpath('//text()')
        self.assertEquals(x1.extract(), [u'some text'])
        self.assertEquals(x1.xpath('.//b').extract(), [])
        # Tag attribute
        x1 = r.xpath('//span/@class')
        self.assertEquals(x1.extract(), [u'big'])
        self.assertEquals(x1.xpath('.//text()').extract(), [])

    def test_select_on_text_nodes(self):
        r = self.sscls(text=u'<div><b>Options:</b>opt1</div><div><b>Other</b>opt2</div>')
        x1 = r.xpath("//div/descendant::text()[preceding-sibling::b[contains(text(), 'Options')]]")
        self.assertEquals(x1.extract(), [u'opt1'])
        x1 = r.xpath("//div/descendant::text()/preceding-sibling::b[contains(text(), 'Options')]")
        self.assertEquals(x1.extract(), [u'<b>Options:</b>'])

    def test_nested_select_on_text_nodes(self):
        # FIXME: does not work with lxml backend [upstream]
        r = self.sscls(text=u'<div><b>Options:</b>opt1</div><div><b>Other</b>opt2</div>')
        x1 = r.xpath("//div/descendant::text()")
        x2 = x1.xpath("./preceding-sibling::b[contains(text(), 'Options')]")
        self.assertEquals(x2.extract(), [u'<b>Options:</b>'])
    test_nested_select_on_text_nodes.skip = "Text nodes lost parent node reference in lxml"

    def test_weakref_slots(self):
        """Check that classes are using slots and are weak-referenceable"""
        x = self.sscls()
        weakref.ref(x)
        assert not hasattr(x, '__dict__'), "%s does not use __slots__" % \
            x.__class__.__name__

    def test_remove_namespaces(self):
        xml = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-US" xmlns:media="http://search.yahoo.com/mrss/">
  <link type="text/html">
  <link type="application/atom+xml">
</feed>
"""
        sel = self.sscls(XmlResponse("http://example.com/feed.atom", body=xml))
        self.assertEqual(len(sel.xpath("//link")), 0)
        sel.remove_namespaces()
        self.assertEqual(len(sel.xpath("//link")), 2)

    def test_remove_attributes_namespaces(self):
        xml = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns:atom="http://www.w3.org/2005/Atom" xml:lang="en-US" xmlns:media="http://search.yahoo.com/mrss/">
  <link atom:type="text/html">
  <link atom:type="application/atom+xml">
</feed>
"""
        sel = self.sscls(XmlResponse("http://example.com/feed.atom", body=xml))
        self.assertEqual(len(sel.xpath("//link/@type")), 0)
        sel.remove_namespaces()
        self.assertEqual(len(sel.xpath("//link/@type")), 2)
class DeprecatedXpathSelectorTest(unittest.TestCase):
    """Tests for the deprecated *XPathSelector family (warning emission and
    backward-compatible ``select()`` behavior).
    """

    text = '<div><img src="a.jpg"><p>Hello</div>'

    def test_warnings(self):
        # BUG FIX: the original tuple listed XPathSelector twice and never
        # exercised XmlXPathSelector.
        for cls in XPathSelector, HtmlXPathSelector, XmlXPathSelector:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                hs = cls(text=self.text)
                assert len(w) == 1, w
                assert issubclass(w[0].category, ScrapyDeprecationWarning)
                assert 'deprecated' in str(w[-1].message)
                hs.select("//div").extract()
                assert issubclass(w[1].category, ScrapyDeprecationWarning)
                assert 'deprecated' in str(w[-1].message)

    def test_xpathselector(self):
        with warnings.catch_warnings(record=True):
            hs = XPathSelector(text=self.text)
            self.assertEqual(hs.select("//div").extract(),
                             [u'<div><img src="a.jpg"><p>Hello</p></div>'])
            self.assertRaises(RuntimeError, hs.css, 'div')

    def test_htmlxpathselector(self):
        with warnings.catch_warnings(record=True):
            hs = HtmlXPathSelector(text=self.text)
            self.assertEqual(hs.select("//div").extract(),
                             [u'<div><img src="a.jpg"><p>Hello</p></div>'])
            self.assertRaises(RuntimeError, hs.css, 'div')

    def test_xmlxpathselector(self):
        with warnings.catch_warnings(record=True):
            xs = XmlXPathSelector(text=self.text)
            self.assertEqual(xs.select("//div").extract(),
                             [u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
            self.assertRaises(RuntimeError, xs.css, 'div')
| 2.640625 | 3 |
tournaments/urls.py | rusnyder/pelotourney | 0 | 12760916 | from django.urls import path
from . import views
app_name = "tournaments"
urlpatterns = [
# ex: /tournaments/
path("", views.IndexView.as_view(), name="index"),
# ex: /tournaments/new
path("new/", views.CreateView.as_view(), name="create"),
# ex: /tournaments/authorize
path("authorize/", views.LinkProfileView.as_view(), name="authorize"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/
path("<uid>/", views.DetailView.as_view(), name="detail"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/edit
path("<uid>/edit/", views.EditView.as_view()),
path("<uid>/edit/<tab>", views.EditView.as_view(), name="edit"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/sync
path("<uid>/sync", views.SyncView.as_view(), name="sync"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/rider_search
path("<uid>/rider_search", views.RiderSearchView.as_view(), name="rider_search"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/teams
path("<uid>/teams", views.EditTeamsView.as_view(), name="teams"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/teams/bulk
path("<uid>/teams/bulk", views.UpdateTeamsView.as_view(), name="update_teams"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/rides
path("<uid>/rides", views.EditRidesView.as_view(), name="rides"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/rides/filters
path("<uid>/rides/filters", views.RideFiltersView.as_view(), name="ride_filters"),
# ex: /ZuwX4mKCPgK06sIin8QJxQ/permissions
path(
"<uid>/permissions", views.UpdatePermissionsView.as_view(), name="permissions"
),
]
| 1.945313 | 2 |
svae/utils/mlflow_utils.py | APodolskiy/SentenceVAE | 0 | 12760917 | <gh_stars>0
import git
from pathlib import Path
from typing import Union, Optional, Tuple, Dict
from mlflow.entities import Run
from mlflow.tracking import MlflowClient
from mlflow.utils.mlflow_tags import MLFLOW_GIT_COMMIT, MLFLOW_GIT_BRANCH
def log_params(client: MlflowClient, run: Run, params: Dict):
    """Log every key/value pair in ``params`` against the given MLflow run."""
    for name, value in params.items():
        client.log_param(run_id=run.info.run_uuid, key=name, value=value)
def log_metrics(client: MlflowClient, run: Run,
                metrics: Dict, step: Optional[int] = None):
    """Log every metric in ``metrics`` against the run, at optional ``step``."""
    for name, value in metrics.items():
        client.log_metric(run_id=run.info.run_uuid, key=name, value=value, step=step)
def get_experiment_id(mlflow_client: MlflowClient, experiment_name: str) -> int:
    """Return the id of ``experiment_name``, creating the experiment if absent."""
    experiment = mlflow_client.get_experiment_by_name(experiment_name)
    if experiment is not None:
        return experiment.experiment_id
    return mlflow_client.create_experiment(experiment_name)
def get_git_info(path: Union[str, Path]) -> Optional[Tuple[str, str]]:
    """Return ``(commit_sha, branch_name)`` for the git repo containing *path*.

    Mainly an adaptation of mlflow.utils.context's ``_get_git_commit``.

    :param path: file or directory inside (or at the root of) a git repo;
        a file path is resolved to its parent directory.
    :return: ``(hexsha, branch)`` tuple, or ``None`` when the path does not
        exist or is not inside a usable git repository (including detached
        HEAD, which raises ``TypeError``-adjacent errors caught below).
    """
    path = Path(path)
    if not path.exists():
        return None
    if path.is_file():
        path = path.parent
    try:
        repo = git.Repo(path)
        commit = repo.head.commit.hexsha
        branch = repo.active_branch.name
        return commit, branch
    # Any git-related failure (not a repo, git binary missing, bad path,
    # detached HEAD raising ValueError) degrades gracefully to None.
    except (git.InvalidGitRepositoryError, git.GitCommandNotFound, ValueError, git.NoSuchPathError):
        return None
def get_git_tags(path: Union[str, Path]) -> Optional[Dict]:
    """Return MLflow git tags (commit + branch) for *path*, or None if no repo."""
    git_info = get_git_info(path)
    if git_info is None:
        return None
    return dict(zip((MLFLOW_GIT_COMMIT, MLFLOW_GIT_BRANCH), git_info))
| 2.171875 | 2 |
forch/radius_datatypes.py | henry54809/forch | 1 | 12760918 | <filename>forch/radius_datatypes.py
"""Radius Attribute Datatypes"""
import struct
import math
from forch.utils import MessageParseError
class DataType():
    """Parent datatype class, subclass should provide implementation for abstractmethods."""
    DATA_TYPE_VALUE = None
    # RADIUS Attribute-Value-Pair header: 1 byte type + 1 byte length.
    AVP_HEADER_LEN = 1 + 1
    MAX_DATA_LENGTH = 253
    MIN_DATA_LENGTH = 1

    bytes_data = None  # bytes version of raw_data

    def parse(self, packed_value):
        """Parse packed bytes into an instance (implemented by subclasses)."""
        return

    def pack(self, attribute_type):
        """Pack this value into AVP wire format (implemented by subclasses)."""
        return

    def data(self):
        """Subclass should override this as needed.
        Returns:
            The python type (int, str, bytes) of the bytes_data.
            This will perform any decoding as required instead of using the unprocessed bytes_data.
        """
        return self.bytes_data

    def data_length(self):
        """
        Returns:
            length of the data field, and not total length of the attribute (including the
            type and length).
            If total is required use full_length.
        """
        return 0

    def full_length(self):
        """
        Returns:
            Length of the whole field include the header (type and length)
        """
        return self.data_length() + self.AVP_HEADER_LEN

    @classmethod
    def is_valid_length(cls, packed_value):
        """Raise ValueError unless len(packed_value) lies in
        [MIN_DATA_LENGTH, MAX_DATA_LENGTH]; returns None when valid.

        FIX: the original condition duplicated the upper-bound test and also
        compared ``length != len(packed_value)`` (always False); both dead
        clauses are removed -- behavior and error message are unchanged.
        """
        length = len(packed_value)
        if not cls.MIN_DATA_LENGTH <= length <= cls.MAX_DATA_LENGTH:
            raise ValueError("RADIUS data type '%s' length must be: %d <= actual_length(%d) <= %d"
                             % (cls.__name__, cls.MIN_DATA_LENGTH, length, cls.MAX_DATA_LENGTH))
class Integer(DataType):
    """RADIUS 'integer' data type: an unsigned 32-bit big-endian value."""
    DATA_TYPE_VALUE = 1
    MAX_DATA_LENGTH = 4
    MIN_DATA_LENGTH = 4

    def __init__(self, bytes_data=None, raw_data=None):
        if raw_data:
            try:
                bytes_data = raw_data.to_bytes(self.MAX_DATA_LENGTH, "big")
            except OverflowError as range_overflow_error:
                raise ValueError("Integer must be >= 0 and <= 2^32-1, was %d" %
                                 raw_data) from range_overflow_error
        self.bytes_data = bytes_data

    @classmethod
    def parse(cls, packed_value):
        """Unpack 4 network-order bytes, or raise MessageParseError."""
        try:
            cls.is_valid_length(packed_value)
            unpacked = struct.unpack("!4s", packed_value)
            return cls(bytes_data=unpacked[0])
        except (ValueError, struct.error) as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception

    def pack(self, attribute_type):
        """Return the 4 data bytes (AVP header is added by the caller)."""
        return struct.pack("!4s", self.bytes_data)

    def data(self):
        """Return the stored value as a python int."""
        return int.from_bytes(self.bytes_data, 'big')  # pytype: disable=attribute-error

    def data_length(self):
        """Integers always occupy exactly 4 data bytes."""
        return 4
class Enum(DataType):
    """RADIUS 'enum' data type: an unsigned 32-bit big-endian value.
    NOTE(review): implementation is an exact duplicate of Integer (RFC 8044
    defines enum with integer wire format); kept separate so attributes can
    declare the correct DATA_TYPE_VALUE.
    """
    DATA_TYPE_VALUE = 2
    MAX_DATA_LENGTH = 4
    MIN_DATA_LENGTH = 4
    def __init__(self, bytes_data=None, raw_data=None):
        if raw_data:
            try:
                bytes_data = raw_data.to_bytes(self.MAX_DATA_LENGTH, "big")
            except OverflowError as range_overflow_error:
                raise ValueError("Integer must be >= 0 and <= 2^32-1, was %d" %
                                 raw_data) from range_overflow_error
        self.bytes_data = bytes_data
    @classmethod
    def parse(cls, packed_value):
        """Unpack 4 network-order bytes, or raise MessageParseError."""
        try:
            cls.is_valid_length(packed_value)
            return cls(bytes_data=struct.unpack("!4s", packed_value)[0])
        except (ValueError, struct.error)as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception
    def pack(self, attribute_type):
        """Return the 4 data bytes (AVP header is added by the caller)."""
        return struct.pack("!4s", self.bytes_data)
    def data(self):
        """Return the stored value as a python int."""
        return int.from_bytes(self.bytes_data, 'big')  # pytype: disable=attribute-error
    def data_length(self):
        """Enums always occupy exactly 4 data bytes."""
        return 4
class Text(DataType):
    """RADIUS 'text' data type: UTF-8 encoded human-readable string."""
    DATA_TYPE_VALUE = 4

    def __init__(self, bytes_data=None, raw_data=None):
        if raw_data is not None:
            bytes_data = raw_data.encode()
        self.is_valid_length(bytes_data)
        self.bytes_data = bytes_data

    @classmethod
    def parse(cls, packed_value):
        """Build a Text from packed bytes, or raise MessageParseError."""
        try:
            cls.is_valid_length(packed_value)
            length = len(packed_value)
            return cls(struct.unpack("!%ds" % length, packed_value)[0])
        except (ValueError, struct.error) as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception

    def pack(self, attribute_type):
        """Return the raw UTF-8 data bytes (AVP header added by caller)."""
        return struct.pack("!%ds" % len(self.bytes_data), self.bytes_data)

    def data(self):
        """Decode the stored bytes as UTF-8 text."""
        return self.bytes_data.decode("UTF-8")

    def data_length(self):
        return len(self.bytes_data)
class String(DataType):
    """RADIUS 'string' data type: opaque binary data.
    Unlike Text (which is UTF-8 and decoded on access), String stores and
    returns raw bytes; str input is encoded once on construction.
    """
    DATA_TYPE_VALUE = 5
    def __init__(self, bytes_data=None, raw_data=None):
        if raw_data is not None:
            if isinstance(raw_data, bytes):
                bytes_data = raw_data
            else:
                bytes_data = raw_data.encode()
        self.is_valid_length(bytes_data)
        self.bytes_data = bytes_data
    @classmethod
    def parse(cls, packed_value):
        """Build a String from packed bytes, or raise MessageParseError."""
        try:
            cls.is_valid_length(packed_value)
            return cls(struct.unpack("!%ds" % len(packed_value), packed_value)[0])
        except (ValueError, struct.error) as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception
    def pack(self, attribute_type):
        """Return the raw data bytes (AVP header added by caller)."""
        return struct.pack("!%ds" % len(self.bytes_data), self.bytes_data)
    def data_length(self):
        return len(self.bytes_data)
class Concat(DataType):
    """AttributeTypes that use Concat must override their pack()"""
    DATA_TYPE_VALUE = 6
    def __init__(self, bytes_data=None, raw_data=None):
        # raw_data, when given, is a hex string (e.g. "deadbeef").
        if raw_data:
            bytes_data = bytes.fromhex(raw_data)
        # self.is_valid_length(data)
        self.bytes_data = bytes_data
    @classmethod
    def parse(cls, packed_value):
        # TODO how do we want to do valid length checking here?
        #
        # Parsing is (generally) for packets coming from the radius server.
        # Packing is (generally) for packets going to the radius server.
        #
        # Therefore we error out if length is too long
        # (you are not allowed to have AVP that are too long)
        try:
            return cls(struct.unpack("!%ds" % len(packed_value), packed_value)[0])
        except struct.error as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception
    def pack(self, attribute_type):
        """Pack as a sequence of AVPs, each at most MAX_DATA_LENGTH bytes of
        data: oversized values are split into multiple attributes, each with
        its own type+length header.
        """
        def chunks(data):
            # Slice ``data`` into MAX_DATA_LENGTH-sized AVPs; the final chunk
            # carries the remainder.
            length = self.MAX_DATA_LENGTH
            list_length = len(data)
            return_chunks = []
            for _iter in range(0, list_length, self.MAX_DATA_LENGTH):
                if _iter + self.MAX_DATA_LENGTH > list_length:
                    length = list_length % self.MAX_DATA_LENGTH
                chunk = data[_iter:_iter + length]
                chunk_length = len(chunk)
                packed = struct.pack("!BB%ds" % chunk_length, attribute_type,
                                     chunk_length + self.AVP_HEADER_LEN,
                                     chunk)
                return_chunks.append(packed)
            return return_chunks
        packed = b"".join(chunks(self.bytes_data))
        return packed
    def data(self):
        return self.bytes_data
    def full_length(self):
        # Total wire length: one AVP header per chunk plus the data itself.
        # (Formula verified for 0, exactly-full, and overflow chunk counts.)
        return self.AVP_HEADER_LEN * \
               (math.ceil(len(self.bytes_data) / self.MAX_DATA_LENGTH + 1)) \
               + len(self.bytes_data) - self.AVP_HEADER_LEN
    def data_length(self):
        return len(self.bytes_data)
class Vsa(DataType):
    """Vendor Specific Attributes"""
    DATA_TYPE_VALUE = 14
    VENDOR_ID_LEN = 4
    # Data must carry at least the 4-byte vendor-id plus 1 byte of vsa-data.
    MIN_DATA_LENGTH = 5
    def __init__(self, bytes_data=None, raw_data=None):
        # raw_data, when given, is already wire-format bytes.
        if raw_data:
            bytes_data = raw_data
        self.is_valid_length(bytes_data)
        self.bytes_data = bytes_data
    @classmethod
    def parse(cls, packed_value):
        # TODO Vsa.parse does not currently separate the vendor-id from the vsa-data
        # we could do that at some point (e.g. if we wanted to use Vendor-Specific)
        try:
            cls.is_valid_length(packed_value)
            return cls(struct.unpack("!%ds" % len(packed_value), packed_value)[0])
        except (ValueError, struct.error) as exception:
            raise MessageParseError("%s unable to unpack." % cls.__name__) from exception
    def pack(self, attribute_type):
        """Return vendor-id + vsa-data bytes (AVP header added by caller)."""
        return struct.pack("!%ds" % (self.data_length()), self.bytes_data)
    def data_length(self):
        return len(self.bytes_data)
| 3.28125 | 3 |
pychemia/code/fireball/task/__init__.py | petavazohi/PyChemia | 67 | 12760919 | __author__ = '<NAME>'
| 1.085938 | 1 |
fisheye/image_transformation.py | benmaier/python-fisheye | 10 | 12760920 | <reponame>benmaier/python-fisheye
import numpy as np
from scipy.interpolate import interp2d, RectBivariateSpline
from scipy.ndimage.filters import gaussian_filter
def apply_to_image(img_data, F, use_cartesian = False, interpolation_order = 1, gaussian_blur=False):
    """Apply a fisheye effect to image data.

    Parameters
    ----------
    img_data : numpy.ndarray
        An array containing the image data. It has to be of shape (m, n) for greyscale,
        or of shape (m, n, k) for k color channels.
    F : fisheye.fisheye
        Instance of a fisheye class with defined radius R and focus set (both in pixel
        coordinates).
    use_cartesian: bool, default = False
        Usually, a radial fisheye function is demanded, this switch enforces the cartesian
        transformation instead.
    interpolation_order : int, default = 1
        Order of interpolation for the RectBivariateSpline algorithm
    gaussian_blur : bool, default = False
        Apply a gaussian blur fliter to the transformed area.

    Returns
    -------
    new_img_data : numpy.ndarray
        The transformed data in the shape of img_data.
    """
    orig_shape = img_data.shape
    k = interpolation_order
    # Normalize greyscale input to a 3D (m, n, 1) array so the per-channel
    # loop below handles both cases uniformly.
    if len(img_data.shape) == 2:
        data = img_data.reshape((img_data.shape[0],img_data.shape[1],1))
    elif len(img_data.shape) != 3:
        raise ValueError("Wrong shape of data:", img_data.shape)
    else:
        data = img_data.copy()
    # Focus (fx, fy) and radius R are in pixel coordinates of axes 0 and 1.
    fx, fy = F.focus
    R = F.R
    x = np.arange(data.shape[0])
    y = np.arange(data.shape[1])
    if not use_cartesian:
        # Radial mode: collect the pixels inside the disc of radius R around
        # the focus (clipped to the image bounds).
        coords = []
        for i in range(max(0, int(fx-R)), min(data.shape[0],int(fx+R))):
            for j in range(max(0, int(fy-R)), min(data.shape[1],int(fy+R))):
                if np.sqrt((fx-i)**2 + (fy-j)**2) < R:
                    coords.append((i,j))
    else:
        # Cartesian mode: the full (clipped) bounding square around the focus.
        coords = [ (i,j) for i in range(max(0, int(fx-R)), min(data.shape[0],int(fx+R)))\
                         for j in range(max(0, int(fy-R)), min(data.shape[1],int(fy+R))) ]
    coord_arr = np.array(coords)
    # Map each output pixel back to the source location it samples from.
    if not use_cartesian:
        inv_coords = F.inverse_radial_2D(np.array(coord_arr,dtype=float))
    else:
        inv_coords = F.inverse_cartesian(np.array(coord_arr,dtype=float))
    new_data = data.copy()
    for color in range(data.shape[2]):
        # Spline-interpolate the channel, then resample at the inverse-mapped
        # (generally fractional) coordinates.
        transform_function = RectBivariateSpline(x, y, data[:,:,color],kx=k,ky=k)
        transformed = transform_function(inv_coords[:,0].flatten(), inv_coords[:,1].flatten(),grid=False)
        new_data[:,:,color] = data[:,:,color]
        new_data[coord_arr[:,0], coord_arr[:,1], color] = transformed
        if gaussian_blur:
            # NOTE(review): the filter runs on the flattened 1-D fancy-indexed
            # vector, not on the 2-D patch -- presumably intended as a cheap
            # smoothing pass; confirm.
            new_data[coord_arr[:,0], coord_arr[:,1], color] = gaussian_filter(
                    new_data[coord_arr[:,0], coord_arr[:,1], color],
                    sigma = 2
                )
    return new_data.reshape(orig_shape)
if __name__ == "__main__":
    # Demo: load an example image and show it untransformed, with the radial
    # fisheye, and with the cartesian fisheye variant.
    import matplotlib.pyplot as pl
    data = pl.imread('../sandbox/example.png')
    from fisheye import fisheye
    # Fisheye of radius 300 px, focused at pixel (400, 400).
    F = fisheye(300,xw=0.3,mode='default',d=2)
    F.set_focus([400,400])
    transformed = apply_to_image(data, F)
    pl.imshow(data)
    pl.figure()
    pl.imshow(transformed)
    pl.figure()
    pl.imshow(apply_to_image(data, F, use_cartesian = True))
    pl.show()
| 3.28125 | 3 |
nmigen/test/test_lib_cdc.py | antmicro/nmigen | 0 | 12760921 | # nmigen: UnusedElaboratable=no
from .utils import *
from ..hdl import *
from ..back.pysim import *
from ..lib.cdc import *
class FFSynchronizerTestCase(FHDLTestCase):
    """Tests for lib.cdc.FFSynchronizer (multi-stage flip-flop synchronizer)."""
    def test_stages_wrong(self):
        # Constructor must reject non-positive and single-stage configurations.
        with self.assertRaisesRegex(TypeError,
                r"^Synchronization stage count must be a positive integer, not 0$"):
            FFSynchronizer(Signal(), Signal(), stages=0)
        with self.assertRaisesRegex(ValueError,
                r"^Synchronization stage count may not safely be less than 2$"):
            FFSynchronizer(Signal(), Signal(), stages=1)
    def test_basic(self):
        # Default 2-stage synchronizer: input appears at the output after
        # two clock edges (checked before the edge on each iteration).
        i = Signal()
        o = Signal()
        frag = FFSynchronizer(i, o)
        sim = Simulator(frag)
        sim.add_clock(1e-6)
        def process():
            self.assertEqual((yield o), 0)
            yield i.eq(1)
            yield Tick()
            self.assertEqual((yield o), 0)
            yield Tick()
            self.assertEqual((yield o), 0)
            yield Tick()
            self.assertEqual((yield o), 1)
        sim.add_process(process)
        sim.run()
    def test_reset_value(self):
        # With reset=1 the chain powers up at 1 and a 0 input propagates
        # through after the same two-stage latency.
        i = Signal(reset=1)
        o = Signal()
        frag = FFSynchronizer(i, o, reset=1)
        sim = Simulator(frag)
        sim.add_clock(1e-6)
        def process():
            self.assertEqual((yield o), 1)
            yield i.eq(0)
            yield Tick()
            self.assertEqual((yield o), 1)
            yield Tick()
            self.assertEqual((yield o), 1)
            yield Tick()
            self.assertEqual((yield o), 0)
        sim.add_process(process)
        sim.run()
class AsyncFFSynchronizerTestCase(FHDLTestCase):
    """Tests for lib.cdc.AsyncFFSynchronizer (async-assert, sync-deassert)."""

    def test_stages_wrong(self):
        # BUG FIX: this test constructed ResetSynchronizer (copy-paste from
        # ResetSynchronizerTestCase below) and so never exercised
        # AsyncFFSynchronizer's own stage validation.
        with self.assertRaisesRegex(TypeError,
                r"^Synchronization stage count must be a positive integer, not 0$"):
            AsyncFFSynchronizer(Signal(), Signal(), stages=0)
        with self.assertRaisesRegex(ValueError,
                r"^Synchronization stage count may not safely be less than 2$"):
            AsyncFFSynchronizer(Signal(), Signal(), stages=1)

    def test_edge_wrong(self):
        with self.assertRaisesRegex(ValueError,
                r"^AsyncFFSynchronizer async edge must be one of 'pos' or 'neg', not 'xxx'$"):
            AsyncFFSynchronizer(Signal(), Signal(), domain="sync", async_edge="xxx")

    def test_pos_edge(self):
        # Output asserts combinatorially on the rising edge of i and
        # deasserts synchronously two clocks after i falls.
        i = Signal()
        o = Signal()
        m = Module()
        m.domains += ClockDomain("sync")
        m.submodules += AsyncFFSynchronizer(i, o)

        sim = Simulator(m)
        sim.add_clock(1e-6)
        def process():
            # initial reset
            self.assertEqual((yield i), 0)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)

            yield i.eq(1)
            yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield i.eq(0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
        sim.add_process(process)
        with sim.write_vcd("test.vcd"):
            sim.run()

    def test_neg_edge(self):
        # Same as test_pos_edge but asserting on the falling edge of i.
        i = Signal(reset=1)
        o = Signal()
        m = Module()
        m.domains += ClockDomain("sync")
        m.submodules += AsyncFFSynchronizer(i, o, async_edge="neg")

        sim = Simulator(m)
        sim.add_clock(1e-6)
        def process():
            # initial reset
            self.assertEqual((yield i), 1)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)

            yield i.eq(0)
            yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield i.eq(1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield o), 0)
            yield Tick(); yield Delay(1e-8)
        sim.add_process(process)
        with sim.write_vcd("test.vcd"):
            sim.run()
class ResetSynchronizerTestCase(FHDLTestCase):
    """Tests for lib.cdc.ResetSynchronizer: asynchronous reset assertion with
    synchronized, stage-delayed deassertion on the target domain."""
    def test_stages_wrong(self):
        # Stage-count validation: non-positive -> TypeError, 1 -> ValueError.
        with self.assertRaisesRegex(TypeError,
                r"^Synchronization stage count must be a positive integer, not 0$"):
            ResetSynchronizer(Signal(), stages=0)
        with self.assertRaisesRegex(ValueError,
                r"^Synchronization stage count may not safely be less than 2$"):
            ResetSynchronizer(Signal(), stages=1)
    def test_basic(self):
        arst = Signal()
        m = Module()
        m.domains += ClockDomain("sync")
        m.submodules += ResetSynchronizer(arst)
        # Probe: s starts at its reset value (1) and is driven to 0 each clock,
        # so s == 1 indicates the domain is (still) being held in reset.
        s = Signal(reset=1)
        m.d.sync += s.eq(0)
        sim = Simulator(m)
        sim.add_clock(1e-6)
        def process():
            # initial reset
            self.assertEqual((yield s), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 0)
            yield Tick(); yield Delay(1e-8)
            # Asserting arst resets the domain asynchronously...
            yield arst.eq(1)
            yield Delay(1e-8)
            self.assertEqual((yield s), 0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 1)
            # ...and deassertion is released only after the stage delay.
            yield arst.eq(0)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 1)
            yield Tick(); yield Delay(1e-8)
            self.assertEqual((yield s), 0)
            yield Tick(); yield Delay(1e-8)
        sim.add_process(process)
        with sim.write_vcd("test.vcd"):
            sim.run()
# TODO: test with distinct clocks
class PulseSynchronizerTestCase(FHDLTestCase):
    """Tests for lib.cdc.PulseSynchronizer, which transfers single-cycle
    pulses between clock domains."""
    def test_stages_wrong(self):
        # Stage-count validation: non-positive -> TypeError, 1 -> ValueError.
        with self.assertRaisesRegex(TypeError,
                r"^Synchronization stage count must be a positive integer, not 0$"):
            PulseSynchronizer("w", "r", stages=0)
        with self.assertRaisesRegex(ValueError,
                r"^Synchronization stage count may not safely be less than 2$"):
            PulseSynchronizer("w", "r", stages=1)
    def test_smoke(self):
        m = Module()
        m.domains += ClockDomain("sync")
        # Same domain on both sides: only basic pulse conservation is checked.
        ps = m.submodules.dut = PulseSynchronizer("sync", "sync")
        sim = Simulator(m)
        sim.add_clock(1e-6)
        def process():
            yield ps.i.eq(0)
            # TODO: think about reset
            for n in range(5):
                yield Tick()
            # Make sure no pulses are generated in quiescent state
            for n in range(3):
                yield Tick()
                self.assertEqual((yield ps.o), 0)
            # Check conservation of pulses
            accum = 0
            for n in range(10):
                yield ps.i.eq(1 if n < 4 else 0)
                yield Tick()
                accum += yield ps.o
            # Four input cycles high must yield exactly four output pulses.
            self.assertEqual(accum, 4)
        sim.add_process(process)
        sim.run()
| 2.078125 | 2 |
migration-script/migrate_rt_paris.py | iomonad/qgis-tools | 4 | 12760922 | <filename>migration-script/migrate_rt_paris.py
#
# Nomalize Data
# (c) iomonad <<EMAIL>>
#
# Normalizes feature attributes on the active QGIS layer.
# Attribute indices written below: 0 = name, 2 = type, 3 = brand(?),
# 4 = category(?) -- same writes, in the same order, as before.
FALLBACK_NAME = "Trappe d'accès"

layer = iface.activeLayer()
layer.startEditing()
for feature in layer.getFeatures():
    fid = feature.id()
    raw_name = feature['name']
    if not raw_name:
        # Unnamed feature: assign the fallback label and move on.
        layer.changeAttributeValue(fid, 0, FALLBACK_NAME)
        continue
    # Normalized display name: trimmed and capitalized.
    layer.changeAttributeValue(fid, 0, str(raw_name).strip().capitalize())
    lowered = str(raw_name).lower()
    if "trappe" in lowered or "plaque" in lowered:
        layer.changeAttributeValue(fid, 3, "Abloy")
        layer.changeAttributeValue(fid, 2, "Trappe")
    if "porte" in lowered:
        layer.changeAttributeValue(fid, 2, "Porte")
    if "pep" in lowered:
        if feature['descriptio'] and "pep" in str(feature['descriptio']).lower():
            layer.changeAttributeValue(fid, 4, "PEP")
        layer.changeAttributeValue(fid, 4, "PEP")
    if "ventil" in lowered:
        if feature['descriptio'] and "ventilation" in str(feature['descriptio']).lower():
            layer.changeAttributeValue(fid, 4, "VT")
        layer.changeAttributeValue(fid, 4, "VT")
    print(feature['name'])
layer.updateFields()
pigs.py | bayankhosi/FarmProgram | 1 | 12760923 | <gh_stars>1-10
from calendar import monthrange
import datetime
import openpyxl as opx
import pandas as pd
import upload
import statscalc
date_format = '%d/%m/%Y'  # NOTE(review): defined but never used below.
today = datetime.datetime.now().date()
month = datetime.datetime.now().month  # month number
year = datetime.datetime.now().year  # year
# Workbook layout (as used below): sheet 0 is the per-pig ledger, sheets
# 1..n hold one year each starting at 2020 -- TODO confirm against the file.
spread = opx.load_workbook('./Files/spread.xlsx')
# opens current year sheet
individual = spread.worksheets[0]
whole = spread.worksheets[year - 2020]
# total number of pigs
population = int(whole.cell(column=2, row=month + 1).value)
# Highest pig ID allocated so far, persisted in cell M1 of the ledger.
pig_id = individual['M1'].value
def buy_age(population, pig_id):  # recording new piglets
    """Interactively register a newly bought piglet.

    Increments the herd population for this month and the next, allocates
    the next pig ID (persisted in cell M1), and records sex, birth date
    (back-computed from the entered age in weeks), purchase date, purchase
    price and breed on the pig's ledger row.

    NOTE(review): rebinding the ``population``/``pig_id`` parameters does
    not update the module-level variables; the main loop keeps passing the
    values read at startup (the worksheet cells are updated, though).
    """
    population += 1  # add to number of pigs
    whole.cell(column=2, row=month + 1).value = population
    # to ensure nxt mnt pop not 0
    whole.cell(column=2, row=month + 2).value = population
    pig_id += 1
    individual['M1'].value = pig_id  # persist the new highest ID
    rw = pig_id + 1  # pig N is stored on ledger row N+1
    individual.cell(row=rw, column=1).value = pig_id
    print("\nThe pig's ID is: ", pig_id)
    sex = int(input("\nEnter sex of piglet \nMale(1)\nFemale(0): "))
    individual.cell(row=rw, column=12).value = sex
    age_bought = int(input("\nEnter Age of piglet (weeks): "))
    purchase_date = today  # code to record date
    # Birth date is derived from the entered age in weeks.
    date_born = purchase_date - datetime.timedelta(days=7 * age_bought)
    individual.cell(row=rw, column=11).value = purchase_date
    individual.cell(row=rw, column=2).value = date_born
    purchase_price = int(input("\nEnter purchase price: "))
    individual.cell(row=rw, column=3).value = purchase_price
    breed = str(input("""
    Choose Breed:
    n = Ncane
    m = Mngometulu
    t = Motjane
    """))
    individual.cell(row=rw, column=8).value = breed
    individual.cell(row=rw, column=10).value = 0  # feed eaten so far
def consumables():  # resources spent on well being
    """Interactively record a consumable purchase for the current month.

    1. Feed: adds mass (col 3) and price (col 4) to this month's totals and
       stores the resulting feed-per-pig average in col 5.
    2. Miscellaneous: adds the item price to col 5 of this month's row.
    3. Medicine: stores the dose (ml) on the pig's ledger row (col 9).

    NOTE(review): choices 1 and 2 both write column 5 of the same row, so a
    misc purchase overwrites/accumulates onto the feed-per-pig value --
    confirm the intended spreadsheet schema.
    """
    consumable_choice = int(
        input("\nWhich Consumable are you recording?\n1.Feed\n2.Miscelleneous\n3.Medicine\n"))
    if consumable_choice == 1:
        print("\nEnter mass of feed bought (Kg)")
        feed_weight = int(input()) + whole.cell(column=3, row=month+1).value
        # record the amount
        whole.cell(column=3, row=month+1).value = feed_weight
        print("\nEnter price of feed bought (E)")
        feed_price = int(input()) + whole.cell(column=4, row=month+1).value
        whole.cell(column=4, row=month+1).value = feed_price
        FeedPerPig = whole.cell(column=3, row=month + 1).value / \
            whole.cell(column=2, row=month + 1).value
        whole.cell(column=5, row=month + 1).value = FeedPerPig
    elif consumable_choice == 2:
        print("\nEnter price of item (E)")
        misc_price = int(input()) + whole.cell(column=5, row=month+1).value
        whole.cell(column=5, row=month+1).value = misc_price
    elif consumable_choice == 3:
        pig_id = int(input("Enter ID of pig medicating: "))
        rw = pig_id + 1
        med = float(input("Enter amount of medication (ml): "))
        individual.cell(row=rw, column=9).value = med
def sale(population):  # info on slaughter and sale
    """Interactively record the slaughter and sale of one pig.

    Decrements the population for this month and the next, stamps slaughter
    date/age/weight and sale price on the pig's ledger row, and estimates
    the total feed eaten from the monthly feed-per-pig averages between
    purchase and slaughter. Refuses pigs that already have a slaughter date.

    NOTE(review): rebinding ``population`` does not update the module-level
    variable; and the pandas read below hard-codes sheet '2021' while the
    rest of the program selects the sheet from the current year -- confirm.
    """
    # make averages for that individual pig available
    # profit on the pig by subtracting average spend on it
    pig_id = int(input("\nEnter ID of Pig Slaughtered: "))
    rw = pig_id + 1
    # check if there is non recorded slaughter for pig_id
    if individual.cell(row=rw, column=4).value == None:
        # subtract from number of pigs
        population -= 1
        whole.cell(column=2, row=month + 1).value = population
        # to ensure nxt mnt pop not 0
        whole.cell(column=2, row=month + 2).value = population
        print("\nNew Population: ", population)
        # record date of slaughter
        slaughter_date = today
        individual.cell(row=rw, column=4).value = slaughter_date
        # record slaughter age
        date_born = datetime.datetime.date(
            individual.cell(row=pig_id + 1, column=2).value)
        slaughter_age = int((today - date_born).days)
        print("\nEnter Slaughter Age of pig: ", slaughter_age, "days")
        individual.cell(row=rw, column=6).value = int(slaughter_age)
        # record slauhgter mass
        slaughter_weight = float(input("\nEnter Slaughter Weight of pig: "))
        individual.cell(row=rw, column=5).value = slaughter_weight
        price_Kg = float(input("\nPrice per Kg: "))
        sale_price = slaughter_weight * price_Kg
        individual.cell(row=rw, column=7).value = sale_price
        # estimate of total food mass eaten
        purchase_date = individual.cell(row=rw, column=11).value
        month_bought = purchase_date.month - 1
        month_slaughtered = today.month
        # don't count month feed if bought after mid-month
        if purchase_date.day > 15:
            month_bought += 1
        # don't count month feed if slaughtered before mid-month
        if today.day < 15:
            month_slaughtered -= 1
        df_month = pd.read_excel('./Files/spread.xlsx',
                                 sheet_name='2021',
                                 index_col=0)
        df_month['feed_per_pig'] = df_month.feed_mass / df_month.population
        feed_eaten = df_month[month_bought: month_slaughtered]['feed_per_pig'].sum(
        )
        individual.cell(row=rw, column=10).value = feed_eaten
    else:
        print("\nThis ID is for a pig that has already been slaughtered.\nTry again.")
def monitor():  # view collected data
    """Interactively display recorded data.

    1. Individual pig: purchase/birth info plus either current age or the
       recorded slaughter/sale figures.
    2. Whole month: population, average age, feed totals for a chosen month.
    3. Statistics: delegates plotting to statscalc.

    NOTE(review): the local ``month`` below shadows the module-level month,
    and FeedPerPig divides the viewed month's feed mass by the *current*
    module-level ``population`` rather than that month's population.
    """
    View = int(
        input("""
              View data for:
              1. Individual Pig
              2. Whole Month Data
              3. Statistics
              """))
    if View == 1:  # individual pig data
        pig_id = int(input("\nEnter ID of pig you want to view: "))
        purchase_date = datetime.datetime.date(individual.cell(
            row=pig_id + 1, column=2).value)
        date_born = datetime.datetime.date(
            individual.cell(row=pig_id + 1, column=2).value)
        print("\nDate Born: ", purchase_date)
        print("\nPurchase Price: E", individual.cell(
            row=pig_id + 1, column=3).value)
        # An empty slaughter-date cell (col 4 unused here; col 6 is age)
        # means the pig is still alive.
        if individual.cell(row=pig_id + 1, column=6).value == None:
            currAge = (today-date_born).days
            print("\nAge: ", currAge, "days")
        else:
            print("\nSlaughter Age: ", individual.cell(
                row=pig_id + 1, column=6).value, "days")
            print("\nSlaughter Weight: ", individual.cell(
                row=pig_id + 1, column=5).value, "Kg")
            print("\nSale Price: E", individual.cell(
                row=pig_id + 1, column=7).value)
            print("\nFeed Eaten: ", individual.cell(
                row=pig_id + 1, column=10).value)
    elif View == 2:  # month data
        month = int(input("\nEnter month number you want to view: "))
        avAge = whole.cell(column=6, row=month + 1).value
        Population = whole.cell(column=2, row=month + 1).value
        FeedMass = whole.cell(column=3, row=month + 1).value
        FeedPerPig = whole.cell(column=3, row=month + 1).value / population
        FeedPrice = whole.cell(column=4, row=month + 1).value
        print("\nData for", whole.cell(column=1, row=month + 1).value)
        print("\nPopulation: ", Population)
        print("\nAverage Age: ", avAge)
        print("\nFeed Mass Bought: ", FeedMass, "Kg")
        print("\nAverage feed per pig: ", FeedPerPig, "Kg/pig")
        print("\nPrice of feed: E", FeedPrice)
    elif View == 3:  # statistics
        graph = int(input(("""
                          Choose a graph
                          1. Mass-Age
                          """)))
        if graph == 1:
            statscalc.stats.mass_age()
        """ if graph == 2:
            statscalc.stats.feed_age() """
# Main menu loop: repeats until the user enters anything other than 2 at the
# "other operation" prompt. The workbook is saved after every action.
loop = 2
while loop == 2:
    action = int(input("""************************************************************************
    These are the operations that can be performed\n
    [1] - Record New Piglet
    [2] - Record Bought Consumable(s)
    [3] - Record Slaughter and Sale
    [4] - View Data
    """))
    if action == 1:
        # NOTE(review): buy_age/sale receive the startup values of the
        # module-level population/pig_id and cannot update them here.
        buy_age(population, pig_id)
    elif action == 2:
        consumables()
    elif action == 3:
        sale(population)
    elif action == 4:
        monitor()
    elif action == 5:
        # Hidden option (not shown in the menu): sync to remote storage.
        try:
            upload.main()
        except:  # NOTE(review): bare except silently hides all errors
            print("Couldn't upload")
    spread.save('./Files/spread.xlsx')
    loop = int(input("\n1. Exit, 2. For Other Operation: ",))
    print("************************************************************************")
# Final best-effort upload on exit.
try:
    upload.main()
except:  # NOTE(review): bare except silently hides all errors
    print("Couldn't upload")
| 2.984375 | 3 |
animationengine.py | SergeyRom-23/MB-Lab-master-RU | 0 | 12760924 | # MB-Lab
#
# Сайт ветки MB-Lab: https://github.com/animate1978/MB-Lab
# Сайт ветки перевода на русский язык MB-Lab: https://github.com/SergeyRom-23/MB-Lab-master-RU
#
# ##### НАЧАЛО ЛИЦЕНЗИОННОГО БЛОКА GPL #####
#
# Эта программа является свободным программным обеспечением; Вы можете распространять его и / или
# изменить его в соответствии с условиями GNU General Public License
# как опубликовано Фондом свободного программного обеспечения; либо версия 3
# Лицензии или (по вашему выбору) любой более поздней версии.
#
# Эта программа распространяется в надежде, что она будет полезна,
# но БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ; даже без подразумеваемой гарантии
# ИЗДЕЛИЯ или ПРИГОДНОСТЬ ДЛЯ ОСОБЫХ ЦЕЛЕЙ. Смотрите
# GNU General Public License для более подробной информации.
#
# Вам надо принять Стандартнуюй общественную лицензию GNU
# вместе с этой программой; если нет, напишите в Фонд свободного программного обеспечения,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### КОНЕЦ ЛИЦЕНЗИОННОГО БЛОКА GPL #####
#
# ManuelbastioniLAB - Авторские права (C) 2015-2018 <NAME>
# Перевод (C) 2019 <NAME> 23
import logging
import os
import json
import time
from functools import lru_cache
import mathutils
import bpy
from . import algorithms
from .utils import get_active_armature
logger = logging.getLogger(__name__)
class RetargetEngine:
    def __init__(self):
        """Initialize engine state and load the retarget knowledge database.

        ``has_data`` becomes True only when both the blend library and the
        JSON knowledge file exist; all retargeting features depend on it.
        """
        self.has_data = False
        self.femaleposes_exist = False
        self.maleposes_exist = False
        self.data_path = algorithms.get_data_path()
        # NOTE(review): data_path is passed twice to os.path.join below,
        # producing <data>/<data>/poses/... -- confirm this is intentional.
        self.maleposes_path = os.path.join(self.data_path, self.data_path, "poses", "male_poses")
        self.femaleposes_path = os.path.join(self.data_path, self.data_path, "poses", "female_poses")
        if os.path.isdir(self.maleposes_path):
            self.maleposes_exist = True
        if os.path.isdir(self.femaleposes_path):
            self.femaleposes_exist = True
        self.body_name = ""
        self.armature_name = ""
        self.skeleton_mapped = {}  # target bone name -> source bone name
        self.lib_filepath = algorithms.get_blendlibrary_path()
        self.knowledge_path = os.path.join(self.data_path, "retarget_knowledge.json")
        if os.path.isfile(self.lib_filepath) and os.path.isfile(self.knowledge_path):
            self.knowledge_database = algorithms.load_json_data(self.knowledge_path, "Skeleton knowledge data")
            self.local_rotation_bones = self.knowledge_database["local_rotation_bones"]
            self.last_selected_bone_name = None
            self.stored_animations = {}  # cached keyframe snapshots per armature/curve
            self.correction_is_sync = True
            self.is_animated_bone = ""
            self.rot_type = ""
            self.has_data = True
        else:
            logger.critical("Retarget database not found. Please check your Blender addons directory.")
@staticmethod
def get_selected_posebone():
if bpy.context.selected_pose_bones:
if bpy.context.selected_pose_bones:
return bpy.context.selected_pose_bones[0]
return None
    def is_editable_bone(self):
        """Update ``is_animated_bone`` with a UI status string.

        The value is "VALID_BONE" only when the active armature has an action
        and the cached ``rot_type`` is EULER or QUATERNION; otherwise a short
        human-readable reason is stored instead.
        """
        armat = get_active_armature()
        if armat:
            if armat.animation_data:
                if armat.animation_data.action:
                    if self.rot_type in ["EULER", "QUATERNION"]:
                        self.is_animated_bone = "VALID_BONE"
                    else:
                        self.is_animated_bone = "The bone has not anim. data"
                else:
                    self.is_animated_bone = "{0} has not action data".format(armat.name)
            else:
                self.is_animated_bone = "{0} has not animation data".format(armat.name)
        else:
            self.is_animated_bone = "No armature selected"
@staticmethod
def get_action(target_armature):
if target_armature and target_armature.animation_data:
return target_armature.animation_data.action
return None
    def check_correction_sync(self):
        """Refresh UI-sync state for the currently selected pose bone.

        Re-detects the bone's rotation curve type when the selection changed,
        then clears ``correction_is_sync`` if any of the three scene
        rotation-offset sliders differs from the offsets stored in the curves.
        """
        scn = bpy.context.scene
        selected_bone = self.get_selected_posebone()
        if selected_bone:
            if self.last_selected_bone_name != selected_bone.name:
                self.get_bone_rot_type()
            offsets = self.get_offset_values()
            if scn.mblab_rot_offset_0 != offsets[0]:
                self.correction_is_sync = False
            if scn.mblab_rot_offset_1 != offsets[1]:
                self.correction_is_sync = False
            if scn.mblab_rot_offset_2 != offsets[2]:
                self.correction_is_sync = False
            self.is_editable_bone()
            self.last_selected_bone_name = selected_bone.name
    def get_offset_values(self):
        """Return [x, y, z] offsets between current first keyframes and the
        snapshot cached in ``stored_animations`` for the selected bone.

        Quaternion curves skip channel 0 (w), so channels 1..3 are read.
        Channels without cached data stay 0.
        """
        offsets = [0, 0, 0]
        for i in (0, 1, 2):
            if self.rot_type == "QUATERNION":
                channel = i+1
            else:
                channel = i
            armat_name, animation_curve, animation_data_id = self.get_curve_data(channel)
            if armat_name in self.stored_animations.keys():
                if animation_data_id in self.stored_animations[armat_name].keys():
                    animation_data = self.stored_animations[armat_name][animation_data_id]
                    if animation_curve:
                        if animation_curve.keyframe_points:
                            # Offset = current first keyframe vs. stored first keyframe.
                            offsets[i] = animation_curve.keyframe_points[0].co[1] - animation_data[0]
        return offsets
    def identify_curve_rot(self, bone):
        """Return the rotation representation *bone* is keyframed with.

        Probes the active armature's action for quaternion, axis-angle and
        euler F-curves; returns "QUATERNION", "AXIS_ANGLE", "EULER" or
        "NO_CURVES".
        """
        r_type = "NO_CURVES"
        armat = get_active_armature()
        if armat:
            action = self.get_action(armat)
            if action and bone:
                d_path1 = f'pose.bones["{bone.name}"].rotation_quaternion'
                d_path2 = f'pose.bones["{bone.name}"].rotation_axis_angle'
                d_path3 = f'pose.bones["{bone.name}"].rotation_euler'
                animation_curve1 = action.fcurves.find(d_path1, index=0)
                animation_curve2 = action.fcurves.find(d_path2, index=0)
                animation_curve3 = action.fcurves.find(d_path3, index=0)
                # Later checks win: if several curve types coexist, EULER takes
                # precedence over AXIS_ANGLE, which takes precedence over QUATERNION.
                if animation_curve1:
                    r_type = "QUATERNION"
                if animation_curve2:
                    r_type = "AXIS_ANGLE"
                if animation_curve3:
                    r_type = "EULER"
        return r_type
    def get_bone_rot_type(self):
        """Cache the rotation-curve type of the selected pose bone in ``rot_type``."""
        selected_bone = self.get_selected_posebone()
        self.rot_type = self.identify_curve_rot(selected_bone)
def get_bone_curve_id(self, selected_bone):
if self.rot_type == "QUATERNION":
return f'pose.bones["{selected_bone.name}"].rotation_quaternion'
if self.rot_type == "EULER":
return f'pose.bones["{selected_bone.name}"].rotation_euler'
return None
    def get_curve_data(self, channel):
        """Look up the rotation F-curve of the selected bone on *channel*.

        Returns (armature name, fcurve, curve id string), or
        (None, None, None) when anything along the lookup chain is missing.
        """
        armat = get_active_armature()
        d_path = None
        if armat:
            action = self.get_action(armat)
            if action:
                selected_bone = self.get_selected_posebone()
                if selected_bone:
                    d_path = self.get_bone_curve_id(selected_bone)
                    if d_path:
                        animation_curve = action.fcurves.find(d_path, index=channel)
                        # Unique cache key: data path plus channel index.
                        animation_data_id = f'{d_path}{str(channel)}'
                        if animation_curve:
                            return (armat.name, animation_curve, animation_data_id)
        return (None, None, None)
    def reset_bones_correction(self):
        """Forget all cached per-curve keyframe snapshots."""
        self.stored_animations = {}
    def correct_bone_angle(self, channel, value):
        """Offset every keyframe of the selected bone's rotation curve.

        On first use per curve the pristine keyframe values are snapshotted
        into ``stored_animations``; *value* is then always applied relative
        to that snapshot, so repeated calls do not accumulate.
        """
        scn = bpy.context.scene
        if self.rot_type == "QUATERNION":
            channel += 1  # skip the w component
        armat_name, animation_curve, animation_data_id = self.get_curve_data(channel)
        if armat_name and animation_curve and animation_data_id:
            if armat_name not in self.stored_animations.keys():
                self.stored_animations[armat_name] = {}
            if animation_data_id not in self.stored_animations[armat_name].keys():
                # First touch: snapshot the original keyframe values.
                animation_data = []
                for kpoint in animation_curve.keyframe_points:
                    animation_data.append(kpoint.co[1])
                self.stored_animations[armat_name][animation_data_id] = animation_data
            else:
                animation_data = self.stored_animations[armat_name][animation_data_id]
            for i, _ in enumerate(animation_data):
                animation_curve.keyframe_points[i].co[1] = animation_data[i] + value
            animation_curve.update()
            scn.frame_set(scn.frame_current)  # force a scene refresh
    def align_bones_z_axis(self, target_armature, source_armature):
        """Roll each mapped target bone so its Z axis matches its source bone.

        Bone pairing goes through ``get_mapped_name``; both armatures are
        temporarily switched through EDIT mode to read/write edit-bone rolls.
        """
        armature_z_axis = {}
        if target_armature:
            if source_armature:
                logger.info("Aligning Z axis of %s with Z axis of %s",
                            target_armature.name, source_armature.name)
                algorithms.select_and_change_mode(source_armature, 'EDIT')
                for x_bone in target_armature.data.bones:
                    b_name = x_bone.name
                    source_bone_name = self.get_mapped_name(b_name)
                    if source_bone_name is not None:
                        # .copy() because edit-bone data dies on mode switch.
                        armature_z_axis[b_name] = source_armature.data.edit_bones[source_bone_name].z_axis.copy()
                    else:
                        logger.debug("Bone %s non mapped", b_name)
                algorithms.select_and_change_mode(source_armature, 'POSE')
                algorithms.select_and_change_mode(target_armature, 'EDIT')
                for armat_bone in target_armature.data.edit_bones:
                    if armat_bone.name in armature_z_axis:
                        z_axis = armature_z_axis[armat_bone.name]
                        armat_bone.align_roll(z_axis)
                algorithms.select_and_change_mode(target_armature, 'POSE')
    def reset_skeleton_mapped(self):
        """Clear the target->source bone name mapping."""
        self.skeleton_mapped = {}
    def init_skeleton_map(self, source_armat):
        """Reset all per-chain bone-name caches, then (re)map the skeleton.

        Every chain attribute is cleared to None and subsequently filled by
        ``map_main_bones`` (defined further down in this class).
        """
        self.reset_skeleton_mapped()
        self.already_mapped_bones = []
        # Torso / limbs / head chains.
        self.spine_bones_names = None
        self.rarm_bones_names = None
        self.larm_bones_names = None
        self.rleg_bones_names = None
        self.lleg_bones_names = None
        self.head_bones_names = None
        self.pelvis_bones_names = None
        # Per-toe chains, right then left.
        self.rtoe1_bones_names = None
        self.rtoe2_bones_names = None
        self.rtoe3_bones_names = None
        self.rtoe4_bones_names = None
        self.rtoe5_bones_names = None
        self.ltoe1_bones_names = None
        self.ltoe2_bones_names = None
        self.ltoe3_bones_names = None
        self.ltoe4_bones_names = None
        self.ltoe5_bones_names = None
        # Per-finger chains, right then left (0 = thumb).
        self.rfinger0_bones_names = None
        self.rfinger1_bones_names = None
        self.rfinger2_bones_names = None
        self.rfinger3_bones_names = None
        self.rfinger4_bones_names = None
        self.lfinger0_bones_names = None
        self.lfinger1_bones_names = None
        self.lfinger2_bones_names = None
        self.lfinger3_bones_names = None
        self.lfinger4_bones_names = None
        self.map_main_bones(source_armat)
@staticmethod
def name_combinations(bone_identifiers, side):
combinations = []
if side == 'RIGHT':
side_id = ("r", "right")
junctions = (".", "_", "-", "")
elif side == 'LEFT':
side_id = ("l", "left")
junctions = (".", "_", "-", "")
else:
side_id = [""]
junctions = [""]
for b_id in bone_identifiers:
for s_id in side_id:
for junct in junctions:
combinations.append(f'{b_id}{junct}{s_id}')
combinations.append(f'{s_id}{junct}{b_id}')
return combinations
def get_bone_by_exact_id(self, bones_to_scan, bone_identifiers, side):
if bones_to_scan:
name_combinations = self.name_combinations(bone_identifiers, side)
for b_name in bones_to_scan:
if b_name.lower() in name_combinations:
return b_name
return None
    def get_bone_by_childr(self, armat, bones_to_scan, childr_identifiers):
        """Find a bone in *bones_to_scan* having a matching child bone.

        A child qualifies when its name contains one of *childr_identifiers*,
        it is itself in *bones_to_scan*, and its name is not too similar to
        the parent's (algorithms.is_too_much_similar). Returns the parent
        bone's name or None.

        NOTE(review): ``self.get_bone`` is defined elsewhere in this class;
        also note the early ``return None`` as soon as one scanned bone
        cannot be resolved -- confirm that short-circuit is intended.
        """
        if childr_identifiers:
            for bone_name in bones_to_scan:
                x_bone = self.get_bone(armat, bone_name)
                if not x_bone:
                    return None
                for ch_bone in x_bone.children:
                    for ch_id in childr_identifiers:
                        c1 = algorithms.is_string_in_string(ch_id, ch_bone.name)
                        c2 = ch_bone.name in bones_to_scan
                        c3 = algorithms.is_too_much_similar(x_bone.name, ch_bone.name)
                        if c1 and c2 and not c3:
                            return x_bone.name
        return None
@staticmethod
def get_bones_by_index(bones_chain, index_data):
index = None
if bones_chain:
if len(index_data) == 1:
if index_data[0] == "LAST":
index = len(bones_chain)-1
else:
index = index_data[0]
if len(index_data) == 3:
if len(bones_chain) == index_data[0]:
index = index_data[1]
else:
index = index_data[2]
if index == "None":
index = None
if index is not None:
try:
return bones_chain[index]
except IndexError:
logger.warning("The chain %s of mocap file has less bones than the chain in MB-Lab", bones_chain)
return None
# def get_bones_by_parent(self, armat, bones_to_scan, parent_IDs):
# found_bones = set()
# for bone_name in bones_to_scan:
# parent_name = self.bone_parent_name(armat, bone_name)
# for pr_id in parent_IDs:
# if algorithms.is_string_in_string(pr_id, parent_name):
# found_bones.add(bone_name)
# return found_bones
@staticmethod
def get_bone_chains(armat, bone_names):
found_chains = []
for bone_name in bone_names:
bn = armat.data.bones[bone_name]
chain = [bone_name] + [b.name for b in bn.parent_recursive]
found_chains.append(chain)
return found_chains
@staticmethod
def get_all_bone_names(armat):
bone_names = []
for bn in armat.data.bones:
bone_names.append(bn.name)
return bone_names
@staticmethod
@lru_cache(maxsize=2)
def generate_bones_ids(side):
bone_ids = ("forearm", "elbow", "lowerarm", "hand", "wrist", "finger", "thumb", "index",
"ring", "pink", "thigh", "upperleg", "upper_leg", "leg", "knee", "shin", "calf",
"lowerleg", "lower_leg", "toe", "ball", "foot")
bn_pos = "r" if side == "RIGHT" else "l"
combo_bones_start = []
combo_bones_end = []
for b_id in bone_ids:
combo_bones_start.append(f'{bn_pos}{b_id}')
combo_bones_end.append(f'{b_id}{bn_pos}')
return combo_bones_start, combo_bones_end
    def is_in_side(self, bone_names, side):
        """Return the fraction (0..1) of *bone_names* that look side-specific.

        A bone counts when it carries a side prefix/suffix ('r.', '_l', ...),
        contains the full side word, or matches the generated side-tagged
        body-part identifiers.

        NOTE(review): id_side2/3/4 are only assigned for "RIGHT"/"LEFT"; any
        other *side* value raises UnboundLocalError inside the loop.
        NOTE(review): c4/c5 test the whole *bone_names* list and are thus
        loop-invariant -- if any bone matches, every bone longer than 3
        characters is counted; confirm that is intended.
        """
        score_level = 0.0
        if side == "RIGHT":
            id_side2 = "right"
            id_side3 = ("r.", "r_")
            id_side4 = ("_r", ".r")
        if side == "LEFT":
            id_side2 = "left"
            id_side3 = ("l.", "l_")
            id_side4 = ("_l", ".l")
        combo_bones_start, combo_bones_end = self.generate_bones_ids(side)
        for bone_name in bone_names:
            bone_name = bone_name.lower()
            if len(bone_name) > 3:
                c1 = bone_name[:2] in id_side3
                c2 = bone_name[-2:] in id_side4
                c3 = id_side2 in bone_name
                c4 = algorithms.is_in_list(bone_names, combo_bones_start, "START")
                c5 = algorithms.is_in_list(bone_names, combo_bones_end, "END")
                if c1 or c2 or c3 or c4 or c5:
                    score_level += 1
        if bone_names:
            return score_level/len(bone_names)
        return 0
@staticmethod
def order_with_list(bones_set, bones_list):
ordered_bones = []
for bone in bones_list:
if bone in bones_set:
ordered_bones.append(bone)
return ordered_bones
def chains_intersection(self, chains):
chain_sets = []
chain_inters = None
result_chain = []
for chain in chains:
chain_sets.append(set(chain))
for i, chain in enumerate(chain_sets):
chain_inters = chain if chain_inters is None else chain_inters.intersection(chain)
result_chain = self.order_with_list(chain_inters, chains[i])
return result_chain
@staticmethod
def filter_chains_by_max_length(chains):
longer_chains = []
max_length = 0
for chain in chains:
max_length = max(max_length, len(chain))
for chain in chains:
if len(chain) == max_length:
longer_chains.append(chain)
return longer_chains
def chains_difference(self, chain_list, subchain_list):
subchain_set = set(subchain_list)
chain_set = set(chain_list)
d_chain = chain_set.difference(subchain_set)
return self.order_with_list(d_chain, chain_list)
def filter_chains_by_side(self, chains):
left_chains = []
right_chains = []
center_chains = []
for chain in chains:
score_left = self.is_in_side(chain, "LEFT")
score_right = self.is_in_side(chain, "RIGHT")
if score_left > 0:
left_chains.append(chain)
elif score_right > 0:
right_chains.append(chain)
else:
center_chains.append(chain)
if not center_chains:
score_threshold = 0
for chain in chains:
score_left = self.is_in_side(chain, "LEFT")
score_right = self.is_in_side(chain, "RIGHT")
score_center = 1.0-score_left-score_right
if score_center > score_threshold:
score_threshold = score_center
center_chain = chain
center_chains.append(center_chain)
return left_chains, center_chains, right_chains
    @staticmethod
    def filter_chains_by_tail(chains, chain_ids):
        """Keep only the chains whose first element (the chain tail) matches
        one of *chain_ids* (per algorithms.is_in_list)."""
        target_chains_lists = []
        if chains:
            for chain in chains:
                chain_tail = chain[0]
                if algorithms.is_in_list(chain_ids, [chain_tail]):
                    target_chains_lists.append(chain)
        return target_chains_lists
    @staticmethod
    def clear_chain_by_dot_product(chain, armature):
        """Drop the chain's first bone when it bends sharply from its parent.

        Only ``chain[0]`` is tested (and only for chains longer than 2): it
        is removed in place when the dot product of its normalized direction
        with its parent's is below 0.5. Requires a round trip through EDIT
        mode on *armature*; returns the (possibly mutated) chain.
        """
        algorithms.select_and_change_mode(armature, 'EDIT')
        if len(chain) > 2:
            edit_bones = algorithms.get_edit_bones(armature)
            bone_name = chain[0]
            if bone_name in edit_bones:
                e_bone = edit_bones[bone_name]
                if e_bone.parent:
                    v1 = e_bone.vector.normalized()
                    v2 = e_bone.parent.vector.normalized()
                    if v1.dot(v2) < 0.5:
                        logger.info("Retarget: Bone %s removed BY DOT", bone_name)
                        chain.remove(bone_name)
        algorithms.select_and_change_mode(armature, 'POSE')  # TODO: store the status and restore it
        return chain
@staticmethod
def clear_chain_by_length(chain, armature):
algorithms.select_and_change_mode(armature, 'EDIT')
for bone_name in chain:
edit_bones = algorithms.get_edit_bones(armature)
if bone_name in edit_bones:
e_bone = edit_bones[bone_name]
if e_bone.parent:
if e_bone.length < e_bone.parent.length/8:
logger.info("Retarget: Bone %s removed BY LENGTH", bone_name)
chain.remove(bone_name)
algorithms.select_and_change_mode(armature, 'POSE') # TODO: store the status and restore it
return chain
    def filter_chains_by_dotprod(self, armature):
        """Run clear_chain_by_dot_product over every cached bone chain.

        NOTE(review): ``ltoe_and_leg_names``/``rtoe_and_leg_names`` are not
        set in init_skeleton_map -- presumably assigned by map_main_bones;
        confirm this method is never called before that happens.
        """
        self.spine_bones_names = self.clear_chain_by_dot_product(self.spine_bones_names, armature)
        self.head_bones_names = self.clear_chain_by_dot_product(self.head_bones_names, armature)
        self.rarm_bones_names = self.clear_chain_by_dot_product(self.rarm_bones_names, armature)
        self.larm_bones_names = self.clear_chain_by_dot_product(self.larm_bones_names, armature)
        self.pelvis_bones_names = self.clear_chain_by_dot_product(self.pelvis_bones_names, armature)
        self.ltoe_and_leg_names = self.clear_chain_by_dot_product(self.ltoe_and_leg_names, armature)
        self.rtoe_and_leg_names = self.clear_chain_by_dot_product(self.rtoe_and_leg_names, armature)
        self.rfinger0_bones_names = self.clear_chain_by_dot_product(self.rfinger0_bones_names, armature)
        self.rfinger1_bones_names = self.clear_chain_by_dot_product(self.rfinger1_bones_names, armature)
        self.rfinger2_bones_names = self.clear_chain_by_dot_product(self.rfinger2_bones_names, armature)
        self.rfinger3_bones_names = self.clear_chain_by_dot_product(self.rfinger3_bones_names, armature)
        self.rfinger4_bones_names = self.clear_chain_by_dot_product(self.rfinger4_bones_names, armature)
        self.lfinger0_bones_names = self.clear_chain_by_dot_product(self.lfinger0_bones_names, armature)
        self.lfinger1_bones_names = self.clear_chain_by_dot_product(self.lfinger1_bones_names, armature)
        self.lfinger2_bones_names = self.clear_chain_by_dot_product(self.lfinger2_bones_names, armature)
        self.lfinger3_bones_names = self.clear_chain_by_dot_product(self.lfinger3_bones_names, armature)
        self.lfinger4_bones_names = self.clear_chain_by_dot_product(self.lfinger4_bones_names, armature)
    def filter_chains_by_length(self, armature):
        """Run clear_chain_by_length over every cached bone chain (note:
        unlike filter_chains_by_dotprod, the spine and pelvis chains are
        deliberately left untouched here).
        """
        self.head_bones_names = self.clear_chain_by_length(self.head_bones_names, armature)
        self.rarm_bones_names = self.clear_chain_by_length(self.rarm_bones_names, armature)
        self.larm_bones_names = self.clear_chain_by_length(self.larm_bones_names, armature)
        self.rleg_bones_names = self.clear_chain_by_length(self.rleg_bones_names, armature)
        self.lleg_bones_names = self.clear_chain_by_length(self.lleg_bones_names, armature)
        self.ltoe_and_leg_names = self.clear_chain_by_length(self.ltoe_and_leg_names, armature)
        self.rtoe_and_leg_names = self.clear_chain_by_length(self.rtoe_and_leg_names, armature)
        self.rfinger0_bones_names = self.clear_chain_by_length(self.rfinger0_bones_names, armature)
        self.rfinger1_bones_names = self.clear_chain_by_length(self.rfinger1_bones_names, armature)
        self.rfinger2_bones_names = self.clear_chain_by_length(self.rfinger2_bones_names, armature)
        self.rfinger3_bones_names = self.clear_chain_by_length(self.rfinger3_bones_names, armature)
        self.rfinger4_bones_names = self.clear_chain_by_length(self.rfinger4_bones_names, armature)
        self.lfinger0_bones_names = self.clear_chain_by_length(self.lfinger0_bones_names, armature)
        self.lfinger1_bones_names = self.clear_chain_by_length(self.lfinger1_bones_names, armature)
        self.lfinger2_bones_names = self.clear_chain_by_length(self.lfinger2_bones_names, armature)
        self.lfinger3_bones_names = self.clear_chain_by_length(self.lfinger3_bones_names, armature)
        self.lfinger4_bones_names = self.clear_chain_by_length(self.lfinger4_bones_names, armature)
@staticmethod
def filter_chains_by_id(chains, chain_ids):
target_chains_lists = []
for chain in chains:
if algorithms.is_in_list(chain_ids, chain):
target_chains_lists.append(chain)
return target_chains_lists
    @staticmethod
    def filter_chains_by_order(chains, n_ord):
        """Pick the *n_ord*-th finger/toe chain from *chains*.

        If chain tails use anatomical names (thu/ind/mid/ring/pink), the
        n_ord-th name selects the chain; otherwise the tails are sorted and
        the n_ord-th identifier is used. When several tails contain the
        chosen id, the *last* matching chain wins (the loop does not break).
        Returns [] when nothing matches.
        """
        named_fingers = ("thu", "ind", "mid", "ring", "pink")
        identifiers = []
        for chain in chains:
            if chain:
                identifiers.append(chain[0])
        identifiers.sort()
        result_chain = []
        chain_order = None
        chain_id = None
        if algorithms.is_in_list(named_fingers, identifiers):
            chain_order = "NAMED"
        else:
            chain_order = "NUMBERED"
        if chain_order == "NAMED":
            chain_id = named_fingers[n_ord]
        if chain_order == "NUMBERED":
            if len(identifiers) > n_ord:
                chain_id = identifiers[n_ord]
        if chain_id:
            chain_id = chain_id.lower()
            for chain in chains:
                chain_tail = chain[0]
                chain_tail = chain_tail.lower()
                if chain_id in chain_tail:
                    result_chain = chain
            return result_chain
        return result_chain
def identify_bone_chains(self, chains):
    """Classify the raw bone chains into named body-part chains on self.

    The decomposition is done with chain set-algebra: the bones common
    to every arm chain form the spine, an arm is an arm+spine chain
    minus the spine, toes are a foot chain minus the leg+spine part,
    and so on.  Results populate self.spine_bones_names,
    self.rarm_bones_names, ... self.lfinger4_bones_names.
    """
    left_chains, center_chains, right_chains = self.filter_chains_by_side(chains)
    # ARM_CHAIN_IDS
    arm_chain_ids = ("arm", "elbow", "hand", "wrist", "finger", "thumb", "index",
                     "ring", "pink", "mid")
    arms_tail_chains = self.filter_chains_by_id(chains, arm_chain_ids)
    arms_tail_chains = self.filter_chains_by_max_length(arms_tail_chains)
    # Bones shared by every arm chain are the spine.
    spine_chain = self.chains_intersection(arms_tail_chains)
    right_arm_tail_chains = self.filter_chains_by_tail(right_chains, arm_chain_ids)
    right_arm_tail_chains = self.filter_chains_by_max_length(right_arm_tail_chains)
    r_arm_spine_chain = self.chains_intersection(right_arm_tail_chains)
    # Subtracting the spine from arm+spine leaves the arm itself.
    right_arm_chain = self.chains_difference(r_arm_spine_chain, spine_chain)
    left_arm_tail_chains = self.filter_chains_by_tail(left_chains, arm_chain_ids)
    left_arm_tail_chains = self.filter_chains_by_max_length(left_arm_tail_chains)
    l_arm_spine_chain = self.chains_intersection(left_arm_tail_chains)
    left_arm_chain = self.chains_difference(l_arm_spine_chain, spine_chain)
    # HEAD_CHAIN_IDS
    head_chain_ids = ("head", "neck", "skull", "face", "spine")
    head_tail_chains = self.filter_chains_by_id(center_chains, head_chain_ids)
    head_tail_chains = self.filter_chains_by_max_length(head_tail_chains)
    head_and_spine_chains = self.chains_intersection(head_tail_chains)
    head_chain = self.chains_difference(head_and_spine_chains, spine_chain)
    # FINGER_CHAIN_IDS
    finger_chain_ids = ("finger", "thumb", "index", "ring", "pink", "mid")
    # RIGHT
    right_fingers_tail_chains = self.filter_chains_by_tail(right_chains, finger_chain_ids)
    r_finger_arm_spine_chain = self.chains_intersection(right_fingers_tail_chains)
    # A finger is its full chain minus the shared arm+spine part.
    right_fingers_chain = [self.chains_difference(fingr, r_finger_arm_spine_chain)
                           for fingr in right_fingers_tail_chains]
    # LEFT
    left_fingers_tail_chains = self.filter_chains_by_tail(left_chains, finger_chain_ids)
    l_finger_arm_spine_chain = self.chains_intersection(left_fingers_tail_chains)
    left_fingers_chain = [self.chains_difference(fingr, l_finger_arm_spine_chain)
                          for fingr in left_fingers_tail_chains]
    # FOOT_CHAIN_IDS
    foot_chain_ids = ("foot", "ankle", "toe", "ball")
    right_foot_tail_chains = self.filter_chains_by_tail(right_chains, foot_chain_ids)
    right_foot_tail_chains.sort()
    # NOTE(review): assumes at least one right foot chain exists and that
    # the lexicographically first one is the toe+leg chain - TODO confirm.
    self.rtoe_and_leg_names = right_foot_tail_chains[0]
    right_foot_tail_chains = self.filter_chains_by_max_length(right_foot_tail_chains)
    r_leg_and_spine_chain = self.chains_intersection(right_foot_tail_chains)
    right_leg_chain = self.chains_difference(r_leg_and_spine_chain, spine_chain)
    # NOTE(review): right_toes_chain / left_toes_chain are computed but
    # never stored on self - confirm whether this is dead code.
    right_toes_chain = [self.chains_difference(toe, r_leg_and_spine_chain) for toe in right_foot_tail_chains]
    right_toes_chain = self.filter_chains_by_max_length(right_toes_chain)
    left_foot_tail_chains = self.filter_chains_by_tail(left_chains, foot_chain_ids)
    left_foot_tail_chains.sort()
    self.ltoe_and_leg_names = left_foot_tail_chains[0]
    left_foot_tail_chains = self.filter_chains_by_max_length(left_foot_tail_chains)
    l_leg_and_spine_chain = self.chains_intersection(left_foot_tail_chains)
    left_leg_chain = self.chains_difference(l_leg_and_spine_chain, spine_chain)
    left_toes_chain = [self.chains_difference(toe, l_leg_and_spine_chain) for toe in left_foot_tail_chains]
    left_toes_chain = self.filter_chains_by_max_length(left_toes_chain)
    feet_tail_chains = self.filter_chains_by_tail(chains, foot_chain_ids)
    # TODO not used
    # leg_chain_IDs = ["thigh", "upperleg", "upper_leg", "leg", "knee", "shin",
    #                  "calf", "lowerleg", "lower_leg", "foot", "ankle", "toe", "ball"]
    # Bones shared by both leg chains form the pelvis.
    pelvis_chain = self.chains_intersection(feet_tail_chains)
    self.spine_bones_names = spine_chain
    self.head_bones_names = head_chain
    self.rarm_bones_names = right_arm_chain
    self.larm_bones_names = left_arm_chain
    self.rleg_bones_names = right_leg_chain
    self.lleg_bones_names = left_leg_chain
    self.pelvis_bones_names = pelvis_chain
    self.rfinger0_bones_names = self.filter_chains_by_order(right_fingers_chain, 0)
    self.rfinger1_bones_names = self.filter_chains_by_order(right_fingers_chain, 1)
    self.rfinger2_bones_names = self.filter_chains_by_order(right_fingers_chain, 2)
    self.rfinger3_bones_names = self.filter_chains_by_order(right_fingers_chain, 3)
    self.rfinger4_bones_names = self.filter_chains_by_order(right_fingers_chain, 4)
    self.lfinger0_bones_names = self.filter_chains_by_order(left_fingers_chain, 0)
    self.lfinger1_bones_names = self.filter_chains_by_order(left_fingers_chain, 1)
    self.lfinger2_bones_names = self.filter_chains_by_order(left_fingers_chain, 2)
    self.lfinger3_bones_names = self.filter_chains_by_order(left_fingers_chain, 3)
    self.lfinger4_bones_names = self.filter_chains_by_order(left_fingers_chain, 4)
@staticmethod
def get_ending_bones(armat):
    """Return the set of names of all leaf bones (bones without children)."""
    return {bn.name for bn in armat.data.bones if not bn.children}
@staticmethod
def string_similarity(main_string, identifiers, side):
    """Score how closely a bone name matches one of *identifiers*.

    The score is the number of characters left in the lowercased
    *main_string* after stripping every identifier fragment and the
    side markers for *side* ('LEFT' or 'RIGHT'); lower is better and
    0 is a perfect match.  Returns 1000 when no identifier occurs in
    the name at all.
    """
    m_string = main_string.lower()
    if side == 'LEFT':
        substrings = ["l-", "-l", "_l", "l_", ".l", "l.", "left"]
    elif side == 'RIGHT':
        substrings = ["r-", "-r", "_r", "r_", ".r", "r.", "right"]
    else:
        substrings = []
    # Bugfix: compare identifiers case-insensitively, consistently with
    # the subtraction step below (which lowercases before matching);
    # previously an uppercase identifier could never be "found" even
    # though it would have been stripped.
    sub_string_found = any(
        id_string.lower() in m_string for id_string in identifiers)
    if not sub_string_found:
        return 1000
    for s_string in list(identifiers) + substrings:
        s_string = s_string.lower()
        if s_string in m_string:
            m_string = m_string.replace(s_string, "")
    return len(m_string)
def get_bone_by_similar_id(self, bones_to_scan, bone_identifiers, side):
    """Return the bone name with the best (lowest) similarity score.

    Only scores strictly below 100 are accepted; returns None when
    *bones_to_scan* is empty or no candidate scores well enough.
    """
    best_name = None
    best_score = 100
    for candidate in bones_to_scan or ():
        candidate_score = self.string_similarity(candidate, bone_identifiers, side)
        if candidate_score < best_score:
            best_score = candidate_score
            best_name = candidate
    return best_name
def find_bone(self, armat, bone_type, search_method):
    """Look up the armature bone matching the abstract *bone_type*.

    The knowledge database supplies the candidate chain and the search
    hints (name ids, children ids, side, chain position).  The
    preferred *search_method* is tried first, then the remaining
    strategies in a fixed fallback order.  A successful match is
    recorded once in self.already_mapped_bones and returned; None is
    returned when every strategy fails.
    """
    if not self.knowledge_database:
        return None
    bone_knowledge = self.knowledge_database[bone_type]
    main_ids = bone_knowledge["main_IDs"]
    children_ids = bone_knowledge["children_IDs"]
    side = bone_knowledge["side"]
    chain_id = bone_knowledge["chain_ID"]
    position_in_chain = bone_knowledge["position_in_chain"]

    # chain_ID values map one-to-one onto the chain attributes built by
    # identify_bone_chains(); "all_chains" means the whole armature.
    chain_attr_names = frozenset((
        "spine_bones_names", "rarm_bones_names", "larm_bones_names",
        "rleg_bones_names", "lleg_bones_names", "head_bones_names",
        "pelvis_bones_names", "rtoe_and_leg_names", "ltoe_and_leg_names",
        "rfinger0_bones_names", "rfinger1_bones_names", "rfinger2_bones_names",
        "rfinger3_bones_names", "rfinger4_bones_names",
        "lfinger0_bones_names", "lfinger1_bones_names", "lfinger2_bones_names",
        "lfinger3_bones_names", "lfinger4_bones_names"))
    if chain_id == "all_chains":
        bones_chain = self.get_all_bone_names(armat)
    elif chain_id in chain_attr_names:
        bones_chain = getattr(self, chain_id)
    else:
        bones_chain = None

    if bones_chain:
        searchers = {
            "by_exact_name": (
                "Retarget: Bone %s found BY EXACT NAME",
                lambda: self.get_bone_by_exact_id(bones_chain, main_ids, side)),
            "by_similar_name": (
                "Retarget: Bone %s found BY SIMILAR NAME",
                lambda: self.get_bone_by_similar_id(bones_chain, main_ids, side)),
            "by_children": (
                "Retarget: Bone %s found BY CHILDREN",
                lambda: self.get_bone_by_childr(armat, bones_chain, children_ids)),
            "by_chain_index": (
                "Retarget: Bone %s found BY CHAIN INDEX",
                lambda: self.get_bones_by_index(bones_chain, position_in_chain)),
        }
        # The knowledge-preferred method first, then the fixed fallbacks.
        fallback_order = ("by_exact_name", "by_chain_index", "by_similar_name", "by_children")
        search_sequence = [search_method] + [m for m in fallback_order if m != search_method]
        for s_method in search_sequence:
            entry = searchers.get(s_method)
            if entry is None:
                continue
            message, finder = entry
            result = finder()
            if result:
                logger.info(message, bone_type)
                if result not in self.already_mapped_bones:
                    self.already_mapped_bones.append(result)
                    logger.info("Retarget: %s added to mapped bones", result)
                return result
    logger.warning("All retarget methods failed for %s.", bone_type)
    return None
def bone_parent_name(self, armat, b_name):
    """Return the name of the parent of bone *b_name*, or None."""
    bone = self.get_bone(armat, b_name)
    if bone and bone.parent:
        return bone.parent.name
    return None
def get_bone(self, armat, b_name, b_type="TARGET"):
    """Return the pose bone named *b_name* from *armat*, or None.

    For b_type "SOURCE" the name is first translated through the
    retarget mapping; for "TARGET" it is used as-is.  Any other
    b_type, a missing armature, or an unknown name yields None.
    """
    if not armat:
        return None
    if b_type == "SOURCE":
        lookup_name = self.get_mapped_name(b_name)
    elif b_type == "TARGET":
        lookup_name = b_name
    else:
        return None
    if lookup_name and lookup_name in armat.pose.bones:
        return armat.pose.bones[lookup_name]
    return None
@staticmethod
def get_target_editbone(armat, b_name):
    """Return the edit bone *b_name* of the target armature, or None.

    Must be called while in EDIT mode; otherwise a warning is logged
    and None is returned.
    """
    if bpy.context.object.mode != "EDIT":
        logger.warning("Warning: Can't get the edit bone of %s because the mode is %s",
                       bpy.context.scene.objects.active, bpy.context.object.mode)
        return None
    if not b_name:
        return None
    ebone = algorithms.get_edit_bone(armat, b_name)
    if ebone:
        return ebone
    logger.warning("%s not found in edit mode of target armature %s", b_name, armat)
    return None
def get_source_editbone(self, armat, b_name):
    """Return the source edit bone for the mapped name of *b_name*, or None.

    Must be called while in EDIT mode; otherwise a warning is logged
    and None is returned.  The target-side name is translated through
    the retarget mapping before the lookup.
    """
    if bpy.context.object.mode != "EDIT":
        logger.warning("Warning: Can't get the edit bone of %s because the mode is %s",
                       bpy.context.scene.objects.active, bpy.context.object.mode)
        return None
    mapped_name = self.get_mapped_name(b_name)
    if not mapped_name:
        return None
    ebone = algorithms.get_edit_bone(armat, mapped_name)
    if ebone:
        return ebone
    logger.warning("%s not found in edit mode of source armature %s", mapped_name, armat)
    return None
def get_mapped_name(self, b_name):
    """Translate a target bone name into the mapped source bone name.

    Returns None when the name has no mapping.
    """
    if b_name in self.skeleton_mapped:
        return self.skeleton_mapped[b_name]
    return None
def map_bone(self, armat, b_name, b_type, s_method):
    """Resolve *b_type* on *armat* and record the mapping under *b_name*.

    Nothing is recorded when the bone cannot be found.
    """
    found_name = self.find_bone(armat, b_type, s_method)
    if found_name is not None:
        self.skeleton_mapped[b_name] = found_name
def map_by_direct_parent(self, armat, childr_name, map_name):
    """Map *map_name* to the parent of the already-mapped child bone.

    Returns True on success; False (with a warning) when the child is
    unmapped, has no parent, or the parent is already mapped to
    another bone.
    """
    child_bone_name = self.get_mapped_name(childr_name)
    parent_bone_name = None
    if child_bone_name:
        parent_bone_name = self.bone_parent_name(armat, child_bone_name)
    if parent_bone_name and parent_bone_name not in self.already_mapped_bones:
        self.skeleton_mapped[map_name] = parent_bone_name
        self.already_mapped_bones.append(parent_bone_name)
        return True
    logger.warning("Error in mapping %s as direct parent of %s", map_name, childr_name)
    return False
def map_main_bones(self, armat):
    """Build the full target->source bone mapping for *armat*.

    Chains are identified and filtered first, then bones are resolved
    in a fixed order; the order matters because successfully mapped
    bones are excluded from later parent-based lookups via
    self.already_mapped_bones.
    """
    ending_bones = self.get_ending_bones(armat)
    chains = self.get_bone_chains(armat, ending_bones)
    self.identify_bone_chains(chains)
    self.filter_chains_by_length(armat)
    self.filter_chains_by_dotprod(armat)
    # Main limbs and torso: (target name, knowledge id, search method).
    for bone in (
            ("clavicle_L", "LCLAVICLE", "by_exact_name"),
            ("clavicle_R", "RCLAVICLE", "by_exact_name"),
            ("head", "HEAD", "by_exact_name"),
            ("lowerarm_R", "RFOREARM", "by_exact_name"),
            ("lowerarm_L", "LFOREARM", "by_exact_name"),
            ("upperarm_R", "RUPPERARM", "by_children"),
            ("upperarm_L", "LUPPERARM", "by_children"),
            ("hand_R", "RHAND", "by_exact_name"),
            ("hand_L", "LHAND", "by_exact_name"),
            ("breast_R", "RBREAST", "by_exact_name"),
            ("breast_L", "LBREAST", "by_exact_name"),
            ("calf_R", "RCALF", "by_exact_name"),
            ("calf_L", "LCALF", "by_exact_name"),
            ("foot_R", "RFOOT", "by_exact_name"),
            ("foot_L", "LFOOT", "by_exact_name"),
            ("toes_R", "RTOE", "by_exact_name"),
            ("toes_L", "LTOE", "by_exact_name"),
            ("pelvis", "PELVIS", "by_exact_name"),
            ("spine03", "CHEST", "by_chain_index"),
    ):
        self.map_bone(armat, *bone)
    # Bones without reliable ids are derived as parents of bones that
    # were already mapped above.
    if not self.map_by_direct_parent(armat, "head", "neck"):
        self.map_bone(armat, "neck", "NECK", "by_similar_name")  # TODO: integrate in find function
    self.map_by_direct_parent(armat, "spine03", "spine02")
    self.map_by_direct_parent(armat, "spine02", "spine01")
    self.map_by_direct_parent(armat, "calf_R", "thigh_R")
    self.map_by_direct_parent(armat, "calf_L", "thigh_L")
    # Fingers and twist helper bones.
    for bone in (
            ("thumb03_R", "RTHUMB03", "by_chain_index"),
            ("thumb02_R", "RTHUMB02", "by_chain_index"),
            ("thumb01_R", "RTHUMB01", "by_chain_index"),
            ("index03_R", "RINDEX03", "by_chain_index"),
            ("index02_R", "RINDEX02", "by_chain_index"),
            ("index01_R", "RINDEX01", "by_chain_index"),
            ("index00_R", "RINDEX00", "by_exact_name"),
            ("middle03_R", "RMIDDLE03", "by_chain_index"),
            ("middle02_R", "RMIDDLE02", "by_chain_index"),
            ("middle01_R", "RMIDDLE01", "by_chain_index"),
            ("middle00_R", "RMIDDLE00", "by_exact_name"),
            ("ring03_R", "RRING03", "by_chain_index"),
            ("ring02_R", "RRING02", "by_chain_index"),
            ("ring01_R", "RRING01", "by_chain_index"),
            ("ring00_R", "RRING00", "by_exact_name"),
            ("pinky03_R", "RPINKY03", "by_chain_index"),
            ("pinky02_R", "RPINKY02", "by_chain_index"),
            ("pinky01_R", "RPINKY01", "by_chain_index"),
            ("pinky00_R", "RPINKY00", "by_exact_name"),
            ("thumb03_L", "LTHUMB03", "by_chain_index"),
            ("thumb02_L", "LTHUMB02", "by_chain_index"),
            ("thumb01_L", "LTHUMB01", "by_chain_index"),
            ("index03_L", "LINDEX03", "by_chain_index"),
            ("index02_L", "LINDEX02", "by_chain_index"),
            ("index01_L", "LINDEX01", "by_chain_index"),
            ("index00_L", "LINDEX00", "by_exact_name"),
            ("middle03_L", "LMIDDLE03", "by_chain_index"),
            ("middle02_L", "LMIDDLE02", "by_chain_index"),
            ("middle01_L", "LMIDDLE01", "by_chain_index"),
            ("middle00_L", "LMIDDLE00", "by_exact_name"),
            ("ring03_L", "LRING03", "by_chain_index"),
            ("ring02_L", "LRING02", "by_chain_index"),
            ("ring01_L", "LRING01", "by_chain_index"),
            ("ring00_L", "LRING00", "by_exact_name"),
            ("pinky03_L", "LPINKY03", "by_chain_index"),
            ("pinky02_L", "LPINKY02", "by_chain_index"),
            ("pinky01_L", "LPINKY01", "by_chain_index"),
            ("pinky00_L", "LPINKY00", "by_exact_name"),
            ("upperarm_twist_R", "RUPPERARM_TWIST", "by_exact_name"),
            ("upperarm_twist_L", "LUPPERARM_TWIST", "by_exact_name"),
            ("lowerarm_twist_R", "RFOREARM_TWIST", "by_exact_name"),
            ("lowerarm_twist_L", "LFOREARM_TWIST", "by_exact_name"),
            ("thigh_twist_R", "RUPPERLEG_TWIST", "by_exact_name"),
            ("thigh_twist_L", "LUPPERLEG_TWIST", "by_exact_name"),
            ("thigh_calf_R", "RCALF_TWIST", "by_exact_name"),
            ("thigh_calf_L", "LCALF_TWIST", "by_exact_name"),
    ):
        self.map_bone(armat, *bone)
def bake_animation(self, target_armat, source_armat):
    """Bake the retargeted animation onto the target armature.

    Uses the source action's frame range when available, otherwise
    bakes from frame 0 up to the current frame, then removes the
    temporary retarget constraints.
    """
    f_range = [0, bpy.context.scene.frame_current]
    algorithms.select_and_change_mode(target_armat, 'POSE')
    if source_armat.animation_data:
        # NOTE(review): assumes animation_data implies a non-None
        # action - confirm.
        source_action = source_armat.animation_data.action
        f_range = source_action.frame_range
    # visual_keying is required so the baked keys include the effect of
    # the copy-rotation/location constraints.
    bpy.ops.nla.bake(frame_start=f_range[0], frame_end=f_range[1], only_selected=False,
                     visual_keying=True, clear_constraints=False, use_current_action=True, bake_types={'POSE'})
    self.remove_armature_constraints(target_armat)
@staticmethod
def reset_bones_rotations(armat):
    """Reset every pose bone of *armat* to its identity rotation.

    Each bone is reset through the representation matching its own
    rotation mode (quaternion, axis-angle, or Euler fallback).
    """
    for p_bone in armat.pose.bones:
        mode = p_bone.rotation_mode
        if mode == 'QUATERNION':
            p_bone.rotation_quaternion = mathutils.Quaternion((1.0, 0.0, 0.0, 0.0))
        elif mode == 'AXIS_ANGLE':
            p_bone.rotation_axis_angle = mathutils.Vector((0.0, 0.0, 1.0, 0.0))
        else:
            p_bone.rotation_euler = mathutils.Euler((0.0, 0.0, 0.0))
# TODO skeleton structure check
def calculate_skeleton_vectors(self, armat, armat_type, rot_type):
    """Compute an alignment vector for *armat* in edit (rest) space.

    rot_type "ALIGN_SPINE" returns the normalized pelvis->head vector,
    "ALIGN_SHOULDERS" the normalized hand_R->hand_L vector.  Falls
    back to neck / forearm bones when head / hands are missing.
    Returns None when the needed bones cannot be found, *armat_type*
    is invalid, or *rot_type* is unknown.  The armature is switched to
    EDIT mode for the lookups and restored to POSE mode on return.
    """
    algorithms.select_and_change_mode(armat, "EDIT")
    # Bugfix: pre-initialise the lookups so an unexpected armat_type
    # cannot raise NameError below; it now simply returns None.
    head_bone = pelvis_bone = hand_bone1 = hand_bone2 = None
    if armat_type == 'SOURCE':
        getter = self.get_source_editbone
    elif armat_type == 'TARGET':
        getter = self.get_target_editbone
    else:
        getter = None
    if getter:
        head_bone = getter(armat, "head")
        pelvis_bone = getter(armat, "pelvis")
        hand_bone1 = getter(armat, "hand_R")
        hand_bone2 = getter(armat, "hand_L")
        # Fallbacks for skeletons that miss the preferred bones.
        if not head_bone:
            head_bone = getter(armat, "neck")
        if not hand_bone1:
            hand_bone1 = getter(armat, "lowerarm_R")
        if not hand_bone2:
            hand_bone2 = getter(armat, "lowerarm_L")
    if head_bone and pelvis_bone and hand_bone1 and hand_bone2:
        spine_vect = head_bone.head - pelvis_bone.head
        shoulder_vect = hand_bone2.head - hand_bone1.head
        algorithms.select_and_change_mode(armat, "POSE")
        if rot_type == "ALIGN_SPINE":
            return spine_vect.normalized()
        if rot_type == "ALIGN_SHOULDERS":
            return shoulder_vect.normalized()
        return None
    algorithms.select_and_change_mode(armat, "POSE")
    return None
@staticmethod
def define_angle_direction(vect1, vect2, rot_axis, angle):
    """Pick the rotation sign around *rot_axis* that brings vect1 toward vect2.

    Both +angle and -angle are tried; the quaternion whose rotated
    vector has the larger non-negative dot product with *vect2* wins.
    The identity rotation is returned when neither direction helps.
    """
    dots = []
    quats = []
    for signed_angle in (angle, -angle):
        quat = mathutils.Quaternion(rot_axis, signed_angle)
        probe = vect1.copy()
        probe.rotate(quat)
        dots.append(probe.dot(vect2))
        quats.append(quat)
    if dots[0] >= 0 and dots[0] >= dots[1]:
        return quats[0]
    if dots[1] >= 0 and dots[1] >= dots[0]:
        return quats[1]
    return mathutils.Quaternion((0.0, 0.0, 1.0), 0)
def align_skeleton(self, target_armat, source_armat):
    """Rotate the source armature so its spine and shoulder axes match the target."""
    for rot_type in ("ALIGN_SPINE", "ALIGN_SHOULDERS"):
        self.calculate_skeleton_rotations(target_armat, source_armat, rot_type)
def calculate_skeleton_rotations(self, target_armat, source_armat, rot_type):
    """Rotate the source armature so its *rot_type* axis matches the target's.

    The source's object transform is applied first so the vectors are
    measured in world-aligned rest space, and applied again afterwards
    to bake the alignment into the armature data.
    """
    algorithms.apply_object_transformation(source_armat)
    source_vectors = self.calculate_skeleton_vectors(source_armat, 'SOURCE', rot_type)
    if source_vectors:
        target_vectors = self.calculate_skeleton_vectors(target_armat, 'TARGET', rot_type)
        if rot_type == "ALIGN_SHOULDERS":
            # Flatten the shoulder axis onto the XY plane so the roll
            # correction does not tilt the skeleton vertically; must
            # happen before the angle/axis are computed.
            source_vectors.z = 0.0
        if target_vectors:
            angle = source_vectors.angle(target_vectors)
            rot_axis = source_vectors.cross(target_vectors)
            rot = self.define_angle_direction(source_vectors, target_vectors, rot_axis, angle)
            self.rotate_skeleton(source_armat, rot)
            algorithms.apply_object_transformation(source_armat)
        else:
            logger.warning("Cannot calculate the target vector for armature alignment")
    else:
        logger.warning("Cannot calculate the source vector for armature alignment")
@staticmethod
def rotate_skeleton(armat, rot_quat):
    """Apply *rot_quat* as the object-level rotation of *armat*.

    The rotation mode must be set to QUATERNION before assigning the
    quaternion; the view-layer update makes the new transform
    effective immediately for subsequent reads.
    """
    armat.rotation_mode = 'QUATERNION'
    armat.rotation_quaternion = rot_quat
    bpy.context.view_layer.update()
def use_animation_pelvis(self, target_armat, source_armat):
    """Reposition the target pelvis to mirror the source pelvis placement.

    Measures the source pelvis head/tail offsets relative to the
    thigh-head midpoint, scales them by the armature proportion, and
    rebuilds the target pelvis edit bone at the equivalent location.
    """
    if target_armat and source_armat:
        v1 = None  # scaled offset: thigh midpoint -> pelvis head
        v2 = None  # scaled offset: pelvis head -> pelvis tail
        armat_prop = self.get_armature_proportion(target_armat, source_armat)
        algorithms.select_and_change_mode(source_armat, 'EDIT')
        source_pelvis = self.get_source_editbone(source_armat, "pelvis")
        r_thigh_bone = self.get_source_editbone(source_armat, "thigh_R")
        l_thigh_bone = self.get_source_editbone(source_armat, "thigh_L")
        if source_pelvis and r_thigh_bone and l_thigh_bone:
            p1 = (r_thigh_bone.head + l_thigh_bone.head) * 0.5
            p2 = source_pelvis.head
            p3 = source_pelvis.tail
            v1 = armat_prop * (p2 - p1)
            v2 = armat_prop * (p3 - p2)
        algorithms.select_and_change_mode(source_armat, 'POSE')
        # v1/v2 stay None unless all three source bones were found.
        if v1 and v2:
            algorithms.select_and_change_mode(target_armat, 'EDIT')
            target_pelvis = self.get_target_editbone(target_armat, "pelvis")
            r_thigh_bone = self.get_target_editbone(target_armat, "thigh_R")
            l_thigh_bone = self.get_target_editbone(target_armat, "thigh_L")
            if target_pelvis and r_thigh_bone and l_thigh_bone:
                p1a = (r_thigh_bone.head + l_thigh_bone.head) * 0.5
                target_pelvis.head = p1a + v1
                target_pelvis.tail = target_pelvis.head + v2
            algorithms.select_and_change_mode(target_armat, 'POSE')
def armature_height(self, armat, armat_type):
    """Estimate the armature height: clavicle midpoint to foot midpoint.

    Falls back to the upperarm tails / calf heads when clavicles or
    feet are missing.  Returns 0 (with warnings) when the armature or
    the reference bones cannot be found.  Switches the armature to
    EDIT mode for the measurements and back to POSE mode on success.
    """
    if not armat:
        logger.warning("Cannot found the source armature for height calculation")
        return 0
    algorithms.set_object_visible(armat)
    algorithms.select_and_change_mode(armat, 'EDIT')
    upper_point = None
    lower_point = None
    # Bugfix: pre-initialise so an unexpected armat_type falls through
    # to the warnings below instead of raising NameError.
    r_foot_bone = l_foot_bone = r_calf_bone = l_calf_bone = None
    r_clavicle_bone = l_clavicle_bone = r_upperarm_bone = l_upperarm_bone = None
    if armat_type == 'SOURCE':
        getter = self.get_source_editbone
    elif armat_type == 'TARGET':
        getter = self.get_target_editbone
    else:
        getter = None
    if getter:
        r_foot_bone = getter(armat, "foot_R")
        l_foot_bone = getter(armat, "foot_L")
        r_calf_bone = getter(armat, "calf_R")
        l_calf_bone = getter(armat, "calf_L")
        r_clavicle_bone = getter(armat, "clavicle_R")
        l_clavicle_bone = getter(armat, "clavicle_L")
        r_upperarm_bone = getter(armat, "upperarm_R")
        l_upperarm_bone = getter(armat, "upperarm_L")
    if l_clavicle_bone and r_clavicle_bone:
        upper_point = (l_clavicle_bone.head + r_clavicle_bone.head) * 0.5
    elif l_upperarm_bone and r_upperarm_bone:
        upper_point = (l_upperarm_bone.tail + r_upperarm_bone.tail) * 0.5
    else:
        logger.warning("Cannot calculate armature height: clavicles not found")
    if l_foot_bone and r_foot_bone:
        lower_point = (l_foot_bone.head + r_foot_bone.head) * 0.5
    elif l_calf_bone and r_calf_bone:
        lower_point = (l_calf_bone.head + r_calf_bone.head) * 0.5
    else:
        logger.warning("Cannot calculate armature height: feet not found")
    if upper_point and lower_point:
        height = upper_point - lower_point
        algorithms.select_and_change_mode(armat, 'POSE')
        return height.length
    return 0
@staticmethod
def remove_armature_constraints(target_armature):
    """Remove all retarget helper constraints ("mbastlab_*") from every pose bone.

    Bugfix: the constraints to delete are collected before removal,
    because removing items from a collection while iterating over it
    skips entries, so some "mbastlab_" constraints could survive the
    cleanup.
    """
    for b in target_armature.pose.bones:
        doomed = [cstr for cstr in b.constraints if "mbastlab_" in cstr.name]
        for cstr in doomed:
            b.constraints.remove(cstr)
def add_copy_rotations(self, target_armat, source_armat, bones_to_rotate, space='WORLD'):
    """Add a COPY_ROTATION constraint toward each bone's mapped source bone.

    Only bones listed in *bones_to_rotate* that have a non-empty
    mapping and no previous "mbastlab_rot" constraint are touched;
    target and owner both use *space*.
    """
    for b in target_armat.pose.bones:
        if b.name in self.skeleton_mapped and b.name in bones_to_rotate:
            # bpy constraint collections support membership tests by
            # constraint name, so this skips already-constrained bones.
            if self.skeleton_mapped[b.name] and "mbastlab_rot" not in b.constraints:
                cstr = b.constraints.new('COPY_ROTATION')
                cstr.target = source_armat
                cstr.subtarget = self.skeleton_mapped[b.name]
                cstr.target_space = space
                cstr.owner_space = space
                cstr.name = "mbastlab_rot"
def add_copy_location(self, target_armat, source_armat, bones_to_move):
    """Add a world-space COPY_LOCATION constraint to each mapped bone in *bones_to_move*.

    Consistency/robustness fix: bones whose mapping resolved to an
    empty value are skipped, mirroring add_copy_rotations, so the
    constraint never receives an empty subtarget.
    """
    for b in target_armat.pose.bones:
        if b.name in self.skeleton_mapped and b.name in bones_to_move:
            if self.skeleton_mapped[b.name] and "mbastlab_loc" not in b.constraints:
                cstr = b.constraints.new('COPY_LOCATION')
                cstr.target = source_armat
                cstr.subtarget = self.skeleton_mapped[b.name]
                cstr.target_space = "WORLD"
                cstr.owner_space = "WORLD"
                cstr.name = "mbastlab_loc"
def add_armature_constraints(self, target_armat, source_armat):
    """Wire the target armature to the source for retargeting.

    Most bones get a world-space rotation copy, the special
    local-rotation bones a local-space one, and the pelvis a
    world-space location copy.
    """
    world_rotation_bones = [
        b.name for b in target_armat.pose.bones
        if b.name not in self.local_rotation_bones
    ]
    self.add_copy_rotations(target_armat, source_armat, world_rotation_bones)
    self.add_copy_rotations(target_armat, source_armat, self.local_rotation_bones, 'LOCAL')
    self.add_copy_location(target_armat, source_armat, ["pelvis"])
def scale_armat(self, target_armat, source_armat):
    """Uniformly scale the source armature to match the target's height."""
    factor = self.get_armature_proportion(target_armat, source_armat)
    source_armat.scale = [factor] * 3
@staticmethod
def clear_animation(armat):
    """Drop all animation data from *armat*; tolerates a missing armature."""
    if not armat:
        return
    armat.animation_data_clear()
def get_armature_proportion(self, target_armat, source_armat):
    """Return the target/source height ratio, or 1 when the source height is 0."""
    t_height = self.armature_height(target_armat, 'TARGET')
    s_height = self.armature_height(source_armat, 'SOURCE')
    return t_height / s_height if s_height != 0 else 1
def reset_pose(self, armat=None, reset_location=True):
    """Clear animation and reset every bone rotation of *armat*.

    Falls back to the active armature when none is given; with
    reset_location the pelvis bone is also moved back to the origin.
    """
    if not armat:
        armat = get_active_armature()
    if not armat:
        return
    self.clear_animation(armat)
    algorithms.stop_animation()
    for p_bone in armat.pose.bones:
        algorithms.reset_bone_rot(p_bone)
        if reset_location and p_bone.name == "pelvis":
            p_bone.location = [0, 0, 0]
def load_bones_quaternions(self, armat, data_path):
    """Load a JSON pose file and apply the stored quaternions to *armat*.

    The pose is fully reset first; bones absent from the file are left
    at their rest rotation.
    """
    self.reset_pose(armat)
    if armat:
        matrix_data = algorithms.load_json_data(data_path, "Pose data")
        algorithms.set_object_visible(armat)
        algorithms.select_and_change_mode(armat, "POSE")
        pose_bones = algorithms.get_pose_bones(armat)
        for p_bone in pose_bones:
            if p_bone.name in matrix_data:
                algorithms.set_bone_rotation(p_bone, mathutils.Quaternion(matrix_data[p_bone.name]))
            else:
                algorithms.reset_bone_rot(p_bone)
@staticmethod
def save_pose(armat, filepath):
    """Serialize the rotations of *armat*'s pose bones to a JSON file.

    Muscle and IK helper bones are excluded; logs a warning and does
    nothing when no armature is given.
    """
    if not armat:
        logger.warning('could not save pose')
        return
    algorithms.select_and_change_mode(armat, "POSE")
    algorithms.set_object_visible(armat)
    pose_bones = algorithms.get_pose_bones(armat)
    matrix_data = {
        p_bone.name: list(algorithms.get_bone_rotation(p_bone))
        for p_bone in pose_bones
        if "muscle" not in p_bone.name and "IK_" not in p_bone.name
    }
    with open(filepath, 'w') as fp:
        json.dump(matrix_data, fp)
def load_pose(self, filepath, target_armature=None, use_retarget=False):
    """Load a pose JSON file onto an armature.

    With use_retarget the pose is first applied to a temporary library
    FK skeleton and then retargeted (and baked) onto the target, which
    allows loading poses across differently-proportioned skeletons.
    Returns False when no target armature can be resolved, True
    otherwise.
    """
    if not target_armature:
        target_armature = get_active_armature()
    if not target_armature:
        return False
    self.reset_bones_correction()
    self.reset_pose(target_armature)
    if use_retarget:
        # Temporary FK skeleton imported from the bundled library blend.
        source_armature = algorithms.import_object_from_lib(
            self.lib_filepath, "MBLab_skeleton_base_fk", "temporary_armature")
        if source_armature:
            self.load_bones_quaternions(source_armature, filepath)
            self.retarget(target_armature, source_armature, bake_animation=True)
            algorithms.remove_object(source_armature)
            algorithms.stop_animation()
    else:
        self.load_bones_quaternions(target_armature, filepath)
        self.clear_animation(target_armature)
    return True
def load_animation(self, bvh_path, debug_mode=False):
    """Import a BVH file and retarget its animation onto the active armature.

    In debug mode the imported source armature is kept in the scene
    (and the animation is not baked) so the retarget setup can be
    inspected.
    """
    time1 = time.time()
    target_armature = get_active_armature()
    if not target_armature:
        return
    self.reset_bones_correction()
    if target_armature:
        # The freshly imported BVH armature is found by diffing the
        # scene object set before and after the import.
        existing_obj_names = algorithms.collect_existing_objects()
        self.load_bvh(bvh_path)
        source_armature = algorithms.get_newest_object(existing_obj_names)
        if source_armature:
            if not debug_mode:
                self.retarget(target_armature, source_armature, True)
                algorithms.remove_object(source_armature)
            else:
                self.retarget(target_armature, source_armature, False)
            algorithms.play_animation()
    logger.info("Animation loaded in %s sec.", time.time()-time1)
@staticmethod
def load_bvh(bvh_path):
    """Import *bvh_path* using Blender's standard BVH importer.

    Resets the scene end frame first; use_fps_scale remaps the BVH
    frame timing to the scene FPS and update_scene_duration extends
    the timeline to fit the imported action.
    """
    bpy.context.scene.frame_end = 0
    try:
        bpy.ops.import_anim.bvh(
            filepath=bvh_path,
            use_fps_scale=True,
            update_scene_duration=True
        )
    except (FileNotFoundError, IOError):
        # NOTE(review): a missing BVH add-on usually surfaces as an
        # AttributeError on bpy.ops, which this handler does not
        # catch - confirm the intended failure mode.
        logger.warning("Standard bvh operator not found: can't import animation.")
def retarget(self, target_armature, source_armature, bake_animation=True):
    """Retarget the source armature's animation onto the target.

    The sequence is order-critical: map bones, align and scale the
    source, fix the pelvis and bone rolls, then constrain the target
    to the source and optionally bake the result to keyframes.
    """
    logger.info("retarget with %s", source_armature.name)
    if source_armature and target_armature:
        self.init_skeleton_map(source_armature)
        self.clear_animation(target_armature)
        self.align_skeleton(target_armature, source_armature)
        self.scale_armat(target_armature, source_armature)
        self.reset_bones_rotations(target_armature)
        self.use_animation_pelvis(target_armature, source_armature)
        self.align_bones_z_axis(target_armature, source_armature)
        self.remove_armature_constraints(target_armature)
        self.add_armature_constraints(target_armature, source_armature)
        if bake_animation:
            # Disable all scene modifiers except armatures while baking
            # to keep the bake fast, then restore the previous state.
            scene_modifiers_status = algorithms.get_scene_modifiers_status()
            algorithms.set_scene_modifiers_status(False)
            algorithms.set_scene_modifiers_status_by_type('ARMATURE', True)
            self.bake_animation(target_armature, source_armature)
            algorithms.set_scene_modifiers_status(False, scene_modifiers_status)
class ExpressionEngineShapeK:
def __init__(self):
self.has_data = False
self.data_path = algorithms.get_data_path()
self.human_expression_path = os.path.join(
self.data_path,
"expressions_comb",
"human_expressions")
self.anime_expression_path = os.path.join(
self.data_path,
"expressions_comb",
"anime_expressions")
self.expressions_labels = set()
self.human_expressions_data = self.load_expression_database(self.human_expression_path)
self.anime_expressions_data = self.load_expression_database(self.anime_expression_path)
self.expressions_data = {}
self.model_type = "NONE"
self.has_data = True
def identify_model_type(self):
self.model_type = "NONE"
obj = algorithms.get_active_body()
if obj:
current_shapekes_names = algorithms.get_shapekeys_names(obj)
if current_shapekes_names:
if "Expressions_IDHumans_max" in current_shapekes_names:
self.model_type = "HUMAN"
return
if "Expressions_IDAnime_max" in current_shapekes_names:
self.model_type = "ANIME"
return
@staticmethod
def load_expression(filepath):
charac_data = algorithms.load_json_data(filepath, "Character data")
expressions_id = algorithms.simple_path(filepath)
if "manuellab_vers" in charac_data:
if not algorithms.check_version(charac_data["manuellab_vers"]):
logger.info("%s created with vers. %s.",
expressions_id, charac_data["manuellab_vers"])
else:
logger.info("No lab version specified in %s", expressions_id)
if "structural" in charac_data:
char_data = charac_data["structural"]
else:
logger.warning("No structural data in %s", expressions_id)
char_data = None
return char_data
def load_expression_database(self, dirpath):
expressions_data = {}
if algorithms.exists_database(dirpath):
for expression_filename in os.listdir(dirpath):
expression_filepath = os.path.join(dirpath, expression_filename)
e_item, extension = os.path.splitext(expression_filename)
if "json" in extension:
self.expressions_labels.add(e_item)
expressions_data[e_item] = self.load_expression(expression_filepath)
return expressions_data
def sync_expression_to_gui(self):
# Process all expressions: reset all them and then update all them.
# according the GUI value. TODO: optimize.
obj = algorithms.get_active_body()
for expression_name in self.expressions_data:
# Perhaps these two lines are not required
if not hasattr(obj, expression_name):
setattr(obj, expression_name, 0.0)
if hasattr(obj, expression_name):
self.reset_expression(expression_name)
for expression_name in sorted(self.expressions_data.keys()):
if hasattr(obj, expression_name):
express_val = getattr(obj, expression_name)
if express_val != 0:
self.update_expression(expression_name, express_val)
def reset_expressions_gui(self):
obj = algorithms.get_active_body()
for expression_name in self.expressions_data:
if hasattr(obj, expression_name):
setattr(obj, expression_name, 0.0)
self.reset_expression(expression_name)
def update_expressions_data(self):
self.identify_model_type()
if self.model_type == "ANIME":
self.expressions_data = self.anime_expressions_data
if self.model_type == "HUMAN":
self.expressions_data = self.human_expressions_data
if self.model_type == "NONE":
self.expressions_data = {}
def update_expression(self, expression_name, express_val):
obj = algorithms.get_active_body()
if not obj:
return
if not obj.data.shape_keys:
return
if expression_name in self.expressions_data:
expr_data = self.expressions_data[expression_name]
for name, value in expr_data.items():
sk_value = 0
if value < 0.5:
name = f"{name}_min"
sk_value = (0.5 - value) * 2
else:
name = f"{name}_max"
sk_value = (value - 0.5) * 2
sk_value = sk_value*express_val
if sk_value != 0 and hasattr(obj.data.shape_keys, 'key_blocks'):
if name in obj.data.shape_keys.key_blocks:
current_val = obj.data.shape_keys.key_blocks[name].value
obj.data.shape_keys.key_blocks[name].value = min(current_val + sk_value, 1.0)
else:
logger.warning("Expression %s: shapekey %s not found", expression_name, name)
def reset_expression(self, expression_name):
    """Set the shape keys driven by *expression_name* back to zero."""
    obj = algorithms.get_active_body()
    if not obj or not obj.data.shape_keys:
        return
    if expression_name not in self.expressions_data:
        return
    if not hasattr(obj.data.shape_keys, 'key_blocks'):
        return
    key_blocks = obj.data.shape_keys.key_blocks
    for base_name, stored in self.expressions_data[expression_name].items():
        sk_name = f"{base_name}_min" if stored < 0.5 else f"{base_name}_max"
        if sk_name in key_blocks:
            key_blocks[sk_name].value = 0
@staticmethod
def keyframe_expression():
    """Insert a keyframe on every "Expressions_*" shape key of the
    active body."""
    obj = algorithms.get_active_body()
    if not obj or not obj.data.shape_keys:
        return
    if not hasattr(obj.data.shape_keys, 'key_blocks'):
        return
    for shape_key in obj.data.shape_keys.key_blocks:
        if "Expressions_" in shape_key.name:
            shape_key.keyframe_insert(data_path="value")
| 2.171875 | 2 |
"""Aggregate the zoo demo namespaces into a single root API."""
from sanic_restplus import Api

from .cat import api as cat_api
from .dog import api as dog_api

# Root API object for the demo application.
api = Api(
    title='Zoo API',
    version='1.0',
    description='A simple demo API',
    additional_css="/static/testme.css"
)

# Register each animal namespace on the root API.
api.add_namespace(cat_api)
api.add_namespace(dog_api)
| 1.59375 | 2 |
xls/dslx/import_routines.py | isabella232/xls | 0 | 12760926 | <reponame>isabella232/xls
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs an import, as in the 'import' keyword."""
import functools
import os
from typing import Dict
from absl import logging
from xls.common import runfiles
from xls.dslx import import_fn
from xls.dslx import parse_and_typecheck
def do_import(
    subject: import_fn.ImportTokens, cache: Dict[import_fn.ImportTokens,
                                                 import_fn.ModuleInfo]
) -> import_fn.ModuleInfo:
  """Imports the module identified (globally) by 'subject'.

  Resolves against an existing import in 'cache' if it is present.

  Args:
    subject: Tokens that globally uniquely identify the module to import; e.g.
      something built-in like ('std',) for the standard library or something
      fully qualified like ('xls', 'lib', 'math').
    cache: Cache that we resolve against so we don't waste resources
      re-importing things in the import DAG.

  Returns:
    The imported module information.
  """
  assert subject
  cached = cache.get(subject)
  if cached is not None:
    return cached

  builtin_modules = (('std',), ('float32',), ('bfloat16',))
  if subject in builtin_modules:
    path = 'xls/dslx/stdlib/{}.x'.format(subject[0])
  else:
    path = os.path.join(*subject) + '.x'

  f_import = functools.partial(do_import, cache=cache)
  fully_qualified_name = '.'.join(subject)

  parent_path = os.path.join(os.path.pardir, path)
  if os.path.exists(path):
    with open(path, mode='rb') as f:
      contents = f.read().decode('utf-8')
  elif os.path.exists(parent_path):
    # Genrules in-house execute inside a subdirectory, so we also search
    # starting from the parent directory for now.
    #
    # An alternative would be to explicitly note the DSLX_PATH when invoking
    # the tool in this special genrule context, but since we expect module
    # paths to be fully qualified at the moment, we opt for this kluge.
    path = parent_path
    with open(path, mode='rb') as f:
      contents = f.read().decode('utf-8')
  else:
    contents = runfiles.get_contents_as_text(path)
    path = runfiles.get_path(path)

  logging.vlog(3, 'Parsing and typechecking %r: start', fully_qualified_name)
  m, node_to_type = parse_and_typecheck.parse_text(
      contents,
      fully_qualified_name,
      f_import=f_import,
      filename=path,
      print_on_error=True)
  logging.vlog(3, 'Parsing and typechecking %r: done', fully_qualified_name)

  assert node_to_type is not None
  cache[subject] = (m, node_to_type)
  return m, node_to_type
| 2.125 | 2 |
interpolation_routines.py | mfkiwl/PW_from_GPS | 4 | 12760927 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 10:12:42 2020
@author: shlomi
"""
from PW_paths import work_yuval
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
awd_path = work_yuval/'AW3D30'
def interpolate_var_ds_at_multiple_dts(var_ds, geo_var_df, predict_df,
                                       time_dim='time', dem_path=awd_path,
                                       H_constant=None):
    """Interpolate the variable in *var_ds* (e.g. PWV) at every timestamp
    onto the locations in *predict_df*.

    Parameters
    ----------
    var_ds : xarray Dataset
        Station time series, one data variable per station.
    geo_var_df : pandas DataFrame
        Station metadata with 'lat', 'lon' and 'alt' columns.
    predict_df : pandas DataFrame
        Target locations with 'lat', 'lon' and 'alt' columns.
    time_dim : str
        Name of the time dimension in *var_ds*.
    dem_path : Path
        Folder holding the digital elevation model.
    H_constant : float or None
        Fixed water-vapor scale height (meters); if None, a scale height
        is fitted from the station data at each timestamp.

    Returns
    -------
    pandas DataFrame indexed by time with the interpolated values.
    """
    import pandas as pd
    times = var_ds[time_dim].to_pandas()
    frames = []
    for dt in times:
        print('interpolating on datetime: {}.'.format(dt))
        hdf = slice_var_ds_at_dt_and_convert_to_dataframe(
            var_ds, geo_var_df, dt=dt.strftime('%Y-%m-%dT%H:%M:%S'))
        # Use the fixed scale height if given, otherwise fit one from all
        # stations' data at this timestamp:
        if H_constant is not None:
            H = H_constant
        else:
            H = get_var_lapse_rate(hdf, model='LR', plot=False)
        print('scale height is: {} meters.'.format(H))
        new_hdf = apply_lapse_rate_change(hdf, H)
        df_inter = interpolate_at_one_dt(new_hdf, H, predict_df=predict_df,
                                         dem_path=dem_path, ppd=50)
        df_inter['datetime'] = dt
        df_inter['H'] = H
        frames.append(df_inter)
    # FIX: DataFrame.append is deprecated and removed in pandas 2.0; use
    # pd.concat on the collected frames instead.
    df = pd.concat(frames) if frames else pd.DataFrame()
    df['name'] = df.index
    df['datetime'] = pd.to_datetime(df['datetime'])
    df.set_index('datetime', inplace=True)
    df.index.name = 'time'
    return df
def slice_var_ds_at_dt_and_convert_to_dataframe(var_ds, df, dt='2018-04-15T22:00:00'):
    """Slice *var_ds* (e.g. PWV) at one datetime and attach the station
    coordinates from *df*.

    Parameters
    ----------
    var_ds : xarray Dataset
        Variable (such as PWV) vs. time, one data variable per station.
    df : pandas DataFrame
        'lat', 'lon' and 'alt' columns, indexed by the var_ds data_vars.
    dt : str
        Datetime to slice at, e.g. '2018-04-15T22:00:00'.

    Returns
    -------
    pandas DataFrame of the sliced variable, indexed by altitude and
    sorted ascending, with NaN rows dropped.
    """
    time_dim = list(set(var_ds.dims))[0]
    snapshot = var_ds.sel({time_dim: dt}).expand_dims(time_dim)
    hdf = (snapshot.to_dataframe().T
           .join(df[['lat', 'lon', 'alt']])
           .set_index('alt')
           .sort_index()
           .dropna())
    return hdf
def get_pressure_lapse_rate(path=ims_path, model='LR', plot=False):
    """Fit a linear pressure-vs-altitude model from the IMS barometric
    stations and return the fit results dict (slope in hPa per meter).

    Parameters
    ----------
    path : Path
        Folder containing 'IMS_BP_israeli_10mins.nc'.
    model : str
        Fit model name passed to linear_fit_using_scipy_da_ts.
    plot : bool
        If True, plot the station means and the fitted line.
    """
    from aux_gps import linear_fit_using_scipy_da_ts
    import matplotlib.pyplot as plt
    import xarray as xr
    from aux_gps import keep_iqr
    # FIX: honor the `path` argument; previously the module-level
    # `ims_path` was used unconditionally, silently ignoring `path`.
    bp = xr.load_dataset(path / 'IMS_BP_israeli_10mins.nc')
    # IQR-filter each station's series before averaging:
    bps = [keep_iqr(bp[x]) for x in bp]
    bp = xr.merge(bps)
    mean_p = bp.mean('time').to_array('alt')
    mean_p.name = 'mean_pressure'
    alts = [bp[x].attrs['station_alt'] for x in bp.data_vars]
    mean_p['alt'] = alts
    _, results = linear_fit_using_scipy_da_ts(mean_p, model=model,
                                              slope_factor=1, not_time=True)
    slope = results['slope']
    inter = results['intercept']
    modeled_var = slope * mean_p['alt'] + inter
    if plot:
        fig, ax = plt.subplots()
        modeled_var.plot(ax=ax, color='r')
        mean_p.plot.line(linewidth=0., marker='o', ax=ax, color='b')
        textstr = 'Pressure lapse rate: {:.1f} hPa/km'.format(1000 * slope)
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        # place a text box in upper left in axes coords
        ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=12,
                verticalalignment='top', bbox=props)
        ax.set_xlabel('Height a.s.l [m]')
        ax.set_ylabel('Mean Pressure [hPa]')
    return results
def get_var_lapse_rate(hdf, model='LR', plot=False):
    """Estimate the water-vapor scale height H (meters) by fitting
    ``pwv = pwv0 * exp(-alt / H)`` to the station data in *hdf*
    (first column = variable, index = altitude in meters)."""
    from aux_gps import linear_fit_using_scipy_da_ts
    import matplotlib.pyplot as plt
    import numpy as np
    hda = hdf.iloc[:, 0].to_xarray()
    dt_label = hda.name.strftime('%Y-%m-%d %H:%M')
    hda.name = ''
    # Fit in log space: log(pwv) = log(a0) - alt / H.
    log_hda = np.log(hda)
    _, results = linear_fit_using_scipy_da_ts(log_hda, model=model,
                                              slope_factor=1, not_time=True)
    H = -1.0 / results['slope']
    a0 = np.exp(results['intercept'])
    modeled = a0 * np.exp(-hda['alt'] / H)
    if plot:
        fig, ax = plt.subplots()
        modeled.plot(ax=ax, color='r')
        hda.plot.line(linewidth=0., marker='o', ax=ax, color='b')
        ax.set_title(dt_label)
        textstr = 'WV scale height: {:.1f} m'.format(H)
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        # place a text box in upper left in axes coords
        ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=12,
                verticalalignment='top', bbox=props)
        ax.set_xlabel('Height a.s.l [m]')
        ax.set_ylabel('PWV [mm]')
    return H
def apply_lapse_rate_change(hdf, H):
    """Remove the exponential height decay from the first column of
    *hdf* (indexed by altitude in meters) using scale height *H*.

    Returns a new DataFrame; *hdf* itself is not modified.
    """
    import numpy as np
    # H is a scale height and must be positive:
    assert H > 0
    adjusted = hdf.copy()
    adjusted.iloc[:, 0] = hdf.iloc[:, 0] * np.exp(hdf.index / H)
    return adjusted
def interpolate_at_one_dt(new_hdf, H, predict_df=None, dem_path=awd_path,
                          ppd=50):
    """Interpolate the lapse-rate-corrected values in *new_hdf* onto the
    Israel grid using ordinary kriging, with *ppd* being the map
    resolution (points per degree).

    If *predict_df* is not None, interpolate only to its locations and
    altitudes; it must have 'lat', 'lon' and 'alt' columns.  *H* (meters,
    positive) is the water-vapor scale height used to restore the
    exponential height decay after interpolation.
    """
    # FIX: the docstring above was previously a no-op string literal
    # placed after the imports, so it never became the function __doc__.
    from aux_gps import coarse_dem
    import numpy as np
    from pykrige.rk import Krige
    # create mesh and populate the empty grid with the stations' data:
    da = create_lat_lon_mesh(points_per_degree=ppd)  # 500?
    for i, row in new_hdf.iterrows():
        lat = da.sel(lat=row['lat'], method='nearest').lat.values
        lon = da.sel(lon=row['lon'], method='nearest').lon.values
        da.loc[{'lat': lat, 'lon': lon}] = row.iloc[0]
    c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
    r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
    rr, cc = np.meshgrid(r, c)
    vals = ~np.isnan(da.values)
    X = np.column_stack([rr[vals], cc[vals]])
    rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
    y = da.values[vals]
    model = Krige(method='ordinary', variogram_model='spherical',
                  verbose=True)
    model.fit(X, y)
    if predict_df is None:
        # i.e., interpolate to all map coords:
        interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
        da_inter = da.copy(data=interpolated)
        awd = coarse_dem(da, dem_path=dem_path)
        assert H > 0
        # restore the exponential decay with height using the DEM:
        da_inter *= np.exp(-1.0 * awd / H)
        return da_inter
    else:
        predict_lats = np.linspace(predict_df.lat.min(),
                                   predict_df.lat.max(),
                                   predict_df.lat.values.shape[0])
        predict_lons = np.linspace(predict_df.lon.min(),
                                   predict_df.lon.max(),
                                   predict_df.lon.values.shape[0])
        predict_lons_lats_as_cols = np.column_stack(
            [predict_lons, predict_lats])
        interpolated = model.predict(
            predict_lons_lats_as_cols).reshape((predict_lats.shape))
        df_inter = predict_df.copy()
        df_inter['interpolated'] = interpolated
        # restore the exponential decay with height at the targets' alts:
        assert H > 0
        df_inter['interpolated_lr_fixed'] = df_inter['interpolated'] * np.exp(-1.0 * df_inter['alt'] / H)
        return df_inter
def create_lat_lon_mesh(lats=(29.5, 33.5), lons=(34, 36),
                        points_per_degree=1000):
    """Return an all-NaN lat/lon DataArray covering ``[lats[0], lats[1])``
    by ``[lons[0], lons[1])`` at *points_per_degree* resolution.

    FIX: defaults are now tuples instead of lists to avoid the
    mutable-default-argument pitfall; callers may still pass lists.
    """
    import xarray as xr
    import numpy as np
    lat = np.arange(lats[0], lats[1], 1.0 / points_per_degree)
    lon = np.arange(lons[0], lons[1], 1.0 / points_per_degree)
    nans = np.full((len(lat), len(lon)), np.nan)
    da = xr.DataArray(nans, dims=['lat', 'lon'])
    da['lat'] = lat
    da['lon'] = lon
    return da
def Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD', plot=True,
                             gis_path=gis_path, method='okrig',
                             dem_path=work_yuval / 'AW3D30', lapse_rate=5.,
                             cv=None, rms=None, gridsearch=False):
    """Main 2D interpolation of an IMS station snapshot onto the Israel grid.

    Parameters
    ----------
    time : str
        ISO datetime of the station snapshot to interpolate.
    var : str
        Variable to interpolate; 'TD' gets a lapse-rate correction.
    plot : bool
        Plot the interpolated map (and diagnostics if cv/rms computed).
    gis_path : Path
        Folder holding the GIS shapefiles.
    method : str
        One of 'gp-rbf', 'gp-qr', 'knn', 'svr', 'okrig', 'ukrig'.
    dem_path : Path
        Folder holding the digital elevation model.
    lapse_rate : float, 'auto' or None
        Temperature lapse rate in degC/km; 'auto' fits it from the data;
        None switches to 3D ECEF coordinates instead of correcting.
    cv : dict or None
        Cross-validation spec, e.g. {'kfold': 5} or {'rkfold': [2, 3]}.
    rms : optional
        If not None (and cv is None), compute the in-sample MSE.
    gridsearch : bool
        If True, grid-search kriging hyper-parameters and return the
        fitted GridSearchCV estimator instead of the interpolated map.

    Returns
    -------
    xarray.DataArray of the interpolated map, or the GridSearchCV
    estimator when *gridsearch* is True.
    """
    # TODO: try 1d modeling first, like T=f(lat)
    import numpy as np  # FIX: np was used throughout but never imported here
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from pykrige.rk import Krige
    from sklearn.svm import SVR
    from sklearn.metrics import mean_squared_error
    from aux_gps import coarse_dem
    import seaborn as sns
    import matplotlib.pyplot as plt
    import pyproj
    from pykrige.compat import GridSearchCV
    lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')

    def parse_cv(cv):
        """Translate a cv spec dict into an sklearn splitter."""
        from sklearn.model_selection import KFold
        from sklearn.model_selection import RepeatedKFold
        from sklearn.model_selection import LeaveOneOut
        if 'kfold' in cv.keys():
            n_splits = cv['kfold']
            print('CV is KFold with n_splits={}'.format(n_splits))
            return KFold(n_splits=n_splits)
        if 'rkfold' in cv.keys():
            n_splits = cv['rkfold'][0]
            n_repeats = cv['rkfold'][1]
            print('CV is ReapetedKFold with n_splits={},'.format(n_splits) +
                  ' n_repeates={}'.format(n_repeats))
            return RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
                                 random_state=42)
        if 'loo' in cv.keys():
            return LeaveOneOut()

    da = create_lat_lon_mesh(points_per_degree=250)  # 500?
    awd = coarse_dem(da)
    awd = awd.values
    # NOTE(review): geo_pandas_time_snapshot is not defined or imported in
    # the visible part of this module -- verify it is available at runtime.
    geo_snap = geo_pandas_time_snapshot(var=var, datetime=time, plot=False)
    if var == 'TD':
        # Fit the station lapse rate (degC per meter) from the snapshot:
        [a, b] = np.polyfit(geo_snap['alt'].values, geo_snap['TD'].values, 1)
        if lapse_rate == 'auto':
            lapse_rate = np.abs(a) * 1000
        fig, ax_lapse = plt.subplots(figsize=(10, 6))
        sns.regplot(data=geo_snap, x='alt', y='TD', color='r',
                    scatter_kws={'color': 'b'}, ax=ax_lapse)
        suptitle = time.replace('T', ' ')
        ax_lapse.set_xlabel('Altitude [m]')
        ax_lapse.set_ylabel('Temperature [degC]')
        ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
                      horizontalalignment='center', verticalalignment='center',
                      transform=ax_lapse.transAxes, fontsize=12, color='k',
                      fontweight='bold')
        ax_lapse.grid()
        ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
    # Fill the empty mesh with (lapse-rate corrected) station values:
    alts = []
    for i, row in geo_snap.iterrows():
        lat = da.sel(lat=row['lat'], method='nearest').lat.values
        lon = da.sel(lon=row['lon'], method='nearest').lon.values
        alt = row['alt']
        if lapse_rate is not None and var == 'TD':
            da.loc[{'lat': lat, 'lon': lon}] = row[var] + \
                lapse_rate * alt / 1000.0
            alts.append(alt)
        elif lapse_rate is None or var != 'TD':
            da.loc[{'lat': lat, 'lon': lon}] = row[var]
            alts.append(alt)
    c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
    r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
    rr, cc = np.meshgrid(r, c)
    vals = ~np.isnan(da.values)
    if lapse_rate is None:
        # No lapse-rate correction: interpolate in 3D ECEF coordinates.
        Xrr, Ycc, Z = pyproj.transform(
            lla, ecef, rr[vals], cc[vals], np.array(alts), radians=False)
        X = np.column_stack([Xrr, Ycc, Z])
        # FIX: awd is already an ndarray here (awd = awd.values above);
        # the original passed awd.values, which raises AttributeError.
        XX, YY, ZZ = pyproj.transform(lla, ecef, rr, cc, awd,
                                      radians=False)
        rr_cc_as_cols = np.column_stack([XX.flatten(), YY.flatten(), ZZ.flatten()])
    else:
        X = np.column_stack([rr[vals], cc[vals]])
        rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
    y = da.values[vals]
    # Build the requested regression model:
    if method == 'gp-rbf':
        from sklearn.gaussian_process.kernels import RBF
        from sklearn.gaussian_process.kernels import WhiteKernel
        kernel = 1.0 * RBF(length_scale=0.25, length_scale_bounds=(1e-2, 1e3)) \
            + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
        model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
                                         n_restarts_optimizer=5,
                                         random_state=42, normalize_y=True)
    elif method == 'gp-qr':
        from sklearn.gaussian_process.kernels import RationalQuadratic
        from sklearn.gaussian_process.kernels import WhiteKernel
        kernel = RationalQuadratic(length_scale=100.0) \
            + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
        model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
                                         n_restarts_optimizer=5,
                                         random_state=42, normalize_y=True)
    elif method == 'knn':
        model = KNeighborsRegressor(n_neighbors=5, weights='distance')
    elif method == 'svr':
        model = SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,
                    gamma='auto_deprecated', kernel='rbf', max_iter=-1,
                    shrinking=True, tol=0.001, verbose=False)
    elif method == 'okrig':
        model = Krige(method='ordinary', variogram_model='spherical',
                      verbose=True)
    elif method == 'ukrig':
        model = Krige(method='universal', variogram_model='linear',
                      verbose=True)
    else:
        # FIX: an unknown method previously fell through and raised a
        # confusing NameError at model.fit below.
        raise ValueError('unknown interpolation method: {}'.format(method))
    # NOTE: a 3D kriging variant (okrig3d) and regression kriging were
    # tried and dropped (MemoryError / not worthwhile).
    if cv is not None and not gridsearch:
        from sklearn import metrics
        cv = parse_cv(cv)
        ytests = []
        ypreds = []
        for train_idx, test_idx in cv.split(X):
            X_train, X_test = X[train_idx], X[test_idx]  # requires arrays
            y_train, y_test = y[train_idx], y[test_idx]
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            # there is only one y-test and y-pred per iteration over the
            # loo.split, so to get a proper graph, we append them to
            # respective lists.
            ytests += list(y_test)
            ypreds += list(y_pred)
        true_vals = np.array(ytests)
        predicted = np.array(ypreds)
        r2 = metrics.r2_score(ytests, ypreds)
        ms_error = metrics.mean_squared_error(ytests, ypreds)
        print("R^2: {:.5f}%, MSE: {:.5f}".format(r2 * 100, ms_error))
    if gridsearch:
        cv = parse_cv(cv)
        param_dict = {"method": ["ordinary", "universal"],
                      "variogram_model": ["linear", "power", "gaussian",
                                          "spherical"],
                      # "nlags": [4, 6, 8],
                      # "weight": [True, False]
                      }
        estimator = GridSearchCV(Krige(), param_dict, verbose=True, cv=cv,
                                 scoring='neg_mean_absolute_error',
                                 return_train_score=True, n_jobs=1)
        estimator.fit(X, y)
        if hasattr(estimator, 'best_score_'):
            print('best_score = {:.3f}'.format(estimator.best_score_))
            print('best_params = ', estimator.best_params_)
        return estimator
    # FIX: a commented-out manual-kriging CV block that used to live here
    # had live plotting statements (quiver/colorbar on the undefined name
    # `isr`) accidentally spliced into it, raising NameError -- removed.
    model.fit(X, y)
    interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
    da_inter = da.copy(data=interpolated)
    if lapse_rate is not None and var == 'TD':
        # Undo the lapse-rate correction using the DEM heights:
        da_inter -= lapse_rate * awd / 1000.0
    if (rms is not None and cv is None):
        # In-sample error at the station pixels:
        predicted = []
        true_vals = []
        for i, row in geo_snap.iterrows():
            lat = da.sel(lat=row['lat'], method='nearest').lat.values
            lon = da.sel(lon=row['lon'], method='nearest').lon.values
            pred = da_inter.loc[{'lat': lat, 'lon': lon}].values.item()
            true = row[var]
            predicted.append(pred)
            true_vals.append(true)
        predicted = np.array(predicted)
        true_vals = np.array(true_vals)
        ms_error = mean_squared_error(true_vals, predicted)
        print("MSE: {:.5f}".format(ms_error))
    if plot:
        import salem
        from salem import DataLevels, Map
        f, ax = plt.subplots(figsize=(6, 10))
        shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')
        shdf.crs = {'init': 'epsg:4326'}
        # Clip the interpolated map to the Israel shape:
        dsr = da_inter.salem.roi(shape=shdf)
        grid = da_inter.salem.grid
        sm = Map(grid)
        sm.set_data(dsr)
        sm.set_shapefile(gis_path / 'gis_osm_water_a_free_1.shp',
                         edgecolor='k')  # , facecolor='aqua')
        sm.set_cmap(cm='rainbow')
        sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
                     cbar_title='degC')
        # Overlay the stations, colored with the same levels as the map:
        dl = DataLevels(geo_snap[var], levels=sm.levels)
        dl.set_cmap(sm.cmap)
        x, y = sm.grid.transform(geo_snap.lon.values, geo_snap.lat.values)
        ax.scatter(x, y, color=dl.to_rgb(), s=20, edgecolors='k', linewidths=0.5)
        suptitle = time.replace('T', ' ')
        f.suptitle(suptitle, fontsize=14, fontweight='bold')
        if (rms is not None or cv is not None) and (not gridsearch):
            # Diagnostics: predicted vs. true and residual histogram.
            f, ax = plt.subplots(1, 2, figsize=(12, 6))
            sns.scatterplot(x=true_vals, y=predicted, ax=ax[0], marker='.',
                            s=100)
            resid = predicted - true_vals
            sns.distplot(resid, bins=5, color='c', label='residuals',
                         ax=ax[1])
            rmean = np.mean(resid)
            rmse = np.sqrt(mean_squared_error(true_vals, predicted))
            plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
            _, max_ = plt.ylim()
            plt.text(rmean + rmean / 10, max_ - max_ / 10,
                     'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
            f.tight_layout()
    return da_inter
| 2.15625 | 2 |
face_alignment/detection/dlib/dlib_detector.py | NovemberJoy/VTuber_Unity | 669 | 12760928 | <filename>face_alignment/detection/dlib/dlib_detector.py
import os
import cv2
import dlib
try:
import urllib.request as request_file
except BaseException:
import urllib as request_file
from ..core import FaceDetector
from ...utils import appdata_dir
class DlibDetector(FaceDetector):
    """Face detector backed by dlib.

    Uses dlib's CNN face detector (downloaded on first use) on CUDA
    devices, and the HOG-based frontal face detector on CPU.
    """

    def __init__(self, device, path_to_detector=None, verbose=False):
        super().__init__(device, verbose)

        base_path = os.path.join(appdata_dir('face_alignment'), "data")

        # Initialise the face detector
        if 'cuda' in device:
            if path_to_detector is None:
                path_to_detector = os.path.join(
                    base_path, "mmod_human_face_detector.dat")

                if not os.path.isfile(path_to_detector):
                    print("Downloading the face detection CNN. Please wait...")

                    # Download to a temporary name first so an interrupted
                    # download never leaves a truncated model file behind.
                    path_to_temp_detector = os.path.join(
                        base_path, "mmod_human_face_detector.dat.download")

                    # FIX: removed redundant single-argument os.path.join
                    # wrappers around paths that were already joined.
                    if os.path.isfile(path_to_temp_detector):
                        os.remove(path_to_temp_detector)

                    request_file.urlretrieve(
                        "https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat",
                        path_to_temp_detector)

                    os.rename(path_to_temp_detector, path_to_detector)

            self.face_detector = dlib.cnn_face_detection_model_v1(path_to_detector)
        else:
            self.face_detector = dlib.get_frontal_face_detector()

    def detect_from_image(self, tensor_or_path):
        """Return detected face boxes as [left, top, right, bottom] lists."""
        image = self.tensor_or_path_to_ndarray(tensor_or_path, rgb=False)

        detected_faces = self.face_detector(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))

        # CPU detections are plain rectangles; CUDA detections wrap the
        # rectangle in an mmod_rectangle (.rect).
        if 'cuda' not in self.device:
            detected_faces = [[d.left(), d.top(), d.right(), d.bottom()] for d in detected_faces]
        else:
            detected_faces = [[d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()] for d in detected_faces]

        return detected_faces

    @property
    def reference_scale(self):
        # Bounding-box scale expected by the downstream alignment code.
        return 195

    @property
    def reference_x_shift(self):
        return 0

    @property
    def reference_y_shift(self):
        return 0
| 2.734375 | 3 |
xlsUtil.py | angelzouxin/CountOJ | 2 | 12760929 | # coding=utf-8
__author__ = 'zouxin'
import xlrd
import xlwt
class xlsUtil:
    """Small helpers for writing and reading XLS workbooks."""

    # Bold, wrapped, centred style used for header cells.
    heading_xf = xlwt.easyxf('font: bold on; align:wrap on, vert centre, horiz center')

    @staticmethod
    def write_xls(sheet_name, headings, data):
        """Write *headings* as a frozen first row, then *data* rows below."""
        header_style = xlsUtil.heading_xf
        for col_idx, heading in enumerate(headings):
            sheet_name.write(0, col_idx, heading, header_style)
        # Freeze the heading row instead of using split panes.
        sheet_name.set_panes_frozen(True)
        # In general, freeze after the last heading row.
        sheet_name.set_horz_split_pos(1)
        # If the user unfreezes, don't leave a split there.
        sheet_name.set_remove_splits(True)
        for row_idx, row in enumerate(data, start=1):
            for col_idx, cell in enumerate(row):
                sheet_name.write(row_idx, col_idx, cell)

    @staticmethod
    def read_xls(file_name, sheet_name):
        """Return (header_row, data_rows) from *sheet_name* in *file_name*."""
        workbook = xlrd.open_workbook(file_name)
        table = workbook.sheet_by_name(sheet_name)
        header = table.row_values(0)
        rows = [table.row_values(r) for r in range(1, table.nrows)]
        return header, rows
| 3.015625 | 3 |
growser/commands/github.py | JohnnyPeng18/growser | 0 | 12760930 | from growser.cmdr import Command
class UpdateFromGitHubAPI(Command):
    """Command to refresh a single repository from the GitHub API."""

    def __init__(self, name):
        # Repository name as known to GitHub.
        self.name = name

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<{} {}>'.format(cls_name, self.name)
class BatchUpdateFromGitHubAPI(Command):
    """Command to batch-refresh repository metadata from the GitHub API."""

    def __init__(self, limit: int, batch_size: int, rating_window: int=90,
                 task_window: int=30, min_events: int=100):
        """Update local repository data using the GitHub API.

        For example, to update 1,250 repositories in batches of 100 based on the
        most number of ratings in the prior 180 days that have not already been
        updated in the prior 45 days::

            command = BatchUpdateFromGitHubAPI(1250, 100, 180, 45)

        :param limit: Total number of repositories to update.
        :param batch_size: Number of API requests to wait for before updating
            our local data.
        :param rating_window: Prioritize repositories based on the number of
            events within this window of days.
        :param task_window: Don't include repositories that have already been
            updated within this number of days.
        :param min_events: Minimum number of events within `rating_window`.

        ..note:: Will be deprecated once event listeners have been implemented.
        """
        self.limit = limit
        self.batch_size = batch_size
        self.rating_window = rating_window
        self.task_window = task_window
        self.min_events = min_events
| 2.984375 | 3 |
app/admin.py | Kanogaelias/neighbourhoodwatch | 1 | 12760931 | <filename>app/admin.py
from django.contrib import admin
from .models import Neighbourhood,Profile,Business
admin.site.register(Neighbourhood)
admin.site.register(Profile)
admin.site.register(Business) | 1.351563 | 1 |
scripts/squid/JS124S/configs/j2_phaseshift_by_config.py | ShabaniLab/DataAnalysis | 6 | 12760932 | <gh_stars>1-10
#: Common folder in which the data file are related.
DATA_ROOT_FOLDER = '/Users/mdartiailh/Labber/Data/2019/'
#: Dictionary of parallel field, file path.
DATA_PATHS = {400: '03/Data_0316/JS124S_BM002_390.hdf5',
# 350: '03/Data_0317/JS124S_BM002_392.hdf5',
# 300: '03/Data_0318/JS124S_BM002_394.hdf5',
250: '03/Data_0318/JS124S_BM002_395.hdf5',
# 200: '03/Data_0318/JS124S_BM002_396.hdf5',
# 150: '03/Data_0319/JS124S_BM002_397.hdf5',
100: '03/Data_0321/JS124S_BM002_405.hdf5',
# 50: '03/Data_0320/JS124S_BM002_402.hdf5',
-300: '04/Data_0430/JS124S_BM002_532.hdf5',
}
#: Perpendicular field range to fit for each parallel field
FIELD_RANGES = {400: (-8e-3, -5.5e-3),
350: (None, -6e-3),
300: (-6.59e-3, -4.75e-3),
250: (),
200: (),
150: (-5.05e-3, None),
100: (-3.9e-3, -1.1e-3),
50: (-2.2e-3, None),
-300: (-1e-3, 1.2e-3),
}
#: Guess for the transparency of the junctions as a function of the field.
TRANSPARENCY_GUESS = {400: 0.01,
350: 0.1,
300: 0.2,
250: 0.3,
200: 0.4,
150: 0.6,
100: 0.8,
-300: 0.2,
}
#: Name/index of the gate column.
GATE_COLUMN = 1
#: Gate values for which to skip the analysis. The values should be present
#: in the datasets.
EXCLUDED_GATES = [-4.75, -3.5, -2.5, -2.0, -1.0, 1.0, 2.0, 3.0]
#: Name/index of the perpendicular field column.
FIELD_COLUMN = 2
#: Name/index of the bias current column.
BIAS_COLUMN = 0
#: Name/column of the differential resistance column.
RESISTANCE_COLUMN = 3
#: Threshold value used to determine the switching current.
RESISTANCE_THRESHOLD = 1.4e-7
#: Should we plot the extracted switching current on top of the SQUID
#: oscillations
PLOT_EXTRACTED_SWITCHING_CURRENT = False
#: Enforce equality of the transparencies
EQUAL_TRANSPARENCIES = True
#: Sign of the phase difference created by the perpendicular field.
PHASE_SIGN = 1
#: Handedness of the system.
HANDEDNESS = -1
#: Correction factor to apply on the estimated pulsation
CONVERSION_FACTOR_CORRECTION = 1.07
#: Should we plot the initial guess for each trace.
PLOT_INITIAL_GUESS = True
#: Should we plot the fit for each trace.
PLOT_FITS = True
#: Path to which save the graphs and fitted parameters.
# ANALYSIS_PATH = ('/Users/mdartiailh/Documents/PostDocNYU/DataAnalysis/'
# 'SQUID/phaseshift_low_field/j2/By/active_t_fixed')
| 1.882813 | 2 |
exp/exp10_performance_ggn_evals/call_run_time_evecs.py | jbzrE7bp/vivit | 0 | 12760933 | """Execute time benchmarks in a separate python session for each config."""
import os
from typing import Iterable, Tuple
from run_time_evecs import __name__ as SCRIPT
from run_time_evecs import get_output_file
from shared import layerwise_group, one_group
from shared_call import run
from shared_evecs import (
frac_batch_exact,
frac_batch_mc,
full_batch_exact,
full_batch_mc,
)
from exp.utils.deepobs import (
cifar10_3c3d,
cifar10_resnet32,
cifar10_resnet56,
cifar100_allcnnc,
fmnist_2c2d,
)
# Define settings
computations_cases = [
full_batch_exact.__name__,
full_batch_mc.__name__,
frac_batch_exact.__name__,
frac_batch_mc.__name__,
]
param_groups_cases = [
one_group.__name__,
layerwise_group.__name__,
]
architecture_cases = [
cifar10_3c3d.__name__,
fmnist_2c2d.__name__,
cifar100_allcnnc.__name__,
cifar10_resnet32.__name__,
cifar10_resnet56.__name__,
]
device_cases = ["cuda", "cpu"]
batch_sizes = {
cifar10_3c3d.__name__: "128",
fmnist_2c2d.__name__: "128",
cifar100_allcnnc.__name__: "64",
cifar10_resnet32.__name__: "128",
cifar10_resnet56.__name__: "128",
}
K_MAX = 10
def configurations_no_k() -> Iterable[Tuple[str, str, str, str, str]]:
"""Yield all configurations without looping over k."""
for architecture in architecture_cases:
N = batch_sizes[architecture]
for device in device_cases:
for param_groups in param_groups_cases:
for computations in computations_cases:
yield N, architecture, device, param_groups, computations
def configurations() -> Iterable[Tuple[str, str, str, str, str, str]]:
    """Yield all configurations, extending each base setting with K = 1..K_MAX."""
    for base in configurations_no_k():
        N, architecture, device, param_groups, computations = base
        for k in range(1, K_MAX + 1):
            yield N, architecture, device, param_groups, computations, str(k)
if __name__ == "__main__":
    # Launch eigenpair run time benchmark which creates an output file.
    # Each configuration runs in its own Python process (see module docstring).
    for N, architecture, device, param_groups, computations, K in configurations():
        DATA_FILE = get_output_file(
            architecture, device, param_groups, computations, N, K
        )
        # Skip configurations whose result file already exists (resumable runs)
        if os.path.exists(DATA_FILE):
            print(
                "[exp10] Skipping computation. "
                + f"Output file already exists: {DATA_FILE}"
            )
            continue
        print(
            f"\narchitecture = {architecture}\n"
            + f"param_groups = {param_groups}\n"
            + f"computations = {computations}\n"
            + f"device = {device}\n"
            + f"N = {N}\n"
            + f"K = {K}\n"
        )
        # All arguments are passed as strings on the command line of the
        # benchmark script, which writes DATA_FILE on success.
        cmd = [
            "python",
            f"{SCRIPT}.py",
            N,
            device,
            architecture,
            param_groups,
            computations,
            K,
        ]
        run(cmd)
| 1.960938 | 2 |
bourbon/test_data_quality_controller.py | LaPetiteSouris/KuronoSoshiki | 1 | 12760934 | # -*- coding: utf-8 -*-
import data_quality_controller as controller
def test_check_email():
    """check_email accepts a well-formed address and rejects plain text."""
    valid = "<EMAIL>"
    invalid = "totododo"
    assert controller.check_email(valid) is True
    assert controller.check_email(invalid) is False
def test_check_ip():
    """check_ip accepts a dotted-quad address and rejects malformed input."""
    good, bad = "192.168.127.12", "130.xx.82.195"
    assert controller.check_ip(good) is True
    assert controller.check_ip(bad) is False
def test_full_check():
    """perform_full_check flags only the validated columns (email, ip_address).

    Columns without a validator come back as None; validated ones as bool.
    """
    data = {
        "id": 1,
        "first_name": "barthel",
        "last_name": "kittel",
        "email": "<EMAIL>",
        "gender": "Male",
        "ip_address": "192.168.127.12",
        "date": "06/05/2018",
        "country": "france"
    }
    expected = {
        "id": None,
        "first_name": None,
        "last_name": None,
        "email": True,
        "gender": None,
        "ip_address": True,
        "date": None,
        "country": None
    }
    assert expected == controller.perform_full_check(data)
tests/test_observable/test_takewhile.py | MichaelSchneeberger/RxPY | 0 | 12760935 | import unittest
import rx
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest, is_prime
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestTakeWhile(unittest.TestCase):
    """Marble tests for the take_while / take_while_indexed operators.

    Each test drives a hot observable on a virtual TestScheduler (subscription
    starts at t=200) and checks the emitted messages, the subscription window,
    and how many times the predicate was invoked.
    """

    def test_take_while_complete_Before(self):
        """Source completes (t=330) while the predicate still holds."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_completed(330), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def factory():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(factory)
        assert results.messages == [on_next(210, 2), on_next(
            260, 5), on_next(290, 13), on_next(320, 3), on_completed(330)]
        assert xs.subscriptions == [subscribe(200, 330)]
        assert(invoked == 4)

    def test_take_while_complete_after(self):
        """Predicate fails (non-prime 4 at t=390) before the source completes."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def factory():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(factory)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
            290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
        assert xs.subscriptions == [subscribe(200, 390)]
        assert(invoked == 6)

    def test_take_while_error_before(self):
        """Source errors (t=270) while the predicate still holds."""
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_error(
            270, ex), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23))
        invoked = 0
        def factory():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(factory)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_error(270, ex)]
        assert xs.subscriptions == [subscribe(200, 270)]
        assert(invoked == 2)

    def test_take_while_error_after(self):
        """Predicate fails before the source's later error; error never seen."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_error(600, 'ex'))
        invoked = 0
        def factory():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(factory)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
            290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
        assert xs.subscriptions == [subscribe(200, 390)]
        assert(invoked == 6)

    def test_take_while_dispose_before(self):
        """Subscription disposed (t=300) before the predicate ever fails."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def create():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(create, disposed=300)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(290, 13)]
        assert xs.subscriptions == [subscribe(200, 300)]
        assert(invoked == 3)

    def test_take_while_dispose_after(self):
        """Predicate fails (t=390) before the scheduled disposal (t=400)."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def create():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(create, disposed=400)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
            290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
        assert xs.subscriptions == [subscribe(200, 390)]
        assert(invoked == 6)

    def test_take_while_zero(self):
        """First element (100, not prime) fails the predicate immediately."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(
            290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def create():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(create, disposed=300)
        assert results.messages == [on_completed(205)]
        assert xs.subscriptions == [subscribe(200, 205)]
        assert (invoked == 1)

    def test_take_while_on_error(self):
        """An exception raised inside the predicate propagates as on_error."""
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
            320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        invoked = 0
        def factory():
            def predicate(x):
                nonlocal invoked
                invoked += 1
                if invoked == 3:
                    raise Exception(ex)
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
        results = scheduler.start(factory)
        assert results.messages == [on_next(210, 2), on_next(260, 5), on_error(290, ex)]
        assert xs.subscriptions == [subscribe(200, 290)]
        assert(invoked == 3)

    def test_take_while_index(self):
        """take_while_indexed keeps elements while their index is below 5."""
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(
            290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
        def factory():
            return xs.pipe(ops.take_while_indexed(lambda x, i: i < 5))
        results = scheduler.start(factory)
        assert results.messages == [on_next(205, 100), on_next(210, 2), on_next(
            260, 5), on_next(290, 13), on_next(320, 3), on_completed(350)]
        assert xs.subscriptions == [subscribe(200, 350)]
| 2.71875 | 3 |
corehq/apps/domain/views/repeaters.py | rochakchauhan/commcare-hq | 0 | 12760936 | <reponame>rochakchauhan/commcare-hq
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
import csv
from corehq.apps.domain.utils import send_repeater_payloads
from corehq.apps.users.decorators import require_can_edit_web_users
@require_POST
@require_can_edit_web_users
def generate_repeater_payloads(request, domain):
    """Queue regeneration of repeater payloads for ids uploaded as a CSV file.

    Expects POST fields ``email_id`` and ``repeater_id`` plus an uploaded
    ``payload_ids_file`` whose first column holds the payload ids; the result
    is emailed to ``email_id``. Always redirects back to the referring page.
    """
    try:
        email_id = request.POST.get('email_id')
        repeater_id = request.POST.get('repeater_id')
        # first CSV column of each row is a payload id
        data = csv.reader(request.FILES['payload_ids_file'])
        payload_ids = [row[0] for row in data]
    except Exception as e:
        # best effort: surface any parsing problem to the user instead of a 500
        messages.error(request, _("Could not process the file. %s") % str(e))
    else:
        # heavy work runs asynchronously via celery
        send_repeater_payloads.delay(repeater_id, payload_ids, email_id)
        messages.success(request, _("Successfully queued request. You should receive an email shortly."))
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
| 2.09375 | 2 |
src/vsc/model/field_const_array_model.py | aneels3/pyvsc | 0 | 12760937 | <reponame>aneels3/pyvsc<gh_stars>0
'''
Created on Jun 21, 2020
@author: ballance
'''
from vsc.model.field_array_model import FieldArrayModel
class FieldConstArrayModel(FieldArrayModel):
    """Fixed-size array field whose elements hold the constant values in *data*."""

    def __init__(self, name, data):
        # NOTE(review): positional flags follow FieldArrayModel's signature
        # (default width 32) - confirm against the base class.
        super().__init__(name, True, 32, False, False, False)

        # Derive signedness / bit width from the range of the data.
        # (max_v < 0 implies min_v < 0, so the first test is redundant.)
        max_v = max(data)
        min_v = min(data)
        if max_v < 0 or min_v < 0:
            # Any negative element -> signed.
            # NOTE(review): width is left at the base-class default for signed
            # data - confirm this is intended.
            self.is_signed = True
        else:
            # Smallest width that holds max_v, starting from 1 bit
            self.width = 1
            while max_v > 0:
                self.width += 1
                max_v >>= 1

        # Create one element field per value and pin it to that value
        for v in data:
            f = self.add_field()
            f.set_val(v)
        self.size.set_val(len(data))
| 2.75 | 3 |
cfg/base/Config.py | vancebs/EasyCoding3 | 0 | 12760938 | #!/usr/bin/python
# coding=utf-8
from GlobalConfig import GlobalConfig
class Config(GlobalConfig):
    """Per-project build configuration.

    Class attributes marked 'NA' are either overridden by subclasses or
    derived in __init__() from the GlobalConfig base values.
    """

    cfgName = 'NA'  # set to the subclass name in __init__()

    # global config (inherited from GlobalConfig; listed here for reference)
    # cfgGlobalBaseDir = 'NA'
    # cfgGlobalUrlRepoPull = 'NA'
    # cfgGlobalUrlRepoPush = 'NA'
    # cfgGlobalUserName = 'NA'
    # cfgGlobalUserEmail = 'NA'
    cfgGlobalBackupDir = 'NA'  # generated by __init__()

    # program config. auto generated
    cfgProgramDir = 'NA'
    cfgProgramCmdDir = 'NA'
    cfgProgramCfgDir = 'NA'
    cfgProgramCmdList = 'NA'
    cfgProgramCfgList = 'NA'
    cfgProgramCfgFile = 'NA'

    # project config. configured by cfg
    cfgProjectName = 'NA'
    cfgProjectBranch = 'NA'
    cfgProjectRootDir = 'NA'  # generated by __init__()
    cfgProjectRootDirName = 'NA'
    cfgProjectOutDir = 'NA'  # generated by __init__()
    cfgProjectBackupDir = 'NA'  # generated by __init__()
    cfgProjectEnvSetup = './build/envsetup.sh'
    cfgProjectEnvConfig = 'NA'
    # partition name -> image file flashed to it
    cfgProjectFlashMap = {
        'modem': 'NON-HLOS.bin',
        'sbl1': 'sbl1.mbn',
        'sbl2': 'sbl2.mbn',
        'sbl3': 'sbl3.mbn',
        'tz': 'tz.mbn',
        'rpm': 'rpm.mbn',
        'boot': 'boot.img',
        'cache': 'cache.img',
        'system': 'system.img',
        'persist': 'persist.img',
        'userdata': 'userdata.img',
        'recovery': 'recovery.img',
        'custpack': 'custpack.img',
        'vendor': 'vendor.img'
    }
    cfgProjectUrlRepoPull = 'NA'  # generated by __init__()
    cfgProjectUrlRepoPush = 'NA'  # generated by __init__()
    cfgProjectUrlRepoPullManifest = 'manifests'
    cfgProjectUrlRepoRepository = 'quicl'
    cfgProjectRepoBin = 'repo'

    def __init__(self):
        # Derive all 'generated' paths/URLs from the global + project settings
        self.cfgName = self.__class__.__name__
        # global
        self.cfgGlobalBackupDir = '%s/backup' % self.cfgGlobalBaseDir
        # project
        self.cfgProjectRootDir = '%s/%s' % (self.cfgGlobalBaseDir, self.cfgProjectRootDirName)
        self.cfgProjectOutDir = '%s/out/target/product/%s' % (self.cfgProjectRootDir, self.cfgProjectName)
        self.cfgProjectBackupDir = '%s/%s' % (self.cfgGlobalBackupDir, self.cfgProjectRootDirName)
        self.cfgProjectUrlRepoPull = '%s%s/%s.git' % (self.cfgGlobalUrlRepoPull,
                                                      self.cfgProjectUrlRepoRepository,
                                                      self.cfgProjectUrlRepoPullManifest)
        self.cfgProjectUrlRepoPush = '%s%s' % (self.cfgGlobalUrlRepoPush, self.cfgProjectUrlRepoRepository)
| 1.867188 | 2 |
backends/paltosf/__init__.py | snlab/alto-server | 0 | 12760939 | #!/usr/bin/env python3
from .sfbackend import create_instance

# Export the backend factory as the package's public API. Previously
# __all__ was empty, which hid create_instance from wildcard imports
# even though it is the only name this package re-exports.
__all__ = ['create_instance']
| 1.078125 | 1 |
output.py | AdamGlass/route_tools | 0 | 12760940 | from geojson import LineString, Feature, FeatureCollection, Point
import geojson
def simple_output(parser, place_data):
    """Print one 'name, address, opening hours' line per place.

    The *parser* argument is accepted for interface parity with the other
    output functions but is not used here.
    """
    for place in place_data:
        print(f'{place.name}, {place.address}, {place.opening_hours_text}')
def geojson_output(parser, place_data):
    """Print the parsed route plus all places as one GeoJSON FeatureCollection."""
    # Route first: a LineString over all (lon, lat) track points
    route_coords = [(point['lon'], point['lat']) for point in parser.points()]
    features = [Feature(geometry=LineString(route_coords))]
    # Then one Point feature per place, carrying its fields as properties
    for place in place_data:
        features.append(
            Feature(geometry=Point(place.coord), properties=place._asdict()))
    print(geojson.dumps(FeatureCollection(features), sort_keys=True, indent=4))
| 3.109375 | 3 |
pychron/pyscripts/commands/bakeout.py | ASUPychron/pychron | 31 | 12760941 | # ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Float, Str
from traitsui.api import View, Item, VGroup, EnumEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.pyscripts.commands.core import Command
from traitsui.menu import OKCancelButtons
class Ramp(Command):
    """Pyscript command that ramps the bakeout setpoint to ``setpoint``
    at ``rate`` (C/hr).

    Optional keywords: ``start`` (starting setpoint, C, entered as text)
    and ``period`` (update period, s; only emitted when it differs from
    the default).
    """

    setpoint = Float(1)
    rate = Float(1)
    start = Str
    _default_period = 60
    period = Float(_default_period)

    def traits_view(self):
        """Edit view for the ramp parameters."""
        v = View(
            Item("setpoint", label="Setpoint (C)"),
            Item("rate", label="Rate C/hr"),
            VGroup(
                Item("start", label="Start Setpoint (C)"),
                Item("period", label="Update Period (s)"),
                show_border=True,
                label="Optional",
            ),
            buttons=OKCancelButtons,
        )
        return v

    def _to_string(self):
        """Serialize the command to its keyword-argument string form."""
        # Parse the optional start field; blank/invalid text means "not set".
        # Bug fix: the previous code called float() on a local variable that
        # was always None (instead of self.start), so the 'start' keyword
        # could never be emitted.
        try:
            start = float(self.start)
        except (ValueError, TypeError):
            start = None

        words = [
            ("setpoint", self.setpoint, True),
            ("rate", self.rate, True),
        ]
        if start is not None:
            words.append(("start", start, True))
        # Only serialize the period when the user changed it
        if self.period != self._default_period:
            words.append(("period", self.period, True))
        return self._keywords(words)
time_dict = dict(h="hours", m="minutes", s="seconds")
class Setpoint(Command):
    """Pyscript command that holds a bakeout temperature for a duration."""

    setpoint = Float  # temperature (C)
    duration = Float  # how long to hold, expressed in ``units``
    units = Str("h")  # one of time_dict's keys: 'h', 'm', 's'

    def _get_view(self):
        # NOTE(review): returns a bare VGroup rather than a full View;
        # presumably a base-class traits_view wraps it - confirm in Command.
        v = VGroup(
            Item("setpoint", label="Temperature (C)"),
            Item("duration", label="Duration (units)"),
            Item(
                "units",
                editor=EnumEditor(values=time_dict),
            ),
        )
        return v

    def _to_string(self):
        # Serialize as temperature/duration/units keyword arguments
        words = [
            ("temperature", self.setpoint, True),
            ("duration", self.duration, True),
            ("units", self.units),
        ]
        return self._keywords(words)
# ============= EOF =============================================
| 1.609375 | 2 |
tests/test_spacetrack.py | panyicast/spacetrack | 2 | 12760942 | <reponame>panyicast/spacetrack
# coding: utf-8
from __future__ import absolute_import, division, print_function
import datetime as dt
import json
import pytest
import requests
import responses
from requests import HTTPError, Response
from spacetrack import AuthenticationError, SpaceTrackClient
from spacetrack.base import (
Predicate, _iter_content_generator, _iter_lines_generator,
_raise_for_status)
try:
from unittest.mock import Mock, call, patch
except ImportError:
from mock import Mock, call, patch
def test_iter_lines_generator():
    """Test that lines are split correctly."""
    def mock_iter_content(self, chunk_size, decode_unicode):
        # CRLF pairs are deliberately split across chunk boundaries
        for chunk in ['1\r\n2\r\n', '3\r', '\n4', '\r\n5']:
            yield chunk
    with patch.object(Response, 'iter_content', mock_iter_content):
        result = list(
            _iter_lines_generator(response=Response(), decode_unicode=True))
    assert result == ['1', '2', '3', '4', '5']
def test_iter_content_generator():
    """Test CRLF -> LF newline conversion."""
    def mock_iter_content(self, chunk_size, decode_unicode):
        # CRLF pairs are deliberately split across chunk boundaries
        for chunk in ['1\r\n2\r\n', '3\r', '\n4', '\r\n5']:
            yield chunk
    with patch.object(Response, 'iter_content', mock_iter_content):
        # decode_unicode=True: CRLF (even when split) is normalised to LF
        result = list(
            _iter_content_generator(response=Response(), decode_unicode=True))
        assert result == ['1\n2\n', '3', '\n4', '\n5']
        # decode_unicode=False: chunks pass through untouched
        result = list(
            _iter_content_generator(response=Response(), decode_unicode=False))
        assert result == ['1\r\n2\r\n', '3\r', '\n4', '\r\n5']
def test_generic_request_exceptions():
st = SpaceTrackClient('identity', 'password')
with pytest.raises(ValueError):
st.generic_request(class_='tle', iter_lines=True, iter_content=True)
with pytest.raises(ValueError):
st.generic_request(class_='thisclassdoesnotexist')
def mock_get_predicates(self, class_):
return []
patch_authenticate = patch.object(SpaceTrackClient, 'authenticate')
patch_get_predicates = patch.object(
SpaceTrackClient, 'get_predicates', mock_get_predicates)
with patch_authenticate, patch_get_predicates:
with pytest.raises(TypeError):
st.generic_request('tle', madeupkeyword=None)
with pytest.raises(ValueError):
st.generic_request(class_='tle', controller='nonsense')
with pytest.raises(ValueError):
st.generic_request(class_='nonsense', controller='basicspacedata')
with pytest.raises(AttributeError):
st.basicspacedata.blahblah
def test_get_predicates_exceptions():
    """get_predicates rejects unknown controller or class names."""
    st = SpaceTrackClient('identity', 'password')
    with pytest.raises(ValueError):
        st.get_predicates(class_='tle', controller='nonsense')
    with pytest.raises(ValueError):
        st.get_predicates(class_='nonsense', controller='basicspacedata')
def test_get_predicates():
st = SpaceTrackClient('identity', 'password')
patch_authenticate = patch.object(SpaceTrackClient, 'authenticate')
patch_get_predicates = patch.object(SpaceTrackClient, 'get_predicates')
with patch_authenticate, patch_get_predicates as mock_get_predicates:
st.tle.get_predicates()
st.basicspacedata.tle.get_predicates()
st.basicspacedata.get_predicates('tle')
st.get_predicates('tle')
st.get_predicates('tle', 'basicspacedata')
expected_calls = [
call(class_='tle', controller='basicspacedata'),
call(class_='tle', controller='basicspacedata'),
call(class_='tle', controller='basicspacedata'),
call('tle'),
call('tle', 'basicspacedata')
]
assert mock_get_predicates.call_args_list == expected_calls
@responses.activate
def test_generic_request():
responses.add(
responses.POST, 'https://www.space-track.org/ajaxauth/login', json='""')
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/modeldef/class/tle_publish',
json={
'controller': 'basicspacedata',
'data': [
{
'Default': '0000-00-00 00:00:00',
'Extra': '',
'Field': 'PUBLISH_EPOCH',
'Key': '',
'Null': 'NO',
'Type': 'datetime'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE1',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE2',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
}
]})
tle = (
'1 25544U 98067A 08264.51782528 -.00002182 00000-0 -11606-4 0 2927\r\n'
'2 25544 51.6416 247.4627 0006703 130.5360 325.0288 15.72125391563537\r\n')
normalised_tle = tle.replace('\r\n', '\n')
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish'
'/format/tle',
body=tle)
st = SpaceTrackClient('identity', 'password')
assert st.tle_publish(format='tle') == normalised_tle
lines = list(
st.tle_publish(iter_lines=True, format='tle'))
assert lines == [
'1 25544U 98067A 08264.51782528 -.00002182 00000-0 -11606-4 0 2927',
'2 25544 51.6416 247.4627 0006703 130.5360 325.0288 15.72125391563537'
]
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish',
json={'a': 5})
result = st.tle_publish()
assert result['a'] == 5
# Just use datetime to disambiguate URL from those above.
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish'
'/publish_epoch/1986-01-28%2016:39:13',
body='a' * (100 * 1024) + 'b')
result = list(st.tle_publish(
iter_content=True, publish_epoch=dt.datetime(1986, 1, 28, 16, 39, 13)))
assert result[0] == 'a' * (100 * 1024)
assert result[1] == 'b'
@responses.activate
def test_bytes_response():
responses.add(
responses.POST, 'https://www.space-track.org/ajaxauth/login', json='""')
responses.add(
responses.GET,
'https://www.space-track.org/fileshare/modeldef/class/download',
json={
'controller': 'fileshare',
'data': [
{
'Default': '0',
'Extra': '',
'Field': 'FILE_ID',
'Key': '',
'Null': 'NO',
'Type': 'int(10) unsigned'
},
{
'Default': None,
'Extra': '',
'Field': 'FILE_CONTENET',
'Key': '',
'Null': 'YES',
'Type': 'longblob'
}
]})
data = b'bytes response \r\n'
responses.add(
responses.GET,
'https://www.space-track.org/fileshare/query/class/download'
'/format/stream',
body=data)
st = SpaceTrackClient('identity', 'password')
assert st.download(format='stream') == data
with pytest.raises(ValueError):
st.download(iter_lines=True, format='stream')
# Just use file_id to disambiguate URL from those above
responses.add(
responses.GET,
'https://www.space-track.org/fileshare/query/class/download'
'/file_id/1',
body=b'a' * (100 * 1024) + b'b')
result = list(st.download(
iter_content=True, file_id=1))
assert result[0] == b'a' * (100 * 1024)
assert result[1] == b'b'
@responses.activate
def test_ratelimit_error():
responses.add(
responses.POST, 'https://www.space-track.org/ajaxauth/login', json='""')
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/modeldef/class/tle_publish',
json={
'controller': 'basicspacedata',
'data': [
{
'Default': '0000-00-00 00:00:00',
'Extra': '',
'Field': 'PUBLISH_EPOCH',
'Key': '',
'Null': 'NO',
'Type': 'datetime'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE1',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE2',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
}
]})
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish',
status=500, body='violated your query rate limit')
st = SpaceTrackClient('identity', 'password')
# Change ratelimiter period to speed up test
st._ratelimiter.period = 1
# Do it first without our own callback, then with.
# Catch the exception when URL is called a second time and still gets HTTP 500
with pytest.raises(HTTPError):
st.tle_publish()
mock_callback = Mock()
st.callback = mock_callback
# Catch the exception when URL is called a second time and still gets HTTP 500
with pytest.raises(HTTPError):
st.tle_publish()
assert mock_callback.call_count == 1
@responses.activate
def test_non_ratelimit_error():
responses.add(
responses.POST, 'https://www.space-track.org/ajaxauth/login', json='""')
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/modeldef/class/tle_publish',
json={
'controller': 'basicspacedata',
'data': [
{
'Default': '0000-00-00 00:00:00',
'Extra': '',
'Field': 'PUBLISH_EPOCH',
'Key': '',
'Null': 'NO',
'Type': 'datetime'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE1',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE2',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
}
]})
st = SpaceTrackClient('identity', 'password')
# Change ratelimiter period to speed up test
st._ratelimiter.period = 1
mock_callback = Mock()
st.callback = mock_callback
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish',
status=500, body='some other error')
with pytest.raises(HTTPError):
st.tle_publish()
assert not mock_callback.called
@responses.activate
def test_predicate_parse_modeldef():
st = SpaceTrackClient('identity', 'password')
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': '%brokentype'
}
]
with pytest.raises(ValueError):
st._parse_predicates_data(predicates_data)
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': 'unknowntype'
}
]
with pytest.raises(ValueError):
st._parse_predicates_data(predicates_data)
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': 'enum()'
}
]
with pytest.raises(ValueError):
st._parse_predicates_data(predicates_data)
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': "enum('a','b')"
}
]
predicate = st._parse_predicates_data(predicates_data)[0]
assert predicate.values == ('a', 'b')
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': "enum('a')"
}
]
predicate = st._parse_predicates_data(predicates_data)[0]
assert predicate.values == ('a',)
predicates_data = [
{
'Default': '',
'Extra': '',
'Field': 'TEST',
'Key': '',
'Null': 'NO',
'Type': "enum('a','b','c')"
}
]
predicate = st._parse_predicates_data(predicates_data)[0]
assert predicate.values == ('a', 'b', 'c')
def test_bare_spacetrack_methods():
    """Verify that e.g. st.tle_publish calls st.generic_request('tle_publish')"""
    st = SpaceTrackClient('identity', 'password')
    seen = set()
    with patch.object(SpaceTrackClient, 'generic_request') as mock_generic_request:
        for controller, classes in st.request_controllers.items():
            for class_ in classes:
                # A class shared by several controllers is exercised once,
                # via the first controller that provides it
                if class_ in seen:
                    continue
                seen.add(class_)
                method = getattr(st, class_)
                method()
                expected = call(class_=class_, controller=controller)
                assert mock_generic_request.call_args == expected
    # Unknown attribute access must not silently become a request
    with pytest.raises(AttributeError):
        st.madeupmethod()
def test_controller_spacetrack_methods():
    """st.<controller>.<class_>() forwards to generic_request with both names."""
    st = SpaceTrackClient('identity', 'password')
    with patch.object(SpaceTrackClient, 'generic_request') as mock_generic_request:
        for controller, classes in st.request_controllers.items():
            for class_ in classes:
                controller_proxy = getattr(st, controller)
                method = getattr(controller_proxy, class_)
                method()
                expected = call(class_=class_, controller=controller)
                assert mock_generic_request.call_args == expected
@responses.activate
def test_authenticate():
def request_callback(request):
if 'wrongpassword' in request.body:
return (200, dict(), json.dumps({'Login': 'Failed'}))
elif 'unknownresponse' in request.body:
# Space-Track doesn't respond like this, but make sure anything
# other than {'Login': 'Failed'} doesn't raise AuthenticationError
return (200, dict(), json.dumps({'Login': 'Successful'}))
else:
return (200, dict(), json.dumps(''))
responses.add_callback(
responses.POST, 'https://www.space-track.org/ajaxauth/login',
callback=request_callback, content_type='application/json')
st = SpaceTrackClient('identity', '<PASSWORD>password')
with pytest.raises(AuthenticationError):
st.authenticate()
assert len(responses.calls) == 1
st.password = '<PASSWORD>'
st.authenticate()
st.authenticate()
# Check that only one login request was made since successful
# authentication
assert len(responses.calls) == 2
st = SpaceTrackClient('identity', 'unknownresponse')
st.authenticate()
@responses.activate
def test_raise_for_status():
responses.add(responses.GET, 'http://example.com/1',
json={'error': 'problem'}, status=400)
responses.add(responses.GET, 'http://example.com/2',
json={'wrongkey': 'problem'}, status=400)
responses.add(responses.GET, 'http://example.com/3',
json='problem', status=400)
responses.add(responses.GET, 'http://example.com/4',
status=400)
response1 = requests.get('http://example.com/1')
response2 = requests.get('http://example.com/2')
response3 = requests.get('http://example.com/3')
response4 = requests.get('http://example.com/4')
with pytest.raises(HTTPError) as exc:
_raise_for_status(response1)
assert 'Space-Track' in str(exc.value)
assert '\nproblem' in str(exc.value)
with pytest.raises(HTTPError) as exc:
_raise_for_status(response2)
assert 'Space-Track' in str(exc.value)
assert '{"wrongkey": "problem"}' in str(exc.value)
with pytest.raises(HTTPError) as exc:
_raise_for_status(response3)
assert 'Space-Track' in str(exc.value)
assert '\n"problem"' in str(exc.value)
with pytest.raises(HTTPError) as exc:
_raise_for_status(response4)
assert 'Space-Track' not in str(exc.value)
def test_repr():
    """reprs are informative, and the client repr never leaks the password."""
    st = SpaceTrackClient('<EMAIL>', 'mypassword')
    assert repr(st) == "SpaceTrackClient<identity='<EMAIL>'>"
    assert 'mypassword' not in repr(st)
    predicate = Predicate(name='a', type_='int', nullable=True, default=None)
    reprstr = "Predicate(name='a', type_='int', nullable=True, default=None)"
    assert repr(predicate) == reprstr
    # enum predicates additionally show their allowed values
    predicate = Predicate(
        name='a', type_='enum', nullable=True, values=('a', 'b'), default=None)
    reprstr = ("Predicate(name='a', type_='enum', nullable=True, "
               "default=None, values=('a', 'b'))")
    assert repr(predicate) == reprstr
    controller_proxy = st.basicspacedata
    reprstr = "_ControllerProxy<controller='basicspacedata'>"
    assert repr(controller_proxy) == reprstr
def test_dir():
st = SpaceTrackClient('<EMAIL>', '<PASSWORD>')
assert dir(st) == [
'_authenticated',
'_controller_proxies',
'_predicates',
'_ratelimiter',
'announcement',
'basicspacedata',
'boxscore',
'callback',
'cdm',
'decay',
'delete',
'download',
'expandedspacedata',
'file',
'file_history',
'fileshare',
'identity',
'launch_site',
'maneuver',
'maneuver_history',
'omm',
'organization',
'password',
'satcat',
'satcat_change',
'satcat_debut',
'session',
'spephemeris',
'tip',
'tle',
'tle_latest',
'tle_publish',
'upload',
]
@pytest.mark.parametrize('predicate, input, output', [
    (Predicate('a', 'float'), '0.5', 0.5),
    (Predicate('a', 'int'), '5', 5),
    (Predicate('a', 'datetime'), '2017-01-01 01:02:03',
     dt.datetime(2017, 1, 1, 1, 2, 3)),
    (Predicate('a', 'date'), '2017-01-01',
     dt.date(2017, 1, 1)),
    (Predicate('a', 'enum', values=('a', 'b')), 'a', 'a'),
    # None passes through unparsed regardless of the declared type
    (Predicate('a', 'int'), None, None),
])
def test_predicate_parse_type(predicate, input, output):
    """Predicate.parse converts the raw API string to the declared type."""
    assert predicate.parse(input) == output
@responses.activate
def test_parse_types():
responses.add(
responses.POST, 'https://www.space-track.org/ajaxauth/login', json='""')
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/modeldef/class/tle_publish',
json={
'controller': 'basicspacedata',
'data': [
{
'Default': '0000-00-00 00:00:00',
'Extra': '',
'Field': 'PUBLISH_EPOCH',
'Key': '',
'Null': 'NO',
'Type': 'datetime'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE1',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
},
{
'Default': '',
'Extra': '',
'Field': 'TLE_LINE2',
'Key': '',
'Null': 'NO',
'Type': 'char(71)'
}
]})
responses.add(
responses.GET,
'https://www.space-track.org/basicspacedata/query/class/tle_publish',
json=[{
# Test a type that is parsed.
'PUBLISH_EPOCH': '2017-01-02 03:04:05',
# Test a type that is passed through.
'TLE_LINE1': 'The quick brown fox jumps over the lazy dog.',
# Test a field there was no predicate for.
'OTHER_FIELD': 'Spam and eggs.'
}])
st = SpaceTrackClient('identity', 'password')
result, = st.tle_publish(parse_types=True)
assert result['PUBLISH_EPOCH'] == dt.datetime(2017, 1, 2, 3, 4, 5)
assert result['TLE_LINE1'] == 'The quick brown fox jumps over the lazy dog.'
assert result['OTHER_FIELD'] == 'Spam and eggs.'
with pytest.raises(ValueError) as exc_info:
st.tle_publish(format='tle', parse_types=True)
assert 'parse_types' in exc_info.value.args[0]
| 2.296875 | 2 |
ea_sim/visualization/visualize_results.py | lis-epfl/Tensoft-G21 | 1 | 12760943 | <filename>ea_sim/visualization/visualize_results.py
#! /usr/bin/env python3
## OLD SCRIPT - NO MORE USED ##
import os
import re
import json
import argparse
import numpy as np
import matplotlib
# select matplotlib backend
matplotlib.use('pdf')
import matplotlib.pyplot as plt
def prepare_env(results_folder):
    """Collect the experiment folders to visualize and create the plots folder.

    :param results_folder: directory containing one sub-folder per experiment,
        named like ``01_...`` or ``s01_...``
    :return: (list of experiment folder names, path of the output plots folder)
    """
    # collect which experiments should be visualized
    experiment_folders = [f for f in os.listdir(results_folder)
                          if re.match(r'^(s)?[0-9]+_.*', f)]

    # prepare output folder
    # BUG FIX: the original called os.mkdir(plots_folder, os.O_RDWR) — os.O_RDWR
    # (== 2) was being used as the *mode* argument, creating a directory with
    # permissions 0o002 that the owner could not even enter. makedirs with
    # exist_ok=True fixes the mode and removes the exists-check race as well.
    plots_folder = os.path.join(results_folder, 'plots')
    os.makedirs(plots_folder, exist_ok=True)

    return experiment_folders, plots_folder
def load_json(json_file):
    """Deserialize and return the JSON document stored at *json_file*."""
    with open(json_file) as handle:
        return json.load(handle)
def store_json(data, json_file):
    """Serialize *data* as JSON into *json_file*, creating parent folders."""
    parent_dir = os.path.dirname(json_file)
    os.makedirs(parent_dir, exist_ok=True)
    with open(json_file, 'w') as handle:
        json.dump(data, handle)
def compute_stats(data):
    """Summarize *data* as a dict of median/mean/std/min/max/quartiles."""
    values = np.asarray(data)
    # the 50th percentile is the median, so all three cut points can be
    # obtained from a single percentile call
    q1, med, q3 = np.percentile(values, [25, 50, 75])
    return {
        'median': med,
        'avg': values.mean(),
        'std': values.std(),
        'min': values.min(),
        'max': values.max(),
        'q1': q1,
        'q3': q3
    }
def sum_stats(s1, s2):
    """Combine two stats dicts: additive fields are summed, extremes merged."""
    combined = {key: s1[key] + s2[key] for key in ('median', 'avg', 'std')}
    # extremes are merged, not summed
    combined['min'] = min(s1['min'], s2['min'])
    combined['max'] = max(s1['max'], s2['max'])
    combined['q1'] = s1['q1'] + s2['q1']
    combined['q3'] = s1['q3'] + s2['q3']
    return combined
def divide_stats(s, divisor):
    """Scale the additive fields of a stats dict by ``1/divisor``.

    NOTE(review): 'min' and 'max' are only cast to float, not divided —
    this mirrors the original behavior (they are extremes, not sums);
    confirm against the aggregation code that produces *s*.
    """
    scaled = {}
    for key in ('median', 'avg', 'std'):
        scaled[key] = float(s[key] / divisor)
    scaled['min'] = float(s['min'])
    scaled['max'] = float(s['max'])
    for key in ('q1', 'q3'):
        scaled[key] = float(s[key] / divisor)
    return scaled
def process_sims_data(sim_files, results_folder, exp):
    """Load every simulation run of *exp* and aggregate fitness per generation.

    Each JSON file holds the generations of one simulation (one seed); for
    every generation the best individual fitness is taken, then the values
    are aggregated across the simulations.

    :return: (means, medians, stds, q1s, q3s), one value per generation
    """
    # best fitness per generation — one row per simulation (seed)
    best_fits = []
    for sim in sim_files:
        generations = load_json(os.path.join(results_folder, exp, sim))
        best_fits.append([max(ind['fitness'] for ind in gen['population'])
                          for gen in generations])
    best_fits = np.asarray(best_fits)

    # aggregate across simulations, generation by generation
    # (each column of best_fits holds the values of one generation)
    means, medians, stds, q1s, q3s = [], [], [], [], []
    for column in best_fits.T:
        means.append(np.mean(column))
        medians.append(np.median(column))
        stds.append(np.std(column))
        q1s.append(np.percentile(column, 25))
        q3s.append(np.percentile(column, 75))

    return means, medians, stds, q1s, q3s
def augment_data_map(data):
    """Expand MAP-Elites checkpoint series so they align with the EA ones.

    MAP-Elites produces nothing during the first 25 simulation checkpoints,
    so 25 zeros are prepended to every series; afterwards each data point is
    replicated to match the denser check-pointing of the other algorithms:
    indices not divisible by 3 are tripled, indices divisible by 6 doubled,
    the remaining multiples of 3 kept once.
    """
    expanded = tuple([0.0] * 25 for _ in data)
    for series, out in zip(data, expanded):
        for i, value in enumerate(series):
            if i % 3 != 0:
                copies = 3
            elif i % 6 == 0:
                copies = 2
            else:
                copies = 1
            out.extend([value] * copies)
    return expanded
def plot_experiments(medians, q1s, q3s, config, title='',
                     colors=None, y_max=40, y_label='Median Max Fitness [cm]'):
    """Plot one curve per experiment with a shaded band between q1 and q3.

    :param medians: list of per-experiment series (the central line)
    :param q1s: list of lower-band series
    :param q3s: list of upper-band series
    :param config: dict with 'labels', 'base_dir' and 'output_file' keys
    :param colors: optional list of colors, one per experiment (defaults to
        a viridis gradient)
    """
    matplotlib.rc('font',
                  **{'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'})
    matplotlib.rcParams["axes.titlepad"] = 15
    matplotlib.rcParams['figure.dpi'] = 300

    figure = plt.figure(figsize=(12, 8))
    axes = figure.gca()

    if colors is None:
        colors = plt.cm.viridis(np.linspace(0, 1, len(medians)))

    for idx, (center, low, high) in enumerate(zip(medians, q1s, q3s)):
        axes.plot(list(range(len(center))), center,
                  label=config['labels'][idx],
                  color=colors[idx], ms=2.5)
        axes.fill_between(list(range(len(low))), low, high,
                          color=colors[idx], alpha=0.2, antialiased=True)

    axes.set_title(title, size=20, fontweight='normal')
    axes.set_xlabel('Simulations Checkpoints', labelpad=15)
    axes.set_ylabel(y_label, labelpad=10)
    for side in ('right', 'top'):
        axes.spines[side].set_visible(False)
    axes.set_xlim(0, max(len(series) for series in medians))
    axes.set_ylim(0, y_max)
    axes.margins(0)
    axes.grid()
    axes.legend(loc=8, borderaxespad=1, ncol=4, frameon=False)

    out_file = '{}.pdf'.format(os.path.join(config['base_dir'],
                                            config['output_file']))
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    plt.savefig(out_file, bbox_inches='tight')
    plt.close()
def _get_null_values():
return [([0.0], [0.0], [0.0])]
def main(results_folder):
    """Build fitness plots for every experiment in *results_folder*.

    For each experiment folder a per-experiment plot (median max fitness
    with IQR band) is produced; afterwards the three selected noisy
    experiments are compared in two aggregate plots (median/IQR and
    mean/std).
    """
    exps_folders, plots_folder = prepare_env(results_folder)

    # process each experiment according to its numerical order
    exps_vals = {}
    # NOTE(review): 'colors' is computed but never used below — the plots
    # rely on the default palette chosen inside plot_experiments.
    colors = plt.cm.viridis(np.linspace(0, 1, len(exps_folders)))
    for i, exp in enumerate(exps_folders):
        print('\rProcessing experiment: {}'.format(i + 1), end='')

        # collect each simulation file
        sim_files = [f for f in os.listdir(os.path.join(results_folder, exp))
                     if re.match('^evo.*\.json', f)]

        # load data into an appropriate data structure
        mean, median, std, q1, q3 = process_sims_data(sim_files, results_folder, exp)
        exps_vals[exp] = (mean, median, std, q1, q3)

        # consider the different method for check-pointing in MAP-Elites
        # (its series start later and are sparser, so they are expanded
        # to align with the other algorithms' checkpoints)
        if exp == '17_map-elites_selected_noiseless' or exp == '18_map-elites_selected_noisy':
            exps_vals[exp] = augment_data_map(exps_vals[exp])

        # per-experiment plot: median of the max fitness with IQR band
        exp_comps = exp.split('_')
        title = '{} {}'.format(exp_comps[1], exp_comps[-1])
        config = {
            'labels': [title],
            'base_dir': plots_folder,
            'output_file': '_'.join(exp_comps[:2] + [exp_comps[-1]] + ['fit'])
        }
        plot_experiments([median], [q1], [q3], config, title)

    #noiseless_exps = ['01_vie_selected_noiseless', '09_mu+l_selected_noiseless', '17_map-elites_selected_noiseless']
    noisy_exps = ['05_vie_selected_noisy', '13_mu+l_selected_noisy', '18_map-elites_selected_noisy']
    # noiseless_medians, noiseless_q1s, noiseless_q3s =\
    #     list(zip(*[exps_vals[nle] if nle in exps_folders else _get_null_values() for nle in noiseless_exps]))

    # transpose the per-experiment 5-tuples into five parallel lists
    # (experiments missing on disk are replaced by zero placeholders)
    n_means, n_medians, n_stds, n_q1s, n_q3s =\
        list(zip(*[exps_vals[nye] if nye in exps_folders else _get_null_values() for nye in noisy_exps]))

    print('\nPlot EAs comparisons...')
    # plot_experiments(noiseless_medians,
    #                  noiseless_q1s,
    #                  noiseless_q3s,
    #                  {
    #                      'labels': noiseless_exps,
    #                      'base_dir': plots_folder,
    #                      'output_file': 'alg_comparison_noiseless'
    #                  },
    #                  'EAs comparison - Noiseless'
    # )
    # comparison plot: median with inter-quartile band
    plot_experiments(n_medians,
                     n_q1s,
                     n_q3s,
                     {
                         'labels': noisy_exps,
                         'base_dir': plots_folder,
                         'output_file': 'alg_comparison_median'
                     },
                     'EAs comparison'
    )
    # comparison plot: mean with +/- one standard deviation band
    plot_experiments(n_means,
                     [np.asarray(m) - np.asarray(s) for m, s in zip(n_means, n_stds)],
                     [np.asarray(m) + np.asarray(s) for m, s in zip(n_means, n_stds)],
                     {
                         'labels': noisy_exps,
                         'base_dir': plots_folder,
                         'output_file': 'alg_comparison_avg'
                     },
                     'EAs comparison'
    , y_label='Avg. Max Fitness [cm]'
    )

    print('Done!')
# Command-line entry point: one optional positional argument pointing at
# the results directory (defaults to ../results).
if __name__ == '__main__':
    print('\n## OLD SCRIPT - NO MORE USED ##\n')

    parser = argparse.ArgumentParser(description='Script for plotting the progress and results'
                                                 'of the evolutionary algorithm over different experiments')
    parser.add_argument('in_folder', metavar='in_folder', type=str, nargs='?',
                        default='../results', help='folder where experiments data are stored.')
    args = parser.parse_args()

    main(args.in_folder)
HyperclassifierSearch/__init__.py | dabln/HyperclassifierSearch | 1 | 12760944 | from .HyperclassifierSearch import HyperclassifierSearch
# Module-level doc-string: explicitly set so that help() on the package
# shows this summary instead of nothing.
__doc__ = """
HyperclassifierSearch allows to train multiple classifiers/pipelines in Python with GridSearchCV or RandomizedSearchCV.
"""
| 2.109375 | 2 |
app/post/tests/test_tags_api.py | talhaozcan/django-recipe | 0 | 12760945 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from post.serializers import TagSerializer
TAGS_URL = reverse('post:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the public (unauthenticated) tags API functions."""

    def setUp(self):
        # unauthenticated client for every test in this class
        self.client = APIClient()

    def test_login_required(self):
        """Test that login is required to list the tags"""
        res = self.client.get(TAGS_URL)

        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name for consistency with the rest of this file.
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Tests for the tags API endpoints that require authentication."""

    def setUp(self):
        # create a user and a client authenticated as that user
        credentials = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'Test User'
        }
        self.user = get_user_model().objects.create_user(**credentials)
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_tags(self):
        """Test retrieving tags"""
        for tag_name in ('TestTag', 'TestTag2'):
            Tag.objects.create(user=self.user, name=tag_name)

        res = self.client.get(TAGS_URL)

        # the endpoint must return the tags ordered by name, descending
        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, expected.data)

    def test_retrieve_tags_limited_to_user(self):
        """Test retrieving tags belong to the specific user"""
        other_user = get_user_model().objects.create_user(
            email='<EMAIL>',
            password='<PASSWORD>'
        )
        own_tag = Tag.objects.create(user=self.user, name='TestTag')
        Tag.objects.create(user=other_user, name='TestTag2')

        res = self.client.get(TAGS_URL)

        # only the authenticated user's tag must be returned
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], own_tag.name)

    def test_create_tags_successful(self):
        """Test creating a tag successfully"""
        payload = {'name': 'tag1'}

        res = self.client.post(TAGS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            Tag.objects.filter(user=self.user, name='tag1').exists()
        )

    def test_create_tags_invalid(self):
        """Test creating a tag invalid attrs"""
        res = self.client.post(TAGS_URL, {'name': ''})

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 2.578125 | 3 |
tests/api/urls.py | drf-tools/drf-app-generators | 3 | 12760946 | from rest_framework.routers import SimpleRouter
# Router for registering viewsets; no viewsets are registered here yet,
# so the generated urlpatterns list is currently empty.
router = SimpleRouter()
urlpatterns = router.urls
| 1.28125 | 1 |
bin/test.py | timb-machine/pyattck | 0 | 12760947 | <filename>bin/test.py
from pyattck import Attck


def _print_id_name(item):
    """Print the id and name of any ATT&CK object (actor, tool, malware...)."""
    print(item.id)
    print(item.name)


def _print_technique_details(technique):
    """Print a technique's id/name plus its generated external data sets."""
    print(technique.id)
    print(technique.name)
    # generated data sets attached to a technique
    print(technique.command_list)
    print(technique.commands)
    print(technique.queries)
    print(technique.datasets)
    print(technique.possible_detections)


def _dump_full_matrix(matrix):
    """Walk a full ATT&CK matrix (Enterprise or Mobile) and print every
    object together with its related objects.

    The original script repeated this section verbatim for the Enterprise
    and Mobile matrices; the print order here is identical to the original.
    """
    for actor in matrix.actors:
        _print_id_name(actor)
        # malware used by an actor or group
        for malware in actor.malwares:
            _print_id_name(malware)
        # tools used by an actor or group
        for tool in actor.tools:
            _print_id_name(tool)
        # techniques used by an actor or group
        for technique in actor.techniques:
            _print_technique_details(technique)

    for malware in matrix.malwares:
        _print_id_name(malware)
        # actors or groups using this malware
        for actor in malware.actors:
            _print_id_name(actor)
        # techniques this malware is used in
        for technique in malware.techniques:
            _print_id_name(technique)

    for mitigation in matrix.mitigations:
        _print_id_name(mitigation)
        # techniques related to mitigation recommendations
        for technique in mitigation.techniques:
            _print_technique_details(technique)

    for tactic in matrix.tactics:
        _print_id_name(tactic)
        # techniques related to this tactic
        for technique in tactic.techniques:
            _print_technique_details(technique)

    for technique in matrix.techniques:
        _print_technique_details(technique)
        # tactics this technique belongs to
        for tactic in technique.tactics:
            _print_id_name(tactic)
        # mitigation recommendations for this technique
        for mitigation in technique.mitigations:
            _print_id_name(mitigation)
        # actors using this technique
        for actor in technique.actors:
            _print_id_name(actor)

    for tool in matrix.tools:
        _print_id_name(tool)
        # techniques this tool is used in
        for technique in tool.techniques:
            _print_technique_details(technique)
        # actors or groups using this tool
        for actor in tool.actors:
            _print_id_name(actor)


def _dump_preattack_matrix(matrix):
    """Walk the PRE-ATT&CK matrix (it has no malware/tools and no generated
    data sets, so only ids and names are printed)."""
    for actor in matrix.actors:
        _print_id_name(actor)
        for technique in actor.techniques:
            _print_id_name(technique)

    for tactic in matrix.tactics:
        _print_id_name(tactic)
        for technique in tactic.techniques:
            _print_id_name(technique)

    for technique in matrix.techniques:
        _print_id_name(technique)
        for tactic in technique.tactics:
            _print_id_name(tactic)
        for actor in technique.actors:
            _print_id_name(actor)


attack = Attck()

# Examples of MITRE Enterprise ATT&CK
_dump_full_matrix(attack.enterprise)

# Examples of MITRE PRE-ATT&CK
_dump_preattack_matrix(attack.preattack)

# Examples of MITRE Mobile ATT&CK
_dump_full_matrix(attack.mobile)
| 2.390625 | 2 |
stocks.py | arii/fun | 0 | 12760948 | <filename>stocks.py<gh_stars>0
#!/usr/bin/env python
import urllib2, urllib, json
def get_query(stocks):
    """Fetch (close on 2015-03-02, latest trade price) pairs for *stocks*.

    Runs two YQL queries against the Yahoo finance tables and zips the
    historical closes with the current prices, in the order of *stocks*.

    NOTE(review): relies on the Yahoo YQL API, which has since been shut
    down, and on Python 2 semantics (urllib2, list-returning dict.values).
    """
    start_date = "2015-03-02"
    hist_select = "select Close from yahoo.finance.historicaldata where symbol in "
    curr_select = "select LastTradePriceOnly from yahoo.finance.quote where symbol in "

    # symbol list rendered as ("A","B",...)
    names = "(%s)" % ','.join(['"%s"'% s for s in stocks])
    curr_query = curr_select + names
    # BUG FIX: a space is required between the symbol list and the date
    # filter; the original produced '...(...)and startDate...' with the
    # closing parenthesis glued to the 'and' keyword.
    hist_query = hist_select + names + ' and startDate = "%s" and endDate = "%s" ' % (start_date, start_date)

    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    endurl = "&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys"

    def get_response(select):
        """Run one YQL query and extract the float value from each quote row."""
        yql_url = baseurl + urllib.urlencode({'q':select}) + endurl
        result = urllib2.urlopen(yql_url).read()
        data = json.loads(result)
        return [float(d.values()[0])for d in data['query']['results']['quote']]

    return zip(get_response(hist_query), get_response(curr_query))
def main():
    """Print a gain/loss report for a hard-coded watch list of tickers."""
    stocks = ["YELP", "VA", "INTC", "F", "BABA", "BAC", "PCG", "PG", "BP", "BEBE", "ANF", "AMD", "NVAX", "GDP", "GRO", "VLTC", "EDU", "P", "RST", "GME", "FIX", "CRC", "AUMN", "APP", "REGI", "FE", "C", "BORN", "X", "AAPL", "YHOO", "ORCL", "JBLU", "HLS", "EMC", "SGMS", "DOW", "AAL", "GTIM", "ATRA", "CDXS", "GM", "BXP", "HLT", "NVDA", "M", "AMC"]
    # res is a list of (historical close, current price) pairs, one per ticker
    res = get_query(stocks)
    # absolute and percentage change per ticker, plus portfolio totals
    difference = []
    perc_difference = []
    o_total = 0
    n_total = 0
    for (o, n) in res:
        difference.append(n-o)
        o_total += o
        n_total += n
        perc_difference.append( 100.0*(n-o)/o )
    total_change = sum(difference)
    print "previous total on march 2nd, 2015 = %.2f\ncurrent total = %.2f \ntotal net change = %.2f\n" % (o_total, n_total, total_change)

    # tickers that gained since the reference date
    print "\npositive percent increases"
    for i,p in enumerate(perc_difference):
        if p > 0:
            print "%s:\t%.2f,\t%.2f%%" % (stocks[i], res[i][1], p)
    # tickers that lost since the reference date
    print "\nnegative percent increases"
    for i,p in enumerate(perc_difference):
        if p < 0:
            print "%s:\t%.2f,\t%.2f%%" % (stocks[i], res[i][1], p)
# Run the report only when executed as a script.
if __name__=="__main__":
    main()
| 3.03125 | 3 |
routers/helpers/__init__.py | packethost/network-helpers | 12 | 12760949 | from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional
import requests
def fetch_ip_addresses(
    headers: Optional[Dict[str, str]] = None, instance: Optional[str] = None
) -> Any:
    """Return the 'ip_addresses' list of a device from the Packet API.

    BUG FIX: the original used a mutable default argument (``headers={}``),
    a classic Python pitfall; ``None`` is the safe default and requests
    treats it the same as an empty mapping.

    NOTE(review): if *instance* is None the URL literally contains 'None';
    callers (fetch_bgp) always pass a valid id — consider raising ValueError
    here as well.

    :param headers: optional HTTP headers (e.g. the auth token)
    :param instance: device id used to build the API URL
    :return: the device's ip_addresses list, or [] when the field is absent
    :raises JSONDecodeError: when the response body is not valid JSON
    """
    url = "https://api.packet.net/devices/{}".format(instance)
    response = requests.get(url, headers=headers)
    try:
        response_payload = response.json()
        # missing field is treated as "no addresses", not an error
        return response_payload.get("ip_addresses", [])
    except JSONDecodeError as e:
        # re-raise with the failing URL embedded for easier debugging
        raise JSONDecodeError(
            "Unable to decode API/metadata response for {}. {}".format(url, e.msg),
            e.doc,
            e.pos,
        )
def fetch_bgp(
    use_metadata: bool = True,
    headers: Optional[Dict[str, str]] = None,
    instance: Optional[str] = None,
) -> Any:
    """Fetch BGP information either from the metadata service or the API.

    BUG FIX: the original used a mutable default argument (``headers={}``);
    ``None`` is the safe default and requests treats it the same way.

    :param use_metadata: when True, query the on-host metadata endpoint;
        when False, query the public API (requires *instance*)
    :param headers: optional HTTP headers (e.g. the auth token)
    :param instance: device id, required only when use_metadata is False
    :return: the decoded payload; in API mode a 'network.addresses' entry
        is injected to mimic the metadata layout
    :raises ValueError: when use_metadata is False and no instance is given
    :raises JSONDecodeError: when the response body is not valid JSON
    """
    url = "https://metadata.packet.net/metadata"
    ip_addresses: List[Any] = []

    if not use_metadata:
        if not instance:
            raise ValueError("Instance ID must be specified when not using metadata")
        url = "https://api.packet.net/devices/{}/bgp/neighbors".format(instance)
        # the API response lacks the addresses block metadata provides,
        # so it is fetched separately and grafted on below
        ip_addresses = fetch_ip_addresses(headers=headers, instance=instance)

    response = requests.get(url, headers=headers)
    try:
        response_payload = response.json()
        if not use_metadata:
            # mirror the metadata layout for API responses
            response_payload["network"] = {"addresses": ip_addresses}
        return response_payload
    except JSONDecodeError as e:
        # re-raise with the failing URL embedded for easier debugging
        raise JSONDecodeError(
            "Unable to decode API/metadata response for {}. {}".format(url, e.msg),
            e.doc,
            e.pos,
        )
| 2.9375 | 3 |
ngram/__init__.py | msathia/open-tamil | 1 | 12760950 | <reponame>msathia/open-tamil
# -*- coding: utf-8 -*-
#
# (C) முத்தையா அண்ணாமலை 2013-2015
#
# N-gram language model for Tamil in letters and words
#
# Ref: "CS 388: Natural Language Processing: N-Gram Language Models",
# <NAME>, U of Texas, Austin.
# Ref: N-gram models https://en.wikipedia.org/wiki/N-gram
from . import LetterModels
from . import WordModels
from . import Corpus
from . import Distance | 1.453125 | 1 |