| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import logging
from python_qt_binding.QtGui import QCheckBox
from python_qt_binding.QtGui import QHBoxLayout
from python_qt_binding.QtGui import QIcon
from python_qt_binding.QtGui import QPushButton
from python_qt_binding.QtGui import QVBoxLayout
from python_qt_binding.QtGui import QWidget
from qt_gui.plugin import Plugin
import rosprofiler_adapter
from diarc import qt_view
from blacklist import BlacklistDialog
TOPIC_BLACKLIST = ['/clock', '/topology', '/statistics']
NODE_BLACKLIST = ['/rosout']
# set this environment variable to enable diarc debug printing
if 'DIARC_DEBUG' in os.environ:
logging.getLogger('diarc').setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
logging.getLogger('diarc').addHandler(ch)
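# Editorial note (not in the original source): only the presence of DIARC_DEBUG
# is checked, never its value, so launching with e.g. `DIARC_DEBUG=1 rqt`
# (assumed launch command) is enough to stream diarc debug output to stdout.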
class VisualizerPlugin(Plugin):
def __init__(self, context):
super(VisualizerPlugin, self).__init__(context)
self.setObjectName('VisualizerPlugin')
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet", help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
context.add_widget(VisualizerWidget())
def shutdown_plugin(self):
pass
def save_settings(self, plugin_settings, instance_settings):
pass
def restore_settings(self, plugin_settings, instance_settings):
pass
class VisualizerWidget(QWidget):
def __init__(self, parent=None):
super(VisualizerWidget, self).__init__(parent)
self.setWindowTitle('Graph Profiler Visualizer')
vbox = QVBoxLayout()
self.setLayout(vbox)
toolbar_layout = QHBoxLayout()
refresh_button = QPushButton()
refresh_button.setIcon(QIcon.fromTheme('view-refresh'))
auto_refresh_checkbox = QCheckBox("Auto Refresh")
hide_disconnected_topics = QCheckBox("Hide Disconnected Topics")
topic_blacklist_button = QPushButton("Topic Blacklist")
node_blacklist_button = QPushButton("Node Blacklist")
refresh_button.clicked.connect(self._refresh)
topic_blacklist_button.clicked.connect(self._edit_topic_blacklist)
node_blacklist_button.clicked.connect(self._edit_node_blacklist)
auto_refresh_checkbox.setCheckState(2)  # 2 == Qt.Checked
auto_refresh_checkbox.stateChanged.connect(self._autorefresh_changed)
hide_disconnected_topics.setCheckState(2)  # 2 == Qt.Checked
hide_disconnected_topics.stateChanged.connect(self._hidedisconnectedtopics_changed)
toolbar_layout.addWidget(refresh_button)
toolbar_layout.addWidget(auto_refresh_checkbox)
toolbar_layout.addStretch(0)
toolbar_layout.addWidget(hide_disconnected_topics)
toolbar_layout.addWidget(topic_blacklist_button)
toolbar_layout.addWidget(node_blacklist_button)
vbox.addLayout(toolbar_layout)
# Initialize the Visualizer
self._view = qt_view.QtView()
self._adapter = rosprofiler_adapter.ROSProfileAdapter(self._view)
self._adapter.set_topic_quiet_list(TOPIC_BLACKLIST)
self._adapter.set_node_quiet_list(NODE_BLACKLIST)
vbox.addWidget(self._view)
def _edit_topic_blacklist(self):
""" Opens topic blacklist Dialog and modifies the blacklist """
topics = self._adapter.get_topic_quiet_list()
topic_blacklist = BlacklistDialog.get_blacklist(values=topics)
self._adapter.set_topic_quiet_list(topic_blacklist)
self._adapter.topology_update()
def _edit_node_blacklist(self):
""" Opens node blacklist Dialog and modifies the blacklist """
nodes = self._adapter.get_node_quiet_list()
node_blacklist = BlacklistDialog.get_blacklist(values=nodes)
self._adapter.set_node_quiet_list(node_blacklist)
self._adapter.topology_update()
def _autorefresh_changed(self, value):
if value == 2:
print("Enabling Autorefresh")
self._adapter.enable_auto_update()
self._refresh()
elif value == 0:
print("Disabling Autorefresh")
self._adapter.disable_auto_update()
else:
raise Exception("Unexpected check state: %r" % value)
def _hidedisconnectedtopics_changed(self, value):
if value == 2:
print("Hiding disconnected topics")
self._adapter.hide_disconnected_topics()
elif value == 0:
print("Showing disconnected topics")
self._adapter.show_disconnected_topics()
else:
raise Exception("Unexpected check state: %r" % value)
def _refresh(self):
self._adapter.topology_update()
self._adapter.statistics_update()
| osrf/rqt_graphprofiler | src/rqt_graphprofiler/visualizer_plugin.py | Python | apache-2.0 | 5,480 |
#!/usr/bin/env python
# Author: Kelcey Jamison-Damage
# Python: 2.66 +
# OS: CentOS | Other
# Portable: True
# License: Apache 2.0
# License
#-----------------------------------------------------------------------#
# Copyright [2016] [Kelcey Jamison-Damage]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-----------------------------------------------------------------------#
# Imports
#-----------------------------------------------------------------------#
import world_layer_01
import world_layer_02
# Main
#-----------------------------------------------------------------------#
config = {
0: world_layer_01,
1: world_layer_02
}
| kelceydamage/learning | config/world/registry.py | Python | apache-2.0 | 1,169 |
#
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A test module to exercise the Gnocchi API with gabbi."""
from alembic import context
from logging import config as logconfig
from gnocchi.indexer import sqlalchemy
from gnocchi.indexer import sqlalchemy_base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This sets up the loggers defined in the .ini file.
logconfig.fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = sqlalchemy_base.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
conf = config.conf
context.configure(url=conf.indexer.url,
target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
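# Editorial note (not in the original source): Alembic takes this offline path
# when invoked with its --sql flag, e.g. `alembic upgrade head --sql`, which
# emits the migration SQL to stdout instead of executing it against a database.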
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
conf = config.conf
indexer = sqlalchemy.SQLAlchemyIndexer(conf)
indexer.connect()
connectable = indexer.engine_facade.get_engine()
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
# If `alembic' was used directly from the CLI
if not hasattr(config, "conf"):
from gnocchi import service
config.conf = service.prepare_service([])
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| idegtiarov/gnocchi-rep | gnocchi/indexer/alembic/env.py | Python | apache-2.0 | 2,809 |
## Copyright 2022 Google LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## https://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Sends a text message to the user with a Live agent request suggestion.
It allows the user to connect with a Live agent.
Read more: https://developers.google.com/business-communications/business-messages/guides/how-to/message/send?hl=en#live_agent_request_suggestion
This code is based on the https://github.com/google-business-communications/python-businessmessages
Python Business Messages client library.
"""
import uuid
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import BusinessmessagesConversationsMessagesCreateRequest
from businessmessages.businessmessages_v1_messages import BusinessMessagesLiveAgentRequest
from businessmessages.businessmessages_v1_messages import BusinessMessagesMessage
from businessmessages.businessmessages_v1_messages import BusinessMessagesRepresentative
from businessmessages.businessmessages_v1_messages import BusinessMessagesSuggestion
from oauth2client.service_account import ServiceAccountCredentials
# Edit the values below:
path_to_service_account_key = './service_account_key.json'
conversation_id = 'EDIT_HERE'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
path_to_service_account_key,
scopes=['https://www.googleapis.com/auth/businessmessages'])
client = bm_client.BusinessmessagesV1(credentials=credentials)
# Create a text message with a live agent request action and fallback text
# Follow instructions at https://developers.google.com/business-communications/business-messages/guides/how-to/message/send?hl=en#live_agent_request_suggestion
message = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BusinessMessagesRepresentative( # Must be sent from a BOT representative
representativeType=BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.BOT
),
text='Would you like to chat with a live agent?',
fallback='Would you like to chat with a live agent?',
suggestions=[
BusinessMessagesSuggestion(
liveAgentRequest=BusinessMessagesLiveAgentRequest()
)
])
# Create the message request
create_request = BusinessmessagesConversationsMessagesCreateRequest(
businessMessagesMessage=message,
parent='conversations/' + conversation_id)
# Send the message
bm_client.BusinessmessagesV1.ConversationsMessagesService(
client=client).Create(request=create_request)
| google-business-communications/bm-snippets-python | send-message-live-agent-request-suggestion.py | Python | apache-2.0 | 3,016 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from functools import partial
from unittest.case import TestCase
import pickle
from bndl.util.objects import LazyObject
class Expensive(object):
def __init__(self, counters):
self.counters = counters
self.counters['created'] += 1
self.x = '1'
def stop(self):
self.counters['destroyed'] += 1
def __eq__(self, other):
return isinstance(other, Expensive) and self.x == other.x
class TestLazyObject(TestCase):
def setUp(self):
self.counters = Counter()
self.l = LazyObject(partial(Expensive, self.counters), destructor='stop')
def test_factory(self):
self.assertEqual(self.l.x, '1')
def test_destructor(self):
self.assertEqual(self.counters['created'], 0)
self.assertEqual(self.counters['destroyed'], 0)
self.assertEqual(self.l.x, '1')
self.assertEqual(self.counters['created'], 1)
self.assertEqual(self.counters['destroyed'], 0)
self.l.stop()
self.assertEqual(self.counters['created'], 1)
self.assertEqual(self.counters['destroyed'], 1)
self.assertEqual(self.l.x, '1')
self.assertEqual(self.counters['created'], 2)
self.assertEqual(self.counters['destroyed'], 1)
def test_pickle(self):
p = pickle.dumps(self.l)
l2 = pickle.loads(p)
self.assertEqual(self.l, l2)
| bndl/bndl | bndl/util/tests/test_objects.py | Python | apache-2.0 | 1,955 |
__source__ = 'https://leetcode.com/problems/word-ladder-ii/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/word-ladder-ii.py
# Time: O(n * d), n is length of string, d is size of dictionary
# Space: O(d)
# Back_Track + BFS
#
# Description: Leetcode # 126. Word Ladder II
#
# Given two words (start and end), and a dictionary,
# find all shortest transformation sequence(s) from start to end, such that:
#
# Only one letter can be changed at a time
# Each intermediate word must exist in the dictionary
# For example,
#
# Given:
# start = "hit"
# end = "cog"
# dict = ["hot","dot","dog","lot","log"]
# Return
# [
# ["hit","hot","dot","dog","cog"],
# ["hit","hot","lot","log","cog"]
# ]
#
# Note:
# Return an empty list if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
# You may assume no duplicates in the word list.
# You may assume beginWord and endWord are non-empty and are not the same.
# UPDATE (2017/1/20):
# The wordList parameter had been changed to a list of strings (instead of a set of strings).
# Please reload the code definition to get the latest changes.
#
# Companies
# Amazon Yelp
# Related Topics
# Array Backtracking Breadth-first Search String
#
import unittest
# BFS
class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return a list of lists of string
def findLadders(self, start, end, dict):
dict.add(start)
dict.add(end)
result, cur = [], [start]
visited = set([start])
found = False
trace = {word : [] for word in dict}
print trace
while cur and not found:
for word in cur:
visited.add(word)
next = set([])
for word in cur:
for i in xrange(len(word)):
for j in 'abcdefghijklmnopqrstuvwxyz':
candidate = word[:i] + j + word[i+1:]
if candidate not in visited and candidate in dict:
if candidate == end:
found = True
next.add(candidate)
trace[candidate].append(word)
cur = next
if found:
self.backtrack(result, trace, [], end)
return result
def backtrack(self, result, trace, path, word):
if not trace[word]:
result.append([word] + path)
else:
for prev in trace[word]:
self.backtrack(result, trace, [word] + path, prev)
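# Editorial note: `trace` maps each word to its predecessor words on a shortest
# ladder, so backtrack() walks from `end` toward `start`, prepending words as it
# goes and emitting every shortest transformation sequence.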
# http://www.cnblogs.com/zuoyuan/p/3697045.html
class SolutionOther:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return a list of lists of string
def findLadders(self, start, end, dict):
self.result = []
self.prevMap = {}
length = len(start)
for i in dict:
self.prevMap[i] = []
candidates = [set(), set()]
current = 0
previous = 1
candidates[current].add(start)
print candidates, current, previous
while True:
current, previous = previous, current
for i in candidates[previous]:
try:
print dict, i
dict.remove(i)
except ValueError:
pass
candidates[current].clear()
for word in candidates[previous]:
for i in range(length):
part1 = word[:i]
part2 = word[i+1:]
for j in 'abcdefghijklmnopqrstuvwxyz':
if word[i] != j:
nextword = part1 + j +part2
if nextword in dict:
self.prevMap[nextword].append(word)
candidates[current].add(nextword)
#print self.prevMap, candidates
if len(candidates[current]) == 0:
#print self.result
return self.result
if end in candidates[current]:
break
self.buildpath([], end)
return self.result
def buildpath(self, path, word):
if len(self.prevMap[word]) == 0:
path.append(word)
currPath = path[:]
currPath.reverse()
self.result.append(currPath)
path.pop()
return
path.append(word)
for iter in self.prevMap[word]:
self.buildpath(path, iter)
path.pop()
# Test
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
dict1 = [ "hot","dot","dog","lot","log" ]
dict2 = ["a","b","c"]
test = SolutionOther()
#print test.findLadders("hit", "cog", dict1)
#print test.findLadders("a", "b", dict2)
print Solution().findLadders("hit", "cog", set(["hot","dot","dog","lot","log"]))
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought:
public class Solution {
public List<List<String>> findLadders(String beginWord, String endWord, Set<String> wordList) {
Set<String> unvisited = new HashSet<>(wordList);
unvisited.add(endWord);
unvisited.remove(beginWord);
Set<String> roundVisited = new HashSet<>();
Queue<String> queue = new LinkedList<>();
queue.add(beginWord);
Map<String, List<String>> nextMap = new HashMap<>();
for (String str : unvisited) {
nextMap.put(str, new ArrayList<>());
}
nextMap.put(beginWord, new ArrayList<>());
Map<String, Integer> levelMap = new HashMap<>();
levelMap.put(beginWord, 0);
int prevLevel = 0;
int minLevel = Integer.MAX_VALUE;
while (!queue.isEmpty()) {
String curr = queue.poll();
int level = levelMap.get(curr);
if (level > minLevel) {
break;
}
if (prevLevel != level) {
unvisited.removeAll(roundVisited);
roundVisited.clear();
prevLevel = level;
}
char[] arr = curr.toCharArray();
for (int i = 0; i < arr.length; i++) {
char originalChar = arr[i];
boolean found = false;
for (char newChar = 'a'; newChar <= 'z'; newChar++) {
if (newChar == originalChar) {
continue;
}
arr[i] = newChar;
String newString = new String(arr);
if (unvisited.contains(newString)) {
nextMap.get(curr).add(newString);
if (newString.equals(endWord)) {
found = true;
minLevel = Math.min(minLevel, level + 1);
break;
}
if (!roundVisited.contains(newString)) {
roundVisited.add(newString);
queue.add(newString);
levelMap.put(newString, level + 1);
}
}
}
if (found) {
break;
}
arr[i] = originalChar;
}
}
List<List<String>> result = new ArrayList<>();
if (minLevel == Integer.MAX_VALUE) {
return result;
}
findPaths(endWord, beginWord, result, new ArrayList<>(), minLevel, nextMap);
return result;
}
private void findPaths(String endWord, String currWord, List<List<String>> result, List<String> path,
int level, Map<String, List<String>> nextMap) {
if (level < 0) {
return;
}
level--;
path.add(currWord);
if (currWord.equals(endWord)) {
result.add(new ArrayList<>(path));
} else {
for (String nextWord : nextMap.get(currWord)) {
findPaths(endWord, nextWord, result, path, level, nextMap);
}
}
path.remove(path.size() - 1);
}
}
public class Solution {
public List<List<String>> findLadders(String beginWord, String endWord, Set<String> wordList) {
List<List<String>> result = new ArrayList<>();
Set<String> unvisited = new HashSet<>();
Map<String, List<String>> nextMap = new HashMap<>();
Queue<LevelString> queue = new LinkedList<>();
unvisited.addAll(wordList);
unvisited.add(beginWord);
unvisited.remove(endWord);
for (String str : unvisited) {
nextMap.put(str, new ArrayList<String>());
}
queue.add(new LevelString(endWord, 0));
Set<String> visited = new HashSet<>();
int currLevel = 0;
int prevLevel = 0;
int foundLevel = Integer.MAX_VALUE;
while (!queue.isEmpty()) {
LevelString currLevelString = queue.poll();
String currString = currLevelString.string;
currLevel = currLevelString.level;
if (currLevel > foundLevel) {
break;
}
if (currLevel > prevLevel) {
unvisited.removeAll(visited);
}
prevLevel = currLevel;
char[] currArr = currString.toCharArray();
for (int i = 0; i < currArr.length; i++) {
char originChar = currArr[i];
boolean currFound = false;
for (char newChar = 'a'; newChar <= 'z'; newChar++) {
currArr[i] = newChar;
String newString = new String(currArr);
if (newChar != originChar && unvisited.contains(newString)) {
nextMap.get(newString).add(currString);
if (beginWord.equals(newString)) {
currFound = true;
foundLevel = currLevel;
break;
}
if (!visited.contains(newString)) {
visited.add(newString);
queue.add(new LevelString(newString, currLevel + 1));
}
}
}
if (currFound) {
break;
}
currArr[i] = originChar;
}
}
if (foundLevel != Integer.MAX_VALUE) {
List<String> path = new ArrayList<>();
path.add(beginWord);
findResult(endWord, path, foundLevel + 1, nextMap, result);
}
return result;
}
private void findResult(String endWord, List<String> currPath, int level, Map<String, List<String>> nextMap, List<List<String>> result) {
if (level < 0) {
return;
}
String currWord = currPath.get(currPath.size() - 1);
if (currWord.equals(endWord)) {
result.add(new ArrayList<String>(currPath));
return;
}
List<String> nextWords = nextMap.get(currWord);
for (String nextWord : nextWords) {
currPath.add(nextWord);
findResult(endWord, currPath, level - 1, nextMap, result);
currPath.remove(currPath.size() - 1);
}
}
}
class LevelString {
String string;
int level;
public LevelString(String string, int level) {
this.string = string;
this.level = level;
}
}
'''
# below is for 2017 version
Leecode2017 = '''
#29ms 94.81%
public class Solution {
boolean isConnected = false;
public List<List<String>> findLadders(String beginWord, String endWord, List<String> wordList){
List<List<String>> result = new ArrayList<List<String>>();
Set<String> dict = new HashSet<>(wordList);
if(!dict.contains(endWord)){
return result;
}
Set<String> fwd = new HashSet<String>();
fwd.add(beginWord);
Set<String> bwd = new HashSet<String>();
bwd.add(endWord);
Map<String, List<String>> hs = new HashMap<String, List<String>>();
BFS(fwd, bwd, dict, false, hs);
if (!isConnected) return result;
List<String> temp = new ArrayList<String>();
temp.add(beginWord);
DFS(result, temp, beginWord, endWord, hs);
return result;
}
public void BFS (Set<String> forward, Set<String> backward, Set<String> dict, boolean swap, Map<String, List<String>> hs){
if (forward.isEmpty() || backward.isEmpty()){
return;
}
if (forward.size() > backward.size()){
BFS(backward, forward, dict, !swap, hs);
return;
}
dict.removeAll(forward);
dict.removeAll(backward);
Set<String> set3 = new HashSet<String>();
for (String str : forward){
for (int i = 0; i < str.length(); i++){
char[] ary = str.toCharArray();
for (char j = 'a'; j <= 'z'; j++){
ary[i] = j;
String temp = new String(ary);
if(!backward.contains(temp) && !dict.contains(temp)){
continue;
}
String key = !swap ? str : temp;
String val = !swap ? temp : str;
if (!hs.containsKey(key)) hs.put(key, new ArrayList<String>());
if (backward.contains(temp)){
hs.get(key).add(val);
isConnected = true;
}
if (!isConnected && dict.contains(temp)){
hs.get(key).add(val);
set3.add(temp);
}
}
}
}
if (!isConnected){
BFS(set3, backward, dict, swap, hs);
}
}
public void DFS (List<List<String>> result, List<String> temp, String start, String end, Map<String, List<String>> hs){
if(start.equals(end)){
result.add(new ArrayList<String>(temp));
return;
}
if (!hs.containsKey(start)) return;
for (String s : hs.get(start)){
temp.add(s);
DFS(result, temp, s, end, hs);
temp.remove(temp.size() - 1);
}
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/WordLadderII.py | Python | apache-2.0 | 14,710 |
from django.conf import settings
from django.contrib.admin import site, ModelAdmin
from site_content.settings import RTE_CONFIG_URI
from site_slider.models import Slider, SliderSlide
class SliderAdmin(ModelAdmin):
list_display = ('label', 'code', 'css_class')
list_editable = ('code', 'css_class')
site.register(Slider, SliderAdmin)
class SliderSlideAdmin(ModelAdmin):
list_display = ('get_admin_label', 'slider', 'weight')
list_editable = ('weight',)
change_form_template = 'admin/site_content/change_form.html'
class Media:
css = {'all': ('site_content/css/grappelli-tinymce.css',)}
js = (getattr(settings, 'STATIC_URL', '') + 'grappelli/tinymce/jscripts/tiny_mce/tiny_mce.js', RTE_CONFIG_URI)
def get_admin_label(self, obj):
return u'%s - %s' % (obj.slider.label, obj.weight)
site.register(SliderSlide, SliderSlideAdmin)
| wlashell/lyrical_page | site_slider/admin.py | Python | apache-2.0 | 886 |
from __future__ import division
import numpy as np
try:
import cPickle as pickle
except:
import pickle #--- To handle data export
import imgPage
import sys, argparse #--- To handle console arguments
def main():
parser = argparse.ArgumentParser(description='Page Layout Extraction')
parser.add_argument('-i', required=True, action="store", help="Pointer to XML's folder")
parser.add_argument('-out', required=True, action="store", help="Pointer to output folder")
args = parser.parse_args()
fh = open(args.i, 'r')
imgData = pickle.load(fh)
fh.close()
for img in imgData:
(row, col, feat) = img.Xdata.shape
a = np.empty(row*col, dtype='str')
a = np.apply_along_axis('-'.join, 1, img.Xdata.reshape(row*col,feat).astype('int').astype('str'))
l = img.labels.reshape(row*col)
print "Working on {} ...".format(img.name)
it = np.nditer(a, flags=['f_index'])
b = np.empty(row*col, dtype='a600')
while not it.finished:
#print it.index
if (it.index == 0):
b[it.index] = "{0:d}\tw[t]={1:s}\tw[n]={2:s}\tw[b]={3:s}\tw[t]|w[n]={1}|{2}\tw[t]|w[b]={1}|{3}".format(l[it.index], a[it.index], a[it.index+1], a[it.index+col+1])
elif (it.index == it.itersize-1):
b[it.index] = "{0:d}\tw[t]={1:s}\tw[p]={2:s}\tw[u]={3:s}\tw[u]|w[t]={3}|{1}\tw[p]|w[t]={2}|{1}".format(l[it.index], a[it.index], a[it.index-1], a[it.index-col-1])
elif (it.index <= col):
b[it.index] = "{0:d}\tw[t]={1:s}\tw[p]={2:s}\tw[n]={3:s}\tw[b]={4:s}\tw[p]|w[t]={2}|{1}\tw[t]|w[n]={1}|{3}\tw[t]|w[b]={1}|{4}".format(l[it.index], a[it.index], a[it.index-1], a[it.index+1], a[it.index+col+1])
elif (it.index >= it.itersize-col-1):
b[it.index] = "{0:d}\tw[t]={1:s}\tw[p]={2:s}\tw[n]={3:s}\tw[u]={4:s}\tw[u]|w[t]={4}|{1}\tw[p]|w[t]={2}|{1}\tw[t]|w[n]={1}|{3}".format(l[it.index], a[it.index], a[it.index-1], a[it.index+1], a[it.index-col-1])
else:
b[it.index] = "{0:d}\tw[t]={1:s}\tw[p]={2:s}\tw[n]={3:s}\tw[u]={4:s}\tw[b]{5:s}\tw[u]|w[t]={4}|{1}\tw[p]|w[t]={2}|{1}\tw[t]|w[n]={1}|{3}\tw[t]|w[b]={1}|{5}".format(l[it.index], a[it.index], a[it.index-1], a[it.index+1], a[it.index-col-1], a[it.index+col+1])
if (it.index != 0 and (it.index + 1) % (col) == 0):
b[it.index] = b[it.index] + "\t__EOS__\n"
if ((it.index) % (col) == 0 ):
b[it.index] = b[it.index] + "\t__BOS__"
it.iternext()
np.savetxt(args.out + img.name, b, fmt="%s")
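# Editorial note (interpretation of the code above): each pixel becomes one
# "label<TAB>features" line with unigram features (w[t] current, w[p] previous,
# w[n] next, w[u] above, w[b] below) plus bigram combinations, and __BOS__ /
# __EOS__ mark the start and end of every image row so a sequence-labeling
# toolkit can treat rows as sentences.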
if __name__ == '__main__':
main()
| lquirosd/TFM | ILA/code/struct_sentByrow.py | Python | apache-2.0 | 2,659 |
#
#
# Copyright 2015 Marco Bartolini, bartolini@ira.inaf.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import sys
from discosbackend import server
from discosbackend.handlers import DBProtocolHandler
from roach2_backend import Roach2_Backend
#filename='example.log'
logging.basicConfig(filename='example.log',level=logging.DEBUG)
server.run_server(int(sys.argv[1]),
DBProtocolHandler(Roach2_Backend()))
| discos/discos-backend | test/run_server.py | Python | apache-2.0 | 967 |
from typing import Any, Dict, List, Optional
from django.http import HttpRequest
from django.conf import settings
from zerver.models import UserProfile, get_realm, Realm
from zproject.backends import (
any_oauth_backend_enabled,
password_auth_enabled,
require_email_format_usernames,
auth_enabled_helper,
AUTH_BACKEND_NAME_MAP,
)
from zerver.lib.bugdown import convert as bugdown_convert
from zerver.lib.send_email import FromAddress
from zerver.lib.subdomains import get_subdomain
from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_url
from version import ZULIP_VERSION, LATEST_RELEASE_VERSION, \
LATEST_RELEASE_ANNOUNCEMENT, LATEST_MAJOR_VERSION
def common_context(user: UserProfile) -> Dict[str, Any]:
"""Common context used for things like outgoing emails that don't
have a request.
"""
return {
'realm_uri': user.realm.uri,
'realm_name': user.realm.name,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'external_host': settings.EXTERNAL_HOST,
'user_name': user.full_name,
}
def get_realm_from_request(request: HttpRequest) -> Optional[Realm]:
if hasattr(request, "user") and hasattr(request.user, "realm"):
return request.user.realm
subdomain = get_subdomain(request)
return get_realm(subdomain)
def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
"""Context available to all Zulip Jinja2 templates that have a request
passed in. Designed to provide the long list of variables at the
bottom of this function in a wide range of situations: logged-in
or logged-out, subdomains or not, etc.
The main variable in the below is whether we know what realm the
user is trying to interact with.
"""
realm = get_realm_from_request(request)
if realm is None:
realm_uri = settings.ROOT_DOMAIN_URI
realm_name = None
realm_icon = None
realm_logo = None
realm_description = None
realm_invite_required = False
realm_plan_type = 0
else:
realm_uri = realm.uri
realm_name = realm.name
realm_icon = get_realm_icon_url(realm)
realm_logo = get_realm_logo_url(realm)
realm_description_raw = realm.description or "The coolest place in the universe."
realm_description = bugdown_convert(realm_description_raw, message_realm=realm)
realm_invite_required = realm.invite_required
realm_plan_type = realm.plan_type
register_link_disabled = settings.REGISTER_LINK_DISABLED
login_link_disabled = settings.LOGIN_LINK_DISABLED
find_team_link_disabled = settings.FIND_TEAM_LINK_DISABLED
allow_search_engine_indexing = False
if (settings.ROOT_DOMAIN_LANDING_PAGE
and get_subdomain(request) == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN):
register_link_disabled = True
login_link_disabled = True
find_team_link_disabled = False
allow_search_engine_indexing = True
apps_page_url = 'https://zulipchat.com/apps/'
if settings.ZILENCER_ENABLED:
apps_page_url = '/apps/'
user_is_authenticated = False
if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated'):
user_is_authenticated = request.user.is_authenticated.value
if settings.DEVELOPMENT:
secrets_path = "zproject/dev-secrets.conf"
settings_path = "zproject/dev_settings.py"
settings_comments_path = "zproject/prod_settings_template.py"
else:
secrets_path = "/etc/zulip/zulip-secrets.conf"
settings_path = "/etc/zulip/settings.py"
settings_comments_path = "/etc/zulip/settings.py"
if hasattr(request, "client") and request.client.name == "ZulipElectron":
platform = "ZulipElectron" # nocoverage
else:
platform = "ZulipWeb"
context = {
'root_domain_landing_page': settings.ROOT_DOMAIN_LANDING_PAGE,
'custom_logo_url': settings.CUSTOM_LOGO_URL,
'register_link_disabled': register_link_disabled,
'login_link_disabled': login_link_disabled,
'terms_of_service': settings.TERMS_OF_SERVICE,
'privacy_policy': settings.PRIVACY_POLICY,
'login_url': settings.HOME_NOT_LOGGED_IN,
'only_sso': settings.ONLY_SSO,
'external_host': settings.EXTERNAL_HOST,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'realm_invite_required': realm_invite_required,
'realm_uri': realm_uri,
'realm_name': realm_name,
'realm_icon': realm_icon,
'realm_logo': realm_logo,
'realm_description': realm_description,
'realm_plan_type': realm_plan_type,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'apps_page_url': apps_page_url,
'open_realm_creation': settings.OPEN_REALM_CREATION,
'password_auth_enabled': password_auth_enabled(realm),
'require_email_format_usernames': require_email_format_usernames(realm),
'any_oauth_backend_enabled': any_oauth_backend_enabled(realm),
'no_auth_enabled': not auth_enabled_helper(list(AUTH_BACKEND_NAME_MAP.keys()), realm),
'development_environment': settings.DEVELOPMENT,
'support_email': FromAddress.SUPPORT,
'find_team_link_disabled': find_team_link_disabled,
'password_min_length': settings.PASSWORD_MIN_LENGTH,
'password_min_guesses': settings.PASSWORD_MIN_GUESSES,
'jitsi_server_url': settings.JITSI_SERVER_URL,
'two_factor_authentication_enabled': settings.TWO_FACTOR_AUTHENTICATION_ENABLED,
'zulip_version': ZULIP_VERSION,
'latest_release_version': LATEST_RELEASE_VERSION,
'latest_major_version': LATEST_MAJOR_VERSION,
'latest_release_announcement': LATEST_RELEASE_ANNOUNCEMENT,
'user_is_authenticated': user_is_authenticated,
'settings_path': settings_path,
'secrets_path': secrets_path,
'settings_comments_path': settings_comments_path,
'platform': platform,
'allow_search_engine_indexing': allow_search_engine_indexing,
}
# Add the keys for our standard authentication backends.
for auth_backend_name in AUTH_BACKEND_NAME_MAP:
name_lower = auth_backend_name.lower()
key = "%s_auth_enabled" % (name_lower,)
context[key] = auth_enabled_helper([auth_backend_name], realm)
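# Editorial note: this adds one flag per configured backend, e.g. a backend
# named "Email" would yield context["email_auth_enabled"] (illustrative key;
# the real names come from AUTH_BACKEND_NAME_MAP).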
return context
| dhcrzf/zulip | zerver/context_processors.py | Python | apache-2.0 | 6,478 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class FusedBatchNormTest(XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, epsilon, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
normalized = (x - mean) / np.sqrt(var + epsilon)
return (normalized * scale + offset), mean, var
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
# grad_offset = sum(output_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
("_data_format_HWNC", "HWNC"),
("_data_format_HWCN", "HWCN"),
)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
y_ref, mean_ref, var_ref = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.test_session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
y_ref, mean_ref, var_ref = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.test_session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=None,
variance=None,
epsilon=epsilon,
data_format=data_format,
is_training=True)
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict={
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertLess(err, 1e-3)
y_val, mean_val, var_val = sess.run([y, mean, var], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(mean_val, mean_ref, atol=1e-3)
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
self.assertAllClose(var_val, var_ref, atol=1e-3)
@parameterized.named_parameters(
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
("_data_format_HWNC", "HWNC"),
("_data_format_HWCN", "HWCN"),
)
def testLearning(self, data_format):
self._testLearning(False, data_format)
@parameterized.named_parameters(
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
("_data_format_HWNC", "HWNC"),
("_data_format_HWCN", "HWCN"),
)
def testLearningWithGradientChecker(self, data_format):
self._testLearning(True, data_format)
@parameterized.named_parameters(
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
("_data_format_HWNC", "HWNC"),
("_data_format_HWCN", "HWCN"),
)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.test_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
("_data_format_HWNC", "HWNC"),
("_data_format_HWCN", "HWCN"),
)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.test_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
grad_x_val, grad_scale_val, grad_offset_val, = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
grad_x_ref, grad_scale_ref, grad_offset_ref, = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
if __name__ == "__main__":
test.main()
| meteorcloudy/tensorflow | tensorflow/compiler/tests/fused_batchnorm_test.py | Python | apache-2.0 | 12,531 |
from django.apps import AppConfig
class TypesOfPaymentConfig(AppConfig):
name = 'types_of_payment'
| jojoriveraa/titulacion-NFCOW | NFCow/types_of_payment/apps.py | Python | apache-2.0 | 105 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database code for Swift """
from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp
from eventlet import sleep, Timeout
import sqlite3
from swift.common.utils import json, Timestamp, renamer, \
mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max number of pending entries
PENDING_CAP = 131072
def utf8encode(*args):
return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]
def utf8encodekeys(metadata):
uni_keys = [k for k in metadata if isinstance(k, unicode)]
for k in uni_keys:
sv = metadata[k]
del metadata[k]
metadata[k.encode('utf-8')] = sv
def _db_timeout(timeout, db_file, call):
with LockTimeout(timeout, db_file):
retry_wait = 0.001
while True:
try:
return call()
except sqlite3.OperationalError as e:
if 'locked' not in str(e):
raise
sleep(retry_wait)
retry_wait = min(retry_wait * 2, 0.05)
class DatabaseConnectionError(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path, msg, timeout=0):
self.path = path
self.timeout = timeout
self.msg = msg
def __str__(self):
return 'DB connection error (%s, %s):\n%s' % (
self.path, self.timeout, self.msg)
class DatabaseAlreadyExists(sqlite3.DatabaseError):
"""More friendly error messages for DB Errors."""
def __init__(self, path):
self.path = path
def __str__(self):
return 'DB %s already exists' % self.path
class GreenDBConnection(sqlite3.Connection):
"""SQLite DB Connection handler that plays well with eventlet."""
def __init__(self, database, timeout=None, *args, **kwargs):
if timeout is None:
timeout = BROKER_TIMEOUT
self.timeout = timeout
self.db_file = database
super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)
def cursor(self, cls=None):
if cls is None:
cls = GreenDBCursor
return sqlite3.Connection.cursor(self, cls)
def commit(self):
return _db_timeout(
self.timeout, self.db_file,
lambda: sqlite3.Connection.commit(self))
class GreenDBCursor(sqlite3.Cursor):
"""SQLite Cursor handler that plays well with eventlet."""
def __init__(self, *args, **kwargs):
self.timeout = args[0].timeout
self.db_file = args[0].db_file
super(GreenDBCursor, self).__init__(*args, **kwargs)
def execute(self, *args, **kwargs):
return _db_timeout(
self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
self, *args, **kwargs))
def dict_factory(crs, row):
"""
This should only be used when you need a real dict,
i.e. when you're going to serialize the results.
"""
return dict(
((col[0], row[idx]) for idx, col in enumerate(crs.description)))
def chexor(old, name, timestamp):
"""
Each entry in the account and container databases is XORed by the 128-bit
hash on insert or delete. This serves as a rolling, order-independent hash
of the contents. (check + XOR)
:param old: hex representation of the current DB hash
:param name: name of the object or container being inserted
:param timestamp: internalized timestamp of the new record
:returns: a hex representation of the new hash value
"""
if name is None:
raise Exception('name is None!')
new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
return '%032x' % (int(old, 16) ^ int(new, 16))
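# Editorial example (not part of the original module): XOR is commutative and
# associative, so the rolling hash is order-independent:
#   chexor(chexor('0' * 32, 'a', t1), 'b', t2)
#   == chexor(chexor('0' * 32, 'b', t2), 'a', t1)
# which lets replicas that applied the same records in different orders agree.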
def get_db_connection(path, timeout=30, okay_to_create=False):
"""
Returns a properly configured SQLite database connection.
:param path: path to DB
:param timeout: timeout for connection
:param okay_to_create: if True, create the DB if it doesn't exist
:returns: DB connection object
"""
try:
connect_time = time.time()
conn = sqlite3.connect(path, check_same_thread=False,
factory=GreenDBConnection, timeout=timeout)
if path != ':memory:' and not okay_to_create:
# attempt to detect and fail when connect creates the db file
stat = os.stat(path)
if stat.st_size == 0 and stat.st_ctime >= connect_time:
os.unlink(path)
raise DatabaseConnectionError(path,
'DB file created by connect?')
conn.row_factory = sqlite3.Row
conn.text_factory = str
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = NORMAL')
cur.execute('PRAGMA count_changes = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = DELETE')
conn.create_function('chexor', 3, chexor)
except sqlite3.DatabaseError:
import traceback
raise DatabaseConnectionError(path, traceback.format_exc(),
timeout=timeout)
return conn
class DatabaseBroker(object):
"""Encapsulates working with a database."""
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False):
"""Encapsulates working with a database."""
self.conn = None
self.db_file = db_file
self.pending_file = self.db_file + '.pending'
self.pending_timeout = pending_timeout or 10
self.stale_reads_ok = stale_reads_ok
self.db_dir = os.path.dirname(db_file)
self.timeout = timeout
self.logger = logger or logging.getLogger()
self.account = account
self.container = container
self._db_version = -1
def __str__(self):
"""
Returns a string identifying the entity under broker to a human.
The baseline implementation returns a full pathname to a database.
This is vital for useful diagnostics.
"""
return self.db_file
def initialize(self, put_timestamp=None, storage_policy_index=None):
"""
Create the DB
The storage_policy_index is passed through to the subclass's
``_initialize`` method. It is ignored by ``AccountBroker``.
:param put_timestamp: internalized timestamp of initial PUT request
:param storage_policy_index: only required for containers
"""
if self.db_file == ':memory:':
tmp_db_file = None
conn = get_db_connection(self.db_file, self.timeout)
else:
mkdirs(self.db_dir)
fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
os.close(fd)
conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
factory=GreenDBConnection, timeout=0)
# creating dbs implicitly does a lot of transactions, so we
# pick fast, unsafe options here and do a big fsync at the end.
with closing(conn.cursor()) as cur:
cur.execute('PRAGMA synchronous = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA journal_mode = MEMORY')
conn.create_function('chexor', 3, chexor)
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.executescript("""
CREATE TABLE outgoing_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TABLE incoming_sync (
remote_id TEXT UNIQUE,
sync_point INTEGER,
updated_at TEXT DEFAULT 0
);
CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
BEGIN
UPDATE outgoing_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
BEGIN
UPDATE incoming_sync
SET updated_at = STRFTIME('%s', 'NOW')
WHERE ROWID = new.ROWID;
END;
""")
if not put_timestamp:
put_timestamp = Timestamp(0).internal
self._initialize(conn, put_timestamp,
storage_policy_index=storage_policy_index)
conn.commit()
if tmp_db_file:
conn.close()
with open(tmp_db_file, 'r+b') as fp:
os.fsync(fp.fileno())
with lock_parent_directory(self.db_file, self.pending_timeout):
if os.path.exists(self.db_file):
# It's as if there was a "condition" where different parts
# of the system were "racing" each other.
raise DatabaseAlreadyExists(self.db_file)
renamer(tmp_db_file, self.db_file)
self.conn = get_db_connection(self.db_file, self.timeout)
else:
self.conn = conn
def delete_db(self, timestamp):
"""
Mark the DB as deleted
:param timestamp: internalized delete timestamp
"""
# first, clear the metadata
cleared_meta = {}
for k in self.metadata:
cleared_meta[k] = ('', timestamp)
self.update_metadata(cleared_meta)
# then mark the db as deleted
with self.get() as conn:
self._delete_db(conn, timestamp)
conn.commit()
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
"""
Checks the exception info to see if it indicates a quarantine situation
(malformed or corrupted database). If not, the original exception will
be reraised. If so, the database will be quarantined and a new
sqlite3.DatabaseError will be raised indicating the action taken.
"""
if 'database disk image is malformed' in str(exc_value):
exc_hint = 'malformed'
elif 'file is encrypted or is not a database' in str(exc_value):
exc_hint = 'corrupted'
else:
raise exc_type, exc_value, exc_traceback
prefix_path = os.path.dirname(self.db_dir)
partition_path = os.path.dirname(prefix_path)
dbs_path = os.path.dirname(partition_path)
device_path = os.path.dirname(dbs_path)
quar_path = os.path.join(device_path, 'quarantined',
self.db_type + 's',
os.path.basename(self.db_dir))
try:
renamer(self.db_dir, quar_path)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quar_path = "%s-%s" % (quar_path, uuid4().hex)
renamer(self.db_dir, quar_path)
detail = _('Quarantined %s to %s due to %s database') % \
(self.db_dir, quar_path, exc_hint)
self.logger.error(detail)
raise sqlite3.DatabaseError(detail)
@contextmanager
def get(self):
"""Use with the "with" statement; returns a database connection."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
try:
self.conn = get_db_connection(self.db_file, self.timeout)
except (sqlite3.DatabaseError, DatabaseConnectionError):
self.possibly_quarantine(*sys.exc_info())
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
try:
yield conn
conn.rollback()
self.conn = conn
except sqlite3.DatabaseError:
try:
conn.close()
except Exception:
pass
self.possibly_quarantine(*sys.exc_info())
except (Exception, Timeout):
conn.close()
raise
@contextmanager
def lock(self):
"""Use with the "with" statement; locks a database."""
if not self.conn:
if self.db_file != ':memory:' and os.path.exists(self.db_file):
self.conn = get_db_connection(self.db_file, self.timeout)
else:
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
conn = self.conn
self.conn = None
orig_isolation_level = conn.isolation_level
conn.isolation_level = None
conn.execute('BEGIN IMMEDIATE')
try:
yield True
except (Exception, Timeout):
pass
try:
conn.execute('ROLLBACK')
conn.isolation_level = orig_isolation_level
self.conn = conn
except (Exception, Timeout):
logging.exception(
_('Broker error trying to rollback locked connection'))
conn.close()
def newid(self, remote_id):
"""
Re-id the database. This should be called after an rsync.
:param remote_id: the ID of the remote database being rsynced in
"""
with self.get() as conn:
row = conn.execute('''
UPDATE %s_stat SET id=?
''' % self.db_type, (str(uuid4()),))
row = conn.execute('''
SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
''' % self.db_contains_type).fetchone()
sync_point = row['ROWID'] if row else -1
conn.execute('''
INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, remote_id))
self._newid(conn)
conn.commit()
def _newid(self, conn):
# Override for additional work when receiving an rsynced db.
pass
def _is_deleted(self, conn):
"""
Check if the database is considered deleted
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
raise NotImplementedError()
def is_deleted(self):
"""
Check if the DB is considered to be deleted.
:returns: True if the DB is considered to be deleted, False otherwise
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return True
self._commit_puts_stale_ok()
with self.get() as conn:
return self._is_deleted(conn)
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
"""
Used in replication to handle updating timestamps.
:param created_at: create timestamp
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
"""
with self.get() as conn:
old_status = self._is_deleted(conn)
conn.execute('''
UPDATE %s_stat SET created_at=MIN(?, created_at),
put_timestamp=MAX(?, put_timestamp),
delete_timestamp=MAX(?, delete_timestamp)
''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
if old_status != self._is_deleted(conn):
timestamp = Timestamp(time.time())
self._update_status_changed_at(conn, timestamp.internal)
conn.commit()
def get_items_since(self, start, count):
"""
Get a list of objects in the database with ROWID greater than start.
:param start: start ROWID (exclusive)
:param count: maximum number of rows to return
:returns: list of up to count objects with ROWID > start
"""
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('''
SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
''' % self.db_contains_type, (start, count))
curs.row_factory = dict_factory
return [r for r in curs]
def get_sync(self, id, incoming=True):
"""
Gets the most recent sync point for a server from the sync table.
:param id: remote ID to get the sync_point for
:param incoming: if True, get the last incoming sync, otherwise get
the last outgoing sync
:returns: the sync point, or -1 if the id doesn't exist.
"""
with self.get() as conn:
row = conn.execute(
"SELECT sync_point FROM %s_sync WHERE remote_id=?"
% ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
if not row:
return -1
return row['sync_point']
def get_syncs(self, incoming=True):
"""
Get a serialized copy of the sync table.
:param incoming: if True, read the incoming_sync table, otherwise
read the outgoing_sync table
:returns: list of {'remote_id', 'sync_point'}
"""
with self.get() as conn:
curs = conn.execute('''
SELECT remote_id, sync_point FROM %s_sync
''' % ('incoming' if incoming else 'outgoing'))
result = []
for row in curs:
result.append({'remote_id': row[0], 'sync_point': row[1]})
return result
def get_max_row(self):
query = '''
SELECT SQLITE_SEQUENCE.seq
FROM SQLITE_SEQUENCE
WHERE SQLITE_SEQUENCE.name == '%s'
LIMIT 1
''' % (self.db_contains_type)
with self.get() as conn:
row = conn.execute(query).fetchone()
return row[0] if row else -1
def get_replication_info(self):
"""
Get information about the DB required for replication.
:returns: dict containing keys from get_info plus max_row and metadata
Note: get_info's <db_contains_type>_count is translated to just
"count" and metadata is the raw string.
"""
info = self.get_info()
info['count'] = info.pop('%s_count' % self.db_contains_type)
info['metadata'] = self.get_raw_metadata()
info['max_row'] = self.get_max_row()
return info
def get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
curs = conn.execute('SELECT * from %s_stat' % self.db_type)
curs.row_factory = dict_factory
return curs.fetchone()
def put_record(self, record):
if self.db_file == ':memory:':
self.merge_items([record])
return
if not os.path.exists(self.db_file):
raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
with lock_parent_directory(self.pending_file, self.pending_timeout):
pending_size = 0
try:
pending_size = os.path.getsize(self.pending_file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
if pending_size > PENDING_CAP:
self._commit_puts([record])
else:
with open(self.pending_file, 'a+b') as fp:
# Colons aren't used in base64 encoding, so they make a safe
# delimiter between pickled records
fp.write(':')
fp.write(pickle.dumps(
self.make_tuple_for_pickle(record),
protocol=PICKLE_PROTOCOL).encode('base64'))
fp.flush()
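# Editor's sketch of the .pending entry format written above: colon-separated,
# base64-encoded pickles. The module itself uses the Python 2 str.encode('base64')
# codec; the base64 module is shown here only for clarity, and the record tuple
# is a made-up example.
def _pending_entry_sketch(record_tuple=('o', '0000001234.00000', 0)):
    import base64
    blob = base64.b64encode(pickle.dumps(record_tuple, protocol=PICKLE_PROTOCOL))
    entry = b':' + blob                            # what put_record() appends
    chunks = [c for c in entry.split(b':') if c]   # what _commit_puts() splits out
    return [pickle.loads(base64.b64decode(c)) for c in chunks]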
def _commit_puts(self, item_list=None):
"""
Scan for .pending files and commit the found records by feeding them
to merge_items(). Assume that lock_parent_directory has already been
called.
:param item_list: A list of items to commit in addition to .pending
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
if item_list is None:
item_list = []
self._preallocate()
if not os.path.getsize(self.pending_file):
if item_list:
self.merge_items(item_list)
return
with open(self.pending_file, 'r+b') as fp:
for entry in fp.read().split(':'):
if entry:
try:
self._commit_puts_load(item_list, entry)
except Exception:
self.logger.exception(
_('Invalid pending entry %(file)s: %(entry)s'),
{'file': self.pending_file, 'entry': entry})
if item_list:
self.merge_items(item_list)
try:
os.ftruncate(fp.fileno(), 0)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _commit_puts_stale_ok(self):
"""
Catch lock failures in _commit_puts() when the broker is only being
used to read stats and can therefore tolerate stale pending updates.
"""
if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
return
try:
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
except LockTimeout:
if not self.stale_reads_ok:
raise
def _commit_puts_load(self, item_list, entry):
"""
Unmarshal the given entry and append it to item_list.
This is implemented by a particular broker to be compatible
with its :func:`merge_items`.
"""
raise NotImplementedError
def make_tuple_for_pickle(self, record):
"""
Turn this db record dict into the format this service uses for
pending pickles.
"""
raise NotImplementedError
def merge_syncs(self, sync_points, incoming=True):
"""
Merge a list of sync points with the incoming sync table.
:param sync_points: list of sync points where a sync point is a dict of
{'sync_point', 'remote_id'}
:param incoming: if True, merge into the incoming_sync table,
otherwise merge into the outgoing_sync table
"""
with self.get() as conn:
for rec in sync_points:
try:
conn.execute('''
INSERT INTO %s_sync (sync_point, remote_id)
VALUES (?, ?)
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
except sqlite3.IntegrityError:
conn.execute('''
UPDATE %s_sync SET sync_point=max(?, sync_point)
WHERE remote_id=?
''' % ('incoming' if incoming else 'outgoing'),
(rec['sync_point'], rec['remote_id']))
conn.commit()
def _preallocate(self):
"""
The idea is to allocate space in front of an expanding db. If it gets
within 512k of a boundary, it allocates to the next boundary.
Boundaries are 1m, 2m, 5m, 10m, 25m, 50m, then every 50m after.
"""
if not DB_PREALLOCATION or self.db_file == ':memory:':
return
MB = (1024 * 1024)
def prealloc_points():
for pm in (1, 2, 5, 10, 25, 50):
yield pm * MB
while True:
pm += 50
yield pm * MB
stat = os.stat(self.db_file)
file_size = stat.st_size
allocated_size = stat.st_blocks * 512
for point in prealloc_points():
if file_size <= point - MB / 2:
prealloc_size = point
break
if allocated_size < prealloc_size:
with open(self.db_file, 'rb+') as fp:
fallocate(fp.fileno(), int(prealloc_size))
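# Editor's sketch: the boundary _preallocate() above would fallocate() to for a
# few hypothetical database sizes. Within 512 KiB of a boundary it rolls over to
# the next one.
def _prealloc_boundary_sketch():
    MB = 1024 * 1024
    # {current file size: boundary the code above selects}
    return {
        int(0.4 * MB): 1 * MB,    # comfortably below 1m -> pad to 1m
        int(0.6 * MB): 2 * MB,    # within 512k of 1m -> skip ahead to 2m
        int(4.4 * MB): 5 * MB,
        int(4.6 * MB): 10 * MB,   # within 512k of 5m -> skip ahead to 10m
        51 * MB: 100 * MB,        # past 50m the boundaries continue every 50m
    }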
def get_raw_metadata(self):
with self.get() as conn:
try:
metadata = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
metadata = ''
return metadata
@property
def metadata(self):
"""
Returns the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value.
"""
metadata = self.get_raw_metadata()
if metadata:
metadata = json.loads(metadata)
utf8encodekeys(metadata)
else:
metadata = {}
return metadata
def update_metadata(self, metadata_updates):
"""
Updates the metadata dict for the database. The metadata dict values
are tuples of (value, timestamp) where the timestamp indicates when
that key was set to that value. Key/values will only be overwritten if
the timestamp is newer. To delete a key, set its value to ('',
timestamp). These empty keys will eventually be removed by
:func:`reclaim`
"""
old_metadata = self.metadata
if set(metadata_updates).issubset(set(old_metadata)):
for key, (value, timestamp) in metadata_updates.iteritems():
if timestamp > old_metadata[key][1]:
break
else:
return
with self.get() as conn:
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
md = json.loads(md) if md else {}
utf8encodekeys(md)
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
conn.execute("""
ALTER TABLE %s_stat
ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type)
md = {}
for key, value_timestamp in metadata_updates.iteritems():
value, timestamp = value_timestamp
if key not in md or timestamp > md[key][1]:
md[key] = value_timestamp
conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
(json.dumps(md),))
conn.commit()
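# Editor's sketch: the (value, timestamp) shape update_metadata() expects,
# including the empty-value form that marks a key for eventual removal by
# reclaim(). The broker, keys and timestamps are all hypothetical.
def _metadata_update_sketch(broker):
    broker.update_metadata({
        'X-Container-Meta-Color': ('blue', '0000001000.00000'),
        # an empty value "deletes" the key once reclaim() runs
        'X-Container-Meta-Temp': ('', '0000001001.00000'),
    })
    return broker.metadata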
def reclaim(self, age_timestamp, sync_timestamp):
"""
Delete rows from the db_contains_type table that are marked deleted
and whose created_at timestamp is < age_timestamp. Also deletes rows
from incoming_sync and outgoing_sync where the updated_at timestamp is
< sync_timestamp.
In addition, this calls the DatabaseBroker's :func:`_reclaim` method.
:param age_timestamp: max created_at timestamp of object rows to delete
:param sync_timestamp: max updated_at timestamp of sync rows to delete
"""
if self.db_file != ':memory:' and os.path.exists(self.pending_file):
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
with self.get() as conn:
conn.execute('''
DELETE FROM %s WHERE deleted = 1 AND %s < ?
''' % (self.db_contains_type, self.db_reclaim_timestamp),
(age_timestamp,))
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
''', (sync_timestamp,))
conn.execute('''
DELETE FROM incoming_sync WHERE updated_at < ?
''', (sync_timestamp,))
except sqlite3.OperationalError as err:
# Old dbs didn't have updated_at in the _sync tables.
if 'no such column: updated_at' not in str(err):
raise
DatabaseBroker._reclaim(self, conn, age_timestamp)
conn.commit()
def _reclaim(self, conn, timestamp):
"""
Removes any empty metadata values older than the timestamp using the
given database connection. This function will not call commit on the
conn, but will instead return True if the database needs committing.
This function was created as a worker to limit transactions and commits
from other related functions.
:param conn: Database connection to reclaim metadata within.
:param timestamp: Empty metadata items last updated before this
timestamp will be removed.
:returns: True if conn.commit() should be called
"""
try:
md = conn.execute('SELECT metadata FROM %s_stat' %
self.db_type).fetchone()[0]
if md:
md = json.loads(md)
keys_to_delete = []
for key, (value, value_timestamp) in md.iteritems():
if value == '' and value_timestamp < timestamp:
keys_to_delete.append(key)
if keys_to_delete:
for key in keys_to_delete:
del md[key]
conn.execute('UPDATE %s_stat SET metadata = ?' %
self.db_type, (json.dumps(md),))
return True
except sqlite3.OperationalError as err:
if 'no such column: metadata' not in str(err):
raise
return False
def update_put_timestamp(self, timestamp):
"""
Update the put_timestamp. Only modifies it if it is greater than
the current timestamp.
:param timestamp: internalized put timestamp
"""
with self.get() as conn:
conn.execute(
'UPDATE %s_stat SET put_timestamp = ?'
' WHERE put_timestamp < ?' % self.db_type,
(timestamp, timestamp))
conn.commit()
def update_status_changed_at(self, timestamp):
"""
Update the status_changed_at field in the stat table. Only
modifies status_changed_at if the timestamp is greater than the
current status_changed_at timestamp.
:param timestamp: internalized timestamp
"""
with self.get() as conn:
self._update_status_changed_at(conn, timestamp)
conn.commit()
def _update_status_changed_at(self, conn, timestamp):
conn.execute(
'UPDATE %s_stat SET status_changed_at = ?'
' WHERE status_changed_at < ?' % self.db_type,
(timestamp, timestamp))
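# Editor's sketch (appended; not part of the original module): the pure
# timestamp arithmetic that merge_timestamps() applies via SQL above --
# created_at takes the minimum, put/delete take the maximum. Assumes Swift's
# fixed-width internal timestamp strings, which compare correctly as text.
def _merge_timestamps_sketch(local, remote):
    """local/remote are (created_at, put_timestamp, delete_timestamp) triples."""
    return (min(local[0], remote[0]),
            max(local[1], remote[1]),
            max(local[2], remote[2]))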
|
sarvesh-ranjan/swift
|
swift/common/db.py
|
Python
|
apache-2.0
| 32,509
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@PydevCodeAnalysisIgnore
module('lists')
output('../temp')
config(checkTypes=True)
entryPoint('listTest', inst('float'), inst('float'), inst('float'))
entryPoint('addConstList')
entryPoint('addHybridList', inst('int'))
entryPoint('swap', inst('int'), inst('int'))
entryPoint('unpackConstCompound')
entryPoint('unpackCompound', inst('int'), inst('int'), inst('int'), inst('int'))
entryPoint('index')
|
ncbray/pystream
|
bin/tests/full/makelists.py
|
Python
|
apache-2.0
| 982
|
import copy
#import six
from eclcli.common import command
from eclcli.common import utils
#from ..dhclient import exceptions
#from ..dhclient import utils
class ListServer(command.Lister):
def get_parser(self, prog_name):
parser = super(ListServer, self).get_parser(prog_name)
parser.add_argument(
"--detail",
help="Detailed view of server list",
action='store_true',
)
parser.add_argument(
"--changes-since",
help="Filter by changes after a date",
metavar='<changes-since>'
)
parser.add_argument(
"--marker",
help="List servers after the marker. Marker must be a server UUID",
metavar='<marker>',
)
parser.add_argument(
"--limit",
help="Limit the list items",
metavar='<limit>',
)
parser.add_argument(
"--name",
help="Filter by server name",
metavar='<name>',
)
parser.add_argument(
"--image",
help="Filter by image in URL format",
metavar='<image>',
)
parser.add_argument(
"--flavor",
help="Filter by flavor in URL format",
metavar='<flavor>',
)
parser.add_argument(
"--status",
help="Filter by server status",
metavar='<status>',
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"changes-since":parsed_args.changes_since,
"marker":parsed_args.marker,
"limit":parsed_args.limit,
"name":parsed_args.name,
"image":parsed_args.image,
"flavor":parsed_args.flavor,
"status":parsed_args.status
}
self.log.debug('search options: %s', search_opts)
if parsed_args.detail:
columns = ['ID',
'Name',
'Status',
'Description',
'Hypervisor Type',
'imageRef',
'Baremetal Server'
]
column_headers = columns
else:
columns = ['ID',
'Name',
'Links']
column_headers = columns
mixed_case_fields = ['imageRef']
data = dh_client.servers.list(search_opts=search_opts,detailed=parsed_args.detail)
return (column_headers,
(utils.get_item_properties(
s, columns,
mixed_case_fields=mixed_case_fields
) for s in data))
class ShowServer(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowServer, self).get_parser(prog_name)
parser.add_argument(
"server_id",
help="ID of server to be shown",
metavar='<server-id>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
self.log.debug('server-id: %s',parsed_args.server_id)
rows = ['ID',
'Name',
'Status',
'Description',
'Hypervisor Type',
'imageRef',
'Baremetal Server'
]
row_headers = rows
mixed_case_fields = ['imageRef']
data = dh_client.servers.get(server_id=parsed_args.server_id)
return (row_headers, (utils.get_item_properties(
data, rows, mixed_case_fields=mixed_case_fields
)))
class CreateServer(command.ShowOne):
def get_parser(self, prog_name):
parser = super(CreateServer, self).get_parser(prog_name)
parser.add_argument(
"name",
help="Server Name",
metavar='<name>'
)
parser.add_argument(
"--description",
help="Server description",
metavar='<description>'
)
parser.add_argument(
"networks",
help="Comma separated list of upto 2 Logical Network ID(s) which belong to different plane(Data/Storage), eg UUID1,UUID2",
metavar='<networks>'
)
parser.add_argument(
"--adminPass",
help="Password for the administrator",
metavar='<adminPass>'
)
parser.add_argument(
"imageRef",
help="Image ID",
metavar='<imageRef>'
)
parser.add_argument(
"flavorRef",
help="Flavor ID",
metavar='<flavorRef>'
)
parser.add_argument(
"--availability_zone",
help="The availability zone name in which to launch the server. If omit this parameter, target availability zone is random",
metavar='<availability-zone>'
)
parser.add_argument(
"--metadata",
help="Metadata key and value pairs. The maximum size of the metadata key and value is 255 bytes each",
metavar='<metadata>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
nics=[]
net_list = parsed_args.networks.split(",")
for net in net_list:
nics.append({"uuid":net})
nics.append({"uuid":net})
rows = [
'ID',
'Links',
'adminPass'
]
row_headers = rows
mixed_case_fields = ['adminPass']
data = dh_client.servers.create(name=parsed_args.name, networks=nics, image_id=parsed_args.imageRef, flavor_id=parsed_args.flavorRef,
admin_pass=parsed_args.adminPass, metadata=parsed_args.metadata, availability_zone=parsed_args.availability_zone, description=parsed_args.description)
return (row_headers,
utils.get_item_properties(
data, rows, mixed_case_fields=mixed_case_fields
))
class DeleteServer(command.Command):
def get_parser(self, prog_name):
parser = super(DeleteServer, self).get_parser(prog_name)
parser.add_argument(
"server_ids",
nargs="+",
help="IDs of servers to be deleted",
metavar='<server-ids>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
self.log.debug('server-ids: %s',parsed_args.server_ids)
for server_id in parsed_args.server_ids:
dh_client.servers.delete(server_id)
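# Editor's sketch (appended): how CreateServer.take_action() above derives its
# nics payload from the comma-separated "networks" positional argument -- one
# dict per logical network ID. The UUID values are placeholders.
def _nics_from_networks_sketch(networks="net-uuid-1,net-uuid-2"):
    return [{"uuid": net} for net in networks.split(",")]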
|
anythingrandom/eclcli
|
eclcli/dh/v2/server.py
|
Python
|
apache-2.0
| 6,761
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ComponentStatusList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ComponentStatus]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ComponentStatusList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ComponentStatusList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ComponentStatusList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ComponentStatusList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ComponentStatusList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ComponentStatusList. # noqa: E501
List of ComponentStatus objects. # noqa: E501
:return: The items of this V1ComponentStatusList. # noqa: E501
:rtype: list[V1ComponentStatus]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ComponentStatusList.
List of ComponentStatus objects. # noqa: E501
:param items: The items of this V1ComponentStatusList. # noqa: E501
:type: list[V1ComponentStatus]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ComponentStatusList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ComponentStatusList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ComponentStatusList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ComponentStatusList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ComponentStatusList. # noqa: E501
:return: The metadata of this V1ComponentStatusList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ComponentStatusList.
:param metadata: The metadata of this V1ComponentStatusList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ComponentStatusList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ComponentStatusList):
return True
return self.to_dict() != other.to_dict()
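# Editor's sketch (appended): constructing the generated model above and
# serializing it. Assumes the kubernetes Python client package is importable;
# the single empty V1ComponentStatus item is just a placeholder.
def _component_status_list_sketch():
    from kubernetes.client import V1ComponentStatus
    status_list = V1ComponentStatusList(
        api_version='v1',
        kind='ComponentStatusList',
        items=[V1ComponentStatus()],
    )
    return status_list.to_dict()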
|
kubernetes-client/python
|
kubernetes/client/models/v1_component_status_list.py
|
Python
|
apache-2.0
| 7,014
|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils for orchestrate commands.
"""
import optparse
import os
import subprocess
from orchestrate import config
def get_gcloud_config_value(key, default=None):
"""Returns gcloud config value.
Args:
key: Config key to retrieve, e.g. project, compute/zone
default: Default value to return if key is not found or an error occurs
trying to retrieve its value.
"""
command = 'gcloud config get-value ' + key
command = command.split()
try:
value = subprocess.check_output(command)
return value.strip()
except subprocess.CalledProcessError:
return default
def get_common_option_defaults():
"""Returns a dictionary with the default values for command-line options."""
# Get GCP config values directly from gcloud config
project = get_gcloud_config_value('project').decode()
zone = get_gcloud_config_value('compute/zone', 'us-central1-a').decode()
# Get Orchestrate-specific values from:
# - environment variables
# - Orchestrate's user config file, i.e.: ~/.config/orchestrate/config_default
# - Sensible default wherever possible
api_host = os.environ.get('ORCHESTRATE_API_HOST')
if not api_host:
api_host = config.get_value('api/host', 'localhost:50051')
api_key = os.environ.get('ORCHESTRATE_API_KEY')
if not api_key:
api_key = config.get_value('api/key')
# Note: the environment variable name has no API_ prefix (it is ORCHESTRATE_PROJECT).
api_project = os.environ.get('ORCHESTRATE_PROJECT')
if not api_project:
api_project = config.get_value('api/project')
return dict(
project=project,
zone=zone,
api_project=api_project,
api_host=api_host,
api_key=api_key,
verbose=False,
)
def get_common_options():
"""Returns parser options common to all Orchestrate commands."""
options = [
optparse.Option('-p', '--project', help=(
'Create resources in project. Default is %default')),
optparse.Option('-z', '--zone', help=(
'Create in zone - make sure GPU_TYPE is available in selected zone.'
' Default is %default')),
optparse.Option('--api-project', help=(
'GCP project hosting the Orchestrate API server. Uses the'
' ORCHESTRATE_PROJECT environment variable, if set, otherwise the'
' api/project value in the orchestrate config_default file.')),
optparse.Option('--api-host', help=(
'Orchestrate API server. Uses the ORCHESTRATE_API_HOST environment'
' variable, if set. Defaults to localhost otherwise. Based on the current'
' environment the default is: %default')),
# DO NOT show the api-key %default value in help text below
optparse.Option('--api-key', help=(
'Orchestrate API key. Uses the ORCHESTRATE_API_KEY environment variable,'
' if set. Defaults to None otherwise.')),
optparse.Option('-v', '--verbose', action='store_true', help=(
'verbose output.')),
]
return options
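# Editor's sketch (appended): wiring the helpers above into an optparse parser
# the way an Orchestrate command might. The parser usage string is hypothetical.
def _build_parser_sketch():
    parser = optparse.OptionParser(usage='%prog [options]')
    parser.add_options(get_common_options())
    parser.set_defaults(**get_common_option_defaults())
    return parser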
|
GoogleCloudPlatform/solutions-cloud-orchestrate
|
cli/src/orchestrate/utils.py
|
Python
|
apache-2.0
| 3,524
|
import pytz
import json
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse, get_user_auth
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.exceptions import NodeStateError
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import NodeSerializer, NodeProviderSerializer
from api.nodes.serializers import NodeLinksSerializer, NodeLicenseSerializer
from api.nodes.serializers import NodeContributorsSerializer
from api.base.serializers import (IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileCommentRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError
class BaseRegistrationSerializer(NodeSerializer):
title = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = HideIfWithdrawal(ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string))
date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
node_license = HideIfWithdrawal(NodeLicenseSerializer(read_only=True))
tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
public = HideIfWithdrawal(ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes'))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.'))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.'))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.'))
withdrawn = ser.BooleanField(source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.')
date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
date_withdrawn = VersionedDateTimeField(source='retraction.date_retracted', read_only=True, help_text='Date time of when this registration was retracted.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
withdrawal_justification = ser.CharField(source='retraction.justification', read_only=True)
template_from = HideIfWithdrawal(ser.CharField(read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.'))
registration_supplement = ser.SerializerMethodField()
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.'))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user._id>'}
))
registered_from = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from._id>'}
))
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': '<_id>'}
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_contrib_count'}
)
implicit_contributors = RelationshipField(
related_view='registrations:registration-implicit-contributors',
related_view_kwargs={'node_id': '<_id>'},
help_text='This feature is experimental and being tested. It may be deprecated.'
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-providers',
related_view_kwargs={'node_id': '<_id>'}
))
wikis = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<_id>'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
))
template_node = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<template_node._id>'}
))
license = HideIfWithdrawal(RelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<node_license.node_license._id>'},
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<_id>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_forks_count'},
))
node_links = ShowIfVersion(HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_pointers_count'},
help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.'
)), min_version='2.0', max_version='2.0')
linked_by_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_nodes_count'},
))
linked_by_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_registrations_count'},
))
parent = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
))
root = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'}
))
affiliated_institutions = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<_id>'}
))
registration_schema = RelationshipField(
related_view='metaschemas:registration-metaschema-detail',
related_view_kwargs={'metaschema_id': '<registered_schema_id>'}
)
settings = HideIfRegistration(RelationshipField(
related_view='nodes:node-settings',
related_view_kwargs={'node_id': '<_id>'}
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<_id>'}
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<_id>'}
))
preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
related_view='nodes:node-preprints',
related_view_kwargs={'node_id': '<_id>'}
)))
identifiers = HideIfWithdrawal(RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<_id>'}
))
linked_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_links_count'},
self_view='registrations:node-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'}
))
linked_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_registration_links_count'},
self_view='registrations:node-registration-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'}
))
view_only_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-view-only-links',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_view_only_links_count'},
))
citation = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-citation',
related_view_kwargs={'node_id': '<_id>'}
))
links = LinksField({'self': 'get_registration_url', 'html': 'get_absolute_html_url'})
def get_registration_url(self, obj):
return absolute_reverse('registrations:registration-detail', kwargs={
'node_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_absolute_url(self, obj):
return self.get_registration_url(obj)
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
draft = validated_data.pop('draft')
registration_choice = validated_data.pop('registration_choice', 'immediate')
embargo_lifted = validated_data.pop('lift_embargo', None)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
try:
draft.validate_metadata(metadata=draft.registration_metadata, reviewer=reviewer, required_fields=True)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
registration = draft.register(auth, save=True)
if registration_choice == 'embargo':
if not embargo_lifted:
raise exceptions.ValidationError('lift_embargo must be specified.')
embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
try:
registration.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise exceptions.ValidationError(err.message)
else:
try:
registration.require_approval(auth.user)
except NodeStateError as err:
raise exceptions.ValidationError(err)
registration.save()
return registration
def get_registered_meta(self, obj):
if obj.registered_meta:
meta_values = obj.registered_meta.values()[0]
try:
return json.loads(meta_values)
except (TypeError, ValueError):
return meta_values
return None
def get_embargo_end_date(self, obj):
if obj.embargo_end_date:
return obj.embargo_end_date
return None
def get_registration_supplement(self, obj):
if obj.registered_schema:
schema = obj.registered_schema.first()
if schema is None:
return None
return schema.name
return None
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def get_view_only_links_count(self, obj):
return obj.private_links.filter(is_deleted=False).count()
def update(self, registration, validated_data):
auth = Auth(self.context['request'].user)
# Update tags
if 'tags' in validated_data:
new_tags = validated_data.pop('tags', [])
try:
registration.update_tags(new_tags, auth=auth)
except NodeStateError as err:
raise Conflict(err.message)
is_public = validated_data.get('is_public', None)
if is_public is not None:
if is_public:
try:
registration.update(validated_data, auth=auth)
except NodeUpdateError as err:
raise exceptions.ValidationError(err.reason)
except NodeStateError as err:
raise exceptions.ValidationError(err.message)
else:
raise exceptions.ValidationError('Registrations can only be turned from private to public.')
return registration
class Meta:
type_ = 'registrations'
class RegistrationSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields
"""
draft_registration = ser.CharField(write_only=True)
registration_choice = ser.ChoiceField(write_only=True, choices=['immediate', 'embargo'])
lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
class RegistrationDetailSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_link_id': obj._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version']
}
)
class RegistrationFileSerializer(OsfStorageFileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder'
)
comments = FileCommentRelationshipField(related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<node._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'}
)
node = RelationshipField(related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<node._id>'},
help_text='The registration that this file belongs to'
)
class RegistrationProviderSerializer(NodeProviderSerializer):
"""
Overrides NodeProviderSerializer to lead to correct registration file links
"""
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
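# Editor's sketch (appended): the write-only fields RegistrationSerializer adds
# translate into a JSON:API create payload roughly like the following. The draft
# id and embargo date are hypothetical; lift_embargo follows the serializer's
# '%Y-%m-%dT%H:%M:%S' input format.
_EXAMPLE_EMBARGOED_REGISTRATION_PAYLOAD = {
    'data': {
        'type': 'registrations',
        'attributes': {
            'draft_registration': 'abc12',
            'registration_choice': 'embargo',
            'lift_embargo': '2030-01-01T00:00:00',
        },
    },
}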
|
sloria/osf.io
|
api/registrations/serializers.py
|
Python
|
apache-2.0
| 17,984
|
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
conductor_group = cfg.OptGroup(
'conductor',
title='Conductor Options')
use_local = cfg.BoolOpt(
'use_local',
default=False,
help='DEPRECATED: Perform nova-conductor operations locally. '
'This legacy mode was introduced to bridge a gap during '
'the transition to the conductor service. It no longer '
'represents a reasonable alternative for deployers. '
'Removal may be as early as 14.0',
deprecated_for_removal=True)
topic = cfg.StrOpt(
'topic',
default='conductor',
help='The topic on which conductor nodes listen')
manager = cfg.StrOpt(
'manager',
default='nova.conductor.manager.ConductorManager',
help='Full class name for the Manager for conductor')
workers = cfg.IntOpt(
'workers',
help='Number of workers for OpenStack Conductor service. '
'The default will be the number of CPUs available.')
ALL_OPTS = [
use_local,
topic,
manager,
workers]
def register_opts(conf):
conf.register_group(conductor_group)
conf.register_opts(ALL_OPTS, group=conductor_group)
def list_opts():
return {conductor_group: ALL_OPTS}
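# Editor's sketch (appended): registering the conductor options on a private
# ConfigOpts instance and reading a default back. This is a minimal illustration,
# not how nova wires its global CONF object.
def _conductor_opts_sketch():
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])  # parse an empty command line so defaults become readable
    return conf.conductor.topic  # -> 'conductor'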
|
dims/nova
|
nova/conf/conductor.py
|
Python
|
apache-2.0
| 1,828
|
"""
Django settings for perpustakaan project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*yjts5mnyqvvz0@jmszuy08t_c%@(yvz@5t7yc#nc44e96n4n*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'perpus',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'perpustakaan.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'perpustakaan.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': os.path.join(BASE_DIR, 'perpustakaan/db.cnf'),
},
}
}
'''
Contoh pengaturan db.cnf
--------------------------
# db.cnf
[client]
database = nama_database
user = user_database
password = password_database
default-character-set = utf8
'''
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'id'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Static for development
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
]
# Static for deployment
# STATIC_ROOT = os.path.join(BASE_DIR, "static")
|
akbarpn136/perpus-dj
|
perpustakaan/settings.py
|
Python
|
apache-2.0
| 3,676
|
#!/usr/bin/env python
from __future__ import print_function
description = """
Upload binary data to Midas.
Since every local Git repository contains a copy of the entire project history,
it is important to avoid adding large binary files directly to the repository.
Large binary files added and removed through the history of a project will cause
the repository to become bloated, take up too much disk space, require excessive
time and bandwidth to download, etc.
The solution to this problem adopted by this project is to store binary files,
such as images, in a separate location outside the repository, then download the
files at build time with CMake.
A "content link" file containing an identifying MD5 checksum is stored in the
Git repository at the path where the file would exist with the ".md5" extension
added to the file. CMake will find these content link files at build time,
download them from a list of server resources, and create symlinks or copies of
the original files at the corresponding location in the build tree.
The Midas server at
http://midas3.kitware.com/midas/
is an ITK community resource where any community member can upload binary data
files. This script automates the upload of data to the server and generation of
the content link file. Before using this script, please go to the Midas
website, register, and join the ITK community.
This script requires the Python module pydas:
https://github.com/midasplatform/pydas
which can be installed with::
pip install pydas
Pass in the input binary files to be uploaded. These should be files
located in the SimpleITK source tree. They will be uploaded and
replaced with a *.md5 file that can be added to the current
commit with "git add -- path/to/file.md5".
"""
import optparse
import hashlib
import os
import subprocess
import sys
import pydas
def connect_to_midas(email=None, api_key=None):
midas_url = 'http://midas3.kitware.com/midas/'
#pydas.login(url=midas_url, email=email, api_key=api_key)
try:
pydas.login(url=midas_url, email=email, api_key=api_key)
except Exception:
print('Error occurred while logging in to ' + midas_url)
sys.exit(1)
session = pydas.session
communicator = session.communicator
return session, communicator
def upload_to_midas(input_file, output_file, folders, session, communicator):
# get the MD5 checksum
print('Computing MD5 checksum...')
md5 = hashlib.md5()
with open(input_file, 'rb') as fp:
for chunk in iter(lambda: fp.read(128 * md5.block_size), b''):
md5.update(chunk)
md5hash = md5.hexdigest()
print('Checksum: ' + md5hash)
# upload to Midas
def get_child_folder(parent, child_name):
children = communicator.folder_children(session.token,
parent['folder_id'])
for folder in children['folders']:
if folder['name'] == child_name:
return folder
return None
itk_community = communicator.get_community_by_name('ITK')
itk_public = get_child_folder(itk_community, 'Public')
simpleitk = get_child_folder(itk_public, 'SimpleITK')
current_folder = simpleitk
for folder in folders:
child_folder = get_child_folder(current_folder, folder)
if child_folder is None:
print('Creating folder: ' + folder)
current_folder = \
communicator.create_folder(session.token,
folder,
current_folder['folder_id'])
else:
current_folder = child_folder
# get the existing or create a new item to hold the file
item_name = os.path.basename(input_file)
item_id = None
current_folder_children = \
communicator.folder_children(session.token,
current_folder['folder_id'])
if 'items' in current_folder_children:
for item in current_folder_children['items']:
if item['name'] == item_name:
item_id = item['item_id']
break
if item_id is None:
new_item = communicator.create_item(session.token, item_name,
current_folder['folder_id'])
item_id = new_item['item_id']
upload_token = communicator.generate_upload_token(session.token,
item_id,
item_name,
md5hash)
if upload_token != "":
communicator.perform_upload(upload_token,
item_name,
item_id=item_id,
revision='head',
filepath=input_file)
# write the content link file
with open(output_file, 'w') as fp:
fp.write(md5hash)
fp.write('\n')
def find_git_dir(filepath):
"""Find our best estimate of GIT_DIR to locate the root of the SimpleITK
repository."""
filepath = os.path.abspath(filepath)
head, tail = os.path.split(filepath)
previous_head = ''
while head != previous_head:
if os.path.exists(os.path.join(head, '.git')):
return head
previous_head = head
head, tail = os.path.split(head)
print('Could not find the root of the SimpleITK repository!')
sys.exit(1)
def run(input_files, output_files,
email=None, api_key=None,
no_delete=False):
git_dir = find_git_dir(input_files[0])
git_email_cmd = subprocess.Popen(['git', 'config', 'user.email'],
cwd=git_dir,
stdout=subprocess.PIPE)
if git_email_cmd.wait() == 0:
git_email = git_email_cmd.stdout.readline().strip()
email_input = raw_input('Email [' + git_email + ']: ')
if email_input == '':
email = git_email
else:
email = email_input
session, communicator = connect_to_midas(email, api_key)
for ii in range(len(input_files)):
input_abspath = os.path.abspath(input_files[ii])
folders = input_abspath[len(git_dir)+1:].split(os.path.sep)[:-1]
upload_to_midas(input_files[ii], output_files[ii], folders,
session, communicator)
if not no_delete:
os.remove(input_files[ii])
if __name__ == '__main__':
usage = "%prog [options] input1 input2 ... inputN"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--api-key-file', '-k', dest='api_key_file',
help="A file that contains your Midas user's API key.")
parser.add_option('--email', '-e',
help="Email address associated with your Midas account.")
parser.add_option('--no-delete', '-n', action='store_true',
dest='no_delete',
help='Do not remove the input files after upload.')
(options, input_files) = parser.parse_args()
if options.api_key_file:
with open(options.api_key_file, 'r') as fp:
api_key = fp.readline()
api_key = api_key.strip()
else:
api_key = None
output_files = []
for ii in range(len(input_files)):
output_files.append(input_files[ii] + '.md5')
no_delete = options.no_delete
run(input_files, output_files,
email=options.email, api_key=api_key,
no_delete=no_delete)
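# Editor's note (added usage example): a typical invocation of this script with
# hypothetical paths. It uploads the file, writes the .md5 content link next to
# it, and removes the original so only the link is committed:
#
#   python Utilities/UploadBinaryData.py -e you@example.com -k ~/.midas_api_key \
#       Data/example_image.png
#   git add -- Data/example_image.png.md5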
|
hendradarwin/SimpleITK
|
Utilities/UploadBinaryData.py
|
Python
|
apache-2.0
| 7,528
|
import pexpect
import getpass
version = raw_input('Version: ')
secret = getpass.getpass('Enter Passphrase: ')
github_username = 'ianjuma'
clean = pexpect.spawn('fab clean')
clean.expect('Passphrase for private key: ')
clean.sendline(secret)
deploy = pexpect.spawn('fab deploy:%s' % (version,))
deploy.expect('Passphrase for private key: ')
deploy.sendline(secret)
deploy.expect("Username for 'https://github.com': ")
deploy.sendline(github_username)
deploy.expect("Password for 'https://ianjuma@github.com': ")
deploy.sendline(secret)
|
ianjuma/errand-runner
|
automata.py
|
Python
|
apache-2.0
| 538
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module setuptools script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
description = """A synthetic dataset of school-level mathematics questions.
This dataset code generates mathematical question and answer pairs, from a range
of question types (such as in arithmetic, algebra, probability, etc), at roughly
school-level difficulty. This is designed to test the mathematical learning and
reasoning skills of learning models.
Original paper: Analysing Mathematical Reasoning Abilities of Neural Models
(Saxton, Grefenstette, Hill, Kohli) (https://openreview.net/pdf?id=H1gR5iR5FX).
"""
setup(
name='mathematics_dataset',
version='1.0.1',
description='A synthetic dataset of school-level mathematics questions',
long_description=description,
author='DeepMind',
author_email='saxton@google.com',
license='Apache License, Version 2.0',
keywords='mathematics dataset',
url='https://github.com/deepmind/mathematics_dataset',
packages=find_packages(),
install_requires=[
'absl-py>=0.1.0',
'numpy>=1.10',
'six',
'sympy>=1.2',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
deepmind/mathematics_dataset
|
setup.py
|
Python
|
apache-2.0
| 2,458
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
# Create your models here.
class Faculty(models.Model):
user = models.OneToOneField(User)
class Meta:
verbose_name_plural = 'Faculties'
def __str__(self):
return self.user.get_full_name()
class Sakha(models.Model):
user = models.OneToOneField(User)
class Meta:
verbose_name_plural="Sakhayein"
def __str__(self):
return self.user.get_full_name()
class Parent(models.Model):
user = models.OneToOneField(User)
def __str__(self):
return self.user.get_full_name()
class Learner(models.Model):
user = models.OneToOneField(User)
parent = models.ForeignKey(Parent)
sakha = models.ForeignKey(Sakha)
def __str__(self):
return self.user.get_full_name()
class Meta:
        ordering = ['user__first_name', 'user__last_name']
admin.site.register(Faculty)
admin.site.register(Sakha)
admin.site.register(Parent)
admin.site.register(Learner)
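# Note: the bare OneToOneField(User) / ForeignKey(...) forms above rely on the
# older Django behaviour this app targets; on Django 2.0+ the on_delete
# argument is mandatory, e.g.
#   user = models.OneToOneField(User, on_delete=models.CASCADE)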
|
pankajlal/prabandh
|
users/models.py
|
Python
|
apache-2.0
| 1,093
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=g-multiple-import
"""Some example environments to help get started quickly with brax."""
import functools
from typing import Callable, Optional, Union, overload
import brax
from brax.envs import acrobot
from brax.envs import ant
from brax.envs import fast
from brax.envs import fetch
from brax.envs import grasp
from brax.envs import halfcheetah
from brax.envs import hopper
from brax.envs import humanoid
from brax.envs import humanoid_standup
from brax.envs import inverted_double_pendulum
from brax.envs import inverted_pendulum
from brax.envs import reacher
from brax.envs import reacherangle
from brax.envs import swimmer
from brax.envs import ur5e
from brax.envs import walker2d
from brax.envs import wrappers
from brax.envs.env import Env, State, Wrapper
import gym
_envs = {
'acrobot': acrobot.Acrobot,
'ant': ant.Ant,
'fast': fast.Fast,
'fetch': fetch.Fetch,
'grasp': grasp.Grasp,
'halfcheetah': halfcheetah.Halfcheetah,
'hopper': hopper.Hopper,
'humanoid': humanoid.Humanoid,
'humanoidstandup': humanoid_standup.HumanoidStandup,
'inverted_pendulum': inverted_pendulum.InvertedPendulum,
'inverted_double_pendulum': inverted_double_pendulum.InvertedDoublePendulum,
'reacher': reacher.Reacher,
'reacherangle': reacherangle.ReacherAngle,
'swimmer': swimmer.Swimmer,
'ur5e': ur5e.Ur5e,
'walker2d': walker2d.Walker2d,
}
def get_environment(env_name, **kwargs):
return _envs[env_name](**kwargs)
def create(env_name: str,
episode_length: int = 1000,
action_repeat: int = 1,
auto_reset: bool = True,
batch_size: Optional[int] = None,
eval_metrics: bool = False,
**kwargs) -> Env:
"""Creates an Env with a specified brax system."""
env = _envs[env_name](**kwargs)
if episode_length is not None:
env = wrappers.EpisodeWrapper(env, episode_length, action_repeat)
if batch_size:
env = wrappers.VectorWrapper(env, batch_size)
if auto_reset:
env = wrappers.AutoResetWrapper(env)
if eval_metrics:
env = wrappers.EvalWrapper(env)
return env # type: ignore
def create_fn(env_name: str, **kwargs) -> Callable[..., Env]:
"""Returns a function that when called, creates an Env."""
return functools.partial(create, env_name, **kwargs)
@overload
def create_gym_env(env_name: str,
batch_size: None = None,
seed: int = 0,
backend: Optional[str] = None,
**kwargs) -> gym.Env:
...
@overload
def create_gym_env(env_name: str,
batch_size: int,
seed: int = 0,
backend: Optional[str] = None,
**kwargs) -> gym.vector.VectorEnv:
...
def create_gym_env(env_name: str,
batch_size: Optional[int] = None,
seed: int = 0,
backend: Optional[str] = None,
**kwargs) -> Union[gym.Env, gym.vector.VectorEnv]:
"""Creates a `gym.Env` or `gym.vector.VectorEnv` from a Brax environment."""
environment = create(env_name=env_name, batch_size=batch_size, **kwargs)
if batch_size is None:
return wrappers.GymWrapper(environment, seed=seed, backend=backend)
if batch_size <= 0:
raise ValueError(
'`batch_size` should either be None or a positive integer.')
return wrappers.VectorGymWrapper(environment, seed=seed, backend=backend)
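# Minimal usage sketch (assumes a working brax + JAX installation; 'ant' is one
# of the names registered in _envs above):
#
#   import jax
#   import jax.numpy as jnp
#
#   env = create('ant', episode_length=1000)
#   state = env.reset(rng=jax.random.PRNGKey(0))
#   state = env.step(state, jnp.zeros(env.action_size))
#
# create_fn('ant') returns a factory for training loops, and
# create_gym_env('ant', batch_size=16) exposes the same system through the
# standard gym / gym.vector interfaces defined above.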
|
google/brax
|
brax/envs/__init__.py
|
Python
|
apache-2.0
| 4,030
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Speech2Text model. """
import copy
import inspect
import os
import tempfile
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import (
is_torch_available,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torchaudio,
slow,
torch_device,
)
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
Speech2TextConfig,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextProcessor,
)
from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder
def prepare_speech_to_text_inputs_dict(
config,
input_features,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_features.ne(0)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
# "input_ids": input_features,
"input_features": input_features,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_torch
class Speech2TextModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
num_conv_layers=2,
conv_kernel_sizes=(5, 5),
conv_channels=32,
input_feat_per_channel=24,
input_channels=1,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
max_source_positions=20,
max_target_positions=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.num_conv_layers = num_conv_layers
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_channels = conv_channels
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_features = floats_tensor(
[self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size
)
attention_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2)
config = Speech2TextConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
num_conv_layers=self.num_conv_layers,
conv_kernel_sizes=self.conv_kernel_sizes,
conv_channels=self.conv_channels,
input_feat_per_channel=self.input_feat_per_channel,
input_channels=self.input_channels,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_source_positions=self.max_source_positions,
max_target_positions=self.max_target_positions,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
inputs_dict = prepare_speech_to_text_inputs_dict(
config,
input_features=input_features,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
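    # With the defaults above (seq_length=7, num_conv_layers=2) the subsampled
    # length works out to 7 -> (7 - 1) // 2 + 1 = 4 -> (4 - 1) // 2 + 1 = 2.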
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["decoder_input_ids"]
attention_mask = inputs_dict["decoder_attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens to next_input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = Speech2TextModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(
inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"]
)[0]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Speech2TextModel, Speech2TextForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Speech2TextForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_missing_keys = False
test_torchscript = True
input_name = "input_features"
def setUp(self):
self.model_tester = Speech2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Speech2TextConfig)
self.maxDiff = 3000
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
pass
# training is not supported yet
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_features = input_dict["input_features"]
attention_mask = input_dict["attention_mask"]
model = Speech2TextForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
input_features = input_features.half()
model.half()
model.generate(input_features, attention_mask=attention_mask)
model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_features",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
subsampled_seq_length = model._get_subsampled_output_lengths(seq_length)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[subsampled_seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
subsampled_encoder_seq_length = model._get_subsampled_output_lengths(encoder_seq_length)
subsampled_encoder_key_length = model._get_subsampled_output_lengths(encoder_key_length)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
subsampled_encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
def test_resize_tokens_embeddings(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# make sure that decoder_input_ids are resized
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_resize_embeddings_untied(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
original_config.tie_word_embeddings = False
        # if the model cannot untie its embeddings -> leave test
if original_config.tie_word_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
def test_generate_without_input_ids(self):
pass
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
input_ids = input_ids[:, :, 0]
input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape[:2]
subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length)
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
# encoder
self._check_encoder_attention_for_generate(
output.encoder_attentions, batch_size, config, subsampled_seq_length
)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, subsampled_seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
try:
                model.config.use_cache = False  # FSMT still requires this hack -> FSMT should probably be refactored similar to BART afterward
input_features = inputs["input_features"]
attention_mask = inputs["attention_mask"]
decoder_input_ids = inputs["decoder_input_ids"]
decoder_attention_mask = inputs["decoder_attention_mask"]
traced_model = torch.jit.trace(
model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask)
)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
@require_torch
@require_torchaudio
@require_sentencepiece
@require_tokenizers
@slow
class Speech2TextModelIntegrationTests(unittest.TestCase):
@cached_property
def default_processor(self):
return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
def _load_datasamples(self, num_samples):
from datasets import load_dataset
import soundfile as sf
# map files to raw
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
ds = ds.select(range(num_samples)).map(map_to_array)
return ds["speech"][:num_samples]
def test_generation_librispeech(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(1)
input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device)
generated_ids = model.generate(input_features)
generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS)
def test_generation_librispeech_batched(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(4)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_features = inputs.input_features.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
generated_ids = model.generate(input_features, attention_mask=attention_mask)
generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"sweat covered brion's body trickling into the titleing cloth that was the only garment he wore",
"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
"his instant of panic was followed by a small sharp blow high on his chest",
]
self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
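# Typical invocation from a transformers source checkout (a sketch; assumes
# torch, torchaudio, sentencepiece and tokenizers are installed, and RUN_SLOW=1
# enables the @slow integration tests above):
#
#   RUN_SLOW=1 python -m pytest tests/test_modeling_speech_to_text.py -v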
|
huggingface/pytorch-transformers
|
tests/test_modeling_speech_to_text.py
|
Python
|
apache-2.0
| 32,528
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the action registry."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import activity_domain
from core.tests import test_utils
class ActivityReferenceDomainUnitTests(test_utils.GenericTestBase):
"""Tests for ActivityReference domain class."""
def setUp(self):
super(ActivityReferenceDomainUnitTests, self).setUp()
self.exp_activity_reference = activity_domain.ActivityReference(
'exploration', '1234')
self.collection_activity_reference = activity_domain.ActivityReference(
'collection', '1234')
self.invalid_activity_reference_with_invalid_type = (
activity_domain.ActivityReference('invalid_activity_type', '1234'))
self.invalid_activity_reference_with_invalid_id = (
activity_domain.ActivityReference('exploration', 1234))
def test_that_hashes_for_different_object_types_are_distinct(self):
exp_hash = self.exp_activity_reference.get_hash()
collection_hash = self.collection_activity_reference.get_hash()
invalid_activity_hash = (
self.invalid_activity_reference_with_invalid_type.get_hash())
self.assertNotEqual(exp_hash, collection_hash)
self.assertNotEqual(exp_hash, invalid_activity_hash)
self.assertNotEqual(collection_hash, invalid_activity_hash)
def test_validate_with_invalid_type(self):
with self.assertRaisesRegexp(
Exception, 'Invalid activity type: invalid_activity_type'):
self.invalid_activity_reference_with_invalid_type.validate()
def test_validate_with_invalid_id(self):
with self.assertRaisesRegexp(
Exception, ('Expected id to be a string but found 1234')):
self.invalid_activity_reference_with_invalid_id.validate()
def test_to_dict(self):
exp_dict = self.exp_activity_reference.to_dict()
collection_dict = self.collection_activity_reference.to_dict()
self.assertEqual(
exp_dict,
{
'type': 'exploration',
'id': '1234'
}
)
self.assertEqual(
collection_dict,
{
'type': 'collection',
'id': '1234'
}
)
class ActivityReferencesDomainUnitTests(test_utils.GenericTestBase):
"""Tests for ActivityReferences domain class."""
def setUp(self):
super(ActivityReferencesDomainUnitTests, self).setUp()
exp_activity_reference = activity_domain.ActivityReference(
'exploration', '1234')
collection_activity_reference = activity_domain.ActivityReference(
'collection', '1234')
invalid_activity_reference = (
activity_domain.ActivityReference(
'invalid_activity_type', '1234'))
self.valid_activity_references = (
activity_domain.ActivityReferences([
exp_activity_reference, collection_activity_reference]))
self.invalid_activity_references = (
activity_domain.ActivityReferences([
exp_activity_reference, invalid_activity_reference]))
def test_validate_passes_with_valid_activity_reference_list(self):
self.valid_activity_references.validate()
def test_validate_fails_with_invalid_type_in_activity_reference_list(self):
with self.assertRaisesRegexp(
Exception, 'Invalid activity type: invalid_activity_type'):
self.invalid_activity_references.validate()
|
prasanna08/oppia
|
core/domain/activity_domain_test.py
|
Python
|
apache-2.0
| 4,280
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For HostManager
"""
import collections
import contextlib
import datetime
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
import six
import nova
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova import test
from nova.tests import fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class HostManagerTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(HostManagerTestCase, self).setUp()
self.flags(available_filters=[
__name__ + '.FakeFilterClass1', __name__ + '.FakeFilterClass2'],
group='filter_scheduler')
self.flags(enabled_filters=['FakeFilterClass1'],
group='filter_scheduler')
self.host_manager = host_manager.HostManager()
cell = uuids.cell
self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
'fake-node', cell) for x in range(1, 5)]
self.fake_hosts += [host_manager.HostState('fake_multihost',
'fake-node%s' % x, cell) for x in range(1, 5)]
self.useFixture(fixtures.SpawnIsSynchronousFixture())
def test_load_filters(self):
filters = self.host_manager._load_filters()
self.assertEqual(filters, ['FakeFilterClass1'])
def test_refresh_cells_caches(self):
ctxt = nova_context.RequestContext('fake', 'fake')
# Loading the non-cell0 mapping from the base test class.
self.assertEqual(1, len(self.host_manager.enabled_cells))
self.assertEqual(1, len(self.host_manager.cells))
# Creating cell mappings for mocking the list of cell_mappings obtained
# so that the refreshing mechanism can be properly tested. This will in
# turn ignore the loaded cell mapping from the base test case setup.
cell_uuid1 = uuids.cell1
cell_mapping1 = objects.CellMapping(context=ctxt,
uuid=cell_uuid1,
database_connection='fake:///db1',
transport_url='fake:///mq1',
disabled=False)
cell_uuid2 = uuids.cell2
cell_mapping2 = objects.CellMapping(context=ctxt,
uuid=cell_uuid2,
database_connection='fake:///db2',
transport_url='fake:///mq2',
disabled=True)
cell_uuid3 = uuids.cell3
cell_mapping3 = objects.CellMapping(context=ctxt,
uuid=cell_uuid3,
database_connection='fake:///db3',
transport_url='fake:///mq3',
disabled=False)
cells = [cell_mapping1, cell_mapping2, cell_mapping3]
with mock.patch('nova.objects.CellMappingList.get_all',
return_value=cells) as mock_cm:
self.host_manager.refresh_cells_caches()
mock_cm.assert_called_once()
self.assertEqual(2, len(self.host_manager.enabled_cells))
self.assertEqual(cell_uuid3, self.host_manager.enabled_cells[1].uuid)
self.assertEqual(3, len(self.host_manager.cells))
self.assertEqual(cell_uuid2, self.host_manager.cells[1].uuid)
def test_refresh_cells_caches_except_cell0(self):
ctxt = nova_context.RequestContext('fake-user', 'fake_project')
cell_uuid0 = objects.CellMapping.CELL0_UUID
cell_mapping0 = objects.CellMapping(context=ctxt,
uuid=cell_uuid0,
database_connection='fake:///db1',
transport_url='fake:///mq1')
cells = objects.CellMappingList(cell_mapping0)
# Mocking the return value of get_all cell_mappings to return only
# the cell0 mapping to check if its filtered or not.
with mock.patch('nova.objects.CellMappingList.get_all',
return_value=cells) as mock_cm:
self.host_manager.refresh_cells_caches()
mock_cm.assert_called_once()
self.assertEqual(0, len(self.host_manager.cells))
@mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
def test_init_instance_info_batches(self, mock_get_all,
mock_get_by_filters):
cn_list = objects.ComputeNodeList()
for num in range(22):
host_name = 'host_%s' % num
cn_list.objects.append(objects.ComputeNode(host=host_name))
mock_get_all.return_value = cn_list
self.host_manager._init_instance_info()
self.assertEqual(mock_get_by_filters.call_count, 3)
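    # 22 compute nodes but only 3 InstanceList queries: the host manager
    # batches hosts when pre-filling its instance cache (apparently 10 hosts
    # per query, giving batches of 10 + 10 + 2 here).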
@mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
def test_init_instance_info(self, mock_get_all,
mock_get_by_filters):
cn1 = objects.ComputeNode(host='host1')
cn2 = objects.ComputeNode(host='host2')
inst1 = objects.Instance(host='host1', uuid=uuids.instance_1)
inst2 = objects.Instance(host='host1', uuid=uuids.instance_2)
inst3 = objects.Instance(host='host2', uuid=uuids.instance_3)
mock_get_all.return_value = objects.ComputeNodeList(objects=[cn1, cn2])
mock_get_by_filters.return_value = objects.InstanceList(
objects=[inst1, inst2, inst3])
hm = self.host_manager
hm._instance_info = {}
hm._init_instance_info()
self.assertEqual(len(hm._instance_info), 2)
fake_info = hm._instance_info['host1']
self.assertIn(uuids.instance_1, fake_info['instances'])
self.assertIn(uuids.instance_2, fake_info['instances'])
self.assertNotIn(uuids.instance_3, fake_info['instances'])
exp_filters = {'deleted': False, 'host': [u'host1', u'host2']}
mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters)
@mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
def test_init_instance_info_compute_nodes(self, mock_get_all,
mock_get_by_filters):
cn1 = objects.ComputeNode(host='host1')
cn2 = objects.ComputeNode(host='host2')
inst1 = objects.Instance(host='host1', uuid=uuids.instance_1)
inst2 = objects.Instance(host='host1', uuid=uuids.instance_2)
inst3 = objects.Instance(host='host2', uuid=uuids.instance_3)
cell = objects.CellMapping(database_connection='',
target_url='')
mock_get_by_filters.return_value = objects.InstanceList(
objects=[inst1, inst2, inst3])
hm = self.host_manager
hm._instance_info = {}
hm._init_instance_info({cell: [cn1, cn2]})
self.assertEqual(len(hm._instance_info), 2)
fake_info = hm._instance_info['host1']
self.assertIn(uuids.instance_1, fake_info['instances'])
self.assertIn(uuids.instance_2, fake_info['instances'])
self.assertNotIn(uuids.instance_3, fake_info['instances'])
exp_filters = {'deleted': False, 'host': [u'host1', u'host2']}
mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters)
# should not be called if the list of nodes was passed explicitly
self.assertFalse(mock_get_all.called)
def test_enabled_filters(self):
enabled_filters = self.host_manager.enabled_filters
self.assertEqual(1, len(enabled_filters))
self.assertIsInstance(enabled_filters[0], FakeFilterClass1)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(objects.AggregateList, 'get_all')
def test_init_aggregates_no_aggs(self, agg_get_all, mock_init_info):
agg_get_all.return_value = []
self.host_manager = host_manager.HostManager()
self.assertEqual({}, self.host_manager.aggs_by_id)
self.assertEqual({}, self.host_manager.host_aggregates_map)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(objects.AggregateList, 'get_all')
def test_init_aggregates_one_agg_no_hosts(self, agg_get_all,
mock_init_info):
fake_agg = objects.Aggregate(id=1, hosts=[])
agg_get_all.return_value = [fake_agg]
self.host_manager = host_manager.HostManager()
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({}, self.host_manager.host_aggregates_map)
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(objects.AggregateList, 'get_all')
def test_init_aggregates_one_agg_with_hosts(self, agg_get_all,
mock_init_info):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
agg_get_all.return_value = [fake_agg]
self.host_manager = host_manager.HostManager()
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
def test_update_aggregates(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
def test_update_aggregates_remove_hosts(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
# Let's remove the host from the aggregate and update again
fake_agg.hosts = []
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([])},
self.host_manager.host_aggregates_map)
def test_delete_aggregate(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.host_aggregates_map = collections.defaultdict(
set, {'fake-host': set([1])})
self.host_manager.aggs_by_id = {1: fake_agg}
self.host_manager.delete_aggregate(fake_agg)
self.assertEqual({}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([])},
self.host_manager.host_aggregates_map)
def test_choose_host_filters_not_found(self):
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters,
'FakeFilterClass3')
def test_choose_host_filters(self):
# Test we return 1 correct filter object
host_filters = self.host_manager._choose_host_filters(
['FakeFilterClass2'])
self.assertEqual(1, len(host_filters))
self.assertIsInstance(host_filters[0], FakeFilterClass2)
def _mock_get_filtered_hosts(self, info):
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
fake_filter_one)
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
if filters:
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = objects.RequestSpec(ignore_hosts=[],
instance_uuid=uuids.instance,
force_hosts=[],
force_nodes=[])
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_requested_destination(self):
dest = objects.Destination(host='fake_host1', node='fake-node')
fake_properties = objects.RequestSpec(requested_destination=dest,
ignore_hosts=[],
instance_uuid=uuids.fake_uuid1,
force_hosts=[],
force_nodes=[])
info = {'expected_objs': [self.fake_hosts[0]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_wrong_requested_destination(self):
dest = objects.Destination(host='dummy', node='fake-node')
fake_properties = objects.RequestSpec(requested_destination=dest,
ignore_hosts=[],
instance_uuid=uuids.fake_uuid1,
force_hosts=[],
force_nodes=[])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore_case_insensitive(self):
fake_properties = objects.RequestSpec(
instance_uuids=uuids.fakehost,
ignore_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'Fake_Multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
force_nodes=[])
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_case_insensitive(self):
fake_properties = objects.RequestSpec(
instance_uuids=uuids.fakehost,
ignore_hosts=[],
force_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'fake_host4',
'faKe_host5'],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2],
self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_host5', 'fake_host6'],
force_nodes=[])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
with mock.patch.object(self.host_manager.filter_handler,
'get_filtered_objects') as fake_filter:
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self.assertFalse(fake_filter.called)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1'],
force_hosts=['fake_host3', 'fake_host1'],
force_nodes=[])
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=[])
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=[],
force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake-host1', 'fake_multihost'],
force_nodes=['fake-node2', 'fake-node9'])
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=[],
force_hosts=['fake_multihost'],
force_nodes=['fake-node'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_host1', 'fake_host2'],
force_hosts=[],
force_nodes=['fake-node4', 'fake-node2'])
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = objects.RequestSpec(
instance_uuid=uuids.instance,
ignore_hosts=['fake_multihost'],
force_hosts=[],
force_nodes=['fake_node4', 'fake_node2'])
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
@mock.patch('nova.scheduler.host_manager.LOG')
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
mock_get_by_binary, mock_log):
mock_get_by_host.return_value = []
mock_get_all.return_value = fakes.COMPUTE_NODES
mock_get_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
self.host_manager.get_all_host_states(context)}
self.assertEqual(4, len(host_states_map))
calls = [
mock.call(
"Host %(hostname)s has more disk space than database "
"expected (%(physical)s GB > %(database)s GB)",
{'physical': 3333, 'database': 3072, 'hostname': 'node3'}
),
mock.call(
"No compute service record found for host %(host)s",
{'host': 'fake'}
)
]
self.assertEqual(calls, mock_log.warning.call_args_list)
# Check that .service is set properly
for i in range(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node.host
node = compute_node.hypervisor_hostname
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
512)
# 511GB
self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
524288)
self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
1024)
# 1023GB
self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
1048576)
self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
3072)
# 3071GB
self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
3145728)
self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
8192)
# 8191GB
self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
8388608)
@mock.patch.object(nova.objects.InstanceList, 'get_uuids_by_host')
@mock.patch.object(host_manager.HostState, '_update_from_compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
@mock.patch.object(objects.ServiceList, 'get_by_binary')
def test_get_all_host_states_with_no_aggs(self, svc_get_by_binary,
cn_get_all, update_from_cn,
mock_get_by_host):
svc_get_by_binary.return_value = [objects.Service(host='fake')]
cn_get_all.return_value = [
objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
mock_get_by_host.return_value = []
self.host_manager.host_aggregates_map = collections.defaultdict(set)
hosts = self.host_manager.get_all_host_states('fake-context')
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
host_state = host_states_map[('fake', 'fake')]
self.assertEqual([], host_state.aggregates)
@mock.patch.object(nova.objects.InstanceList, 'get_uuids_by_host')
@mock.patch.object(host_manager.HostState, '_update_from_compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
@mock.patch.object(objects.ServiceList, 'get_by_binary')
def test_get_all_host_states_with_matching_aggs(self, svc_get_by_binary,
cn_get_all,
update_from_cn,
mock_get_by_host):
svc_get_by_binary.return_value = [objects.Service(host='fake')]
cn_get_all.return_value = [
objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
mock_get_by_host.return_value = []
fake_agg = objects.Aggregate(id=1)
self.host_manager.host_aggregates_map = collections.defaultdict(
set, {'fake': set([1])})
self.host_manager.aggs_by_id = {1: fake_agg}
hosts = self.host_manager.get_all_host_states('fake-context')
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
host_state = host_states_map[('fake', 'fake')]
self.assertEqual([fake_agg], host_state.aggregates)
@mock.patch.object(nova.objects.InstanceList, 'get_uuids_by_host')
@mock.patch.object(host_manager.HostState, '_update_from_compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
@mock.patch.object(objects.ServiceList, 'get_by_binary')
def test_get_all_host_states_with_not_matching_aggs(self,
svc_get_by_binary,
cn_get_all,
update_from_cn,
mock_get_by_host):
svc_get_by_binary.return_value = [objects.Service(host='fake'),
objects.Service(host='other')]
cn_get_all.return_value = [
objects.ComputeNode(host='fake', hypervisor_hostname='fake'),
objects.ComputeNode(host='other', hypervisor_hostname='other')]
mock_get_by_host.return_value = []
fake_agg = objects.Aggregate(id=1)
self.host_manager.host_aggregates_map = collections.defaultdict(
set, {'other': set([1])})
self.host_manager.aggs_by_id = {1: fake_agg}
hosts = self.host_manager.get_all_host_states('fake-context')
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
host_state = host_states_map[('fake', 'fake')]
self.assertEqual([], host_state.aggregates)
@mock.patch.object(nova.objects.InstanceList, 'get_uuids_by_host',
return_value=[])
@mock.patch.object(host_manager.HostState, '_update_from_compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all')
@mock.patch.object(objects.ServiceList, 'get_by_binary')
def test_get_all_host_states_corrupt_aggregates_info(self,
svc_get_by_binary,
cn_get_all,
update_from_cn,
mock_get_by_host):
"""Regression test for bug 1605804
A host can be in multiple host-aggregates at the same time. When a
host gets removed from an aggregate in thread A and this aggregate
gets deleted in thread B, there can be a race-condition where the
mapping data in the host_manager can get out of sync for a moment.
This test simulates this condition for the bug-fix.
"""
host_a = 'host_a'
host_b = 'host_b'
svc_get_by_binary.return_value = [objects.Service(host=host_a),
objects.Service(host=host_b)]
cn_get_all.return_value = [
objects.ComputeNode(host=host_a, hypervisor_hostname=host_a),
objects.ComputeNode(host=host_b, hypervisor_hostname=host_b)]
aggregate = objects.Aggregate(id=1)
aggregate.hosts = [host_a, host_b]
aggr_list = objects.AggregateList()
aggr_list.objects = [aggregate]
self.host_manager.update_aggregates(aggr_list)
aggregate.hosts = [host_a]
self.host_manager.delete_aggregate(aggregate)
self.host_manager.get_all_host_states('fake-context')
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
def test_get_all_host_states_updated(self, mock_get_by_host,
mock_get_all_comp,
mock_get_svc_by_binary):
mock_get_all_comp.return_value = fakes.COMPUTE_NODES
mock_get_svc_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
hm = self.host_manager
inst1 = objects.Instance(uuid=uuids.instance)
cn1 = objects.ComputeNode(host='host1')
hm._instance_info = {'host1': {'instances': {uuids.instance: inst1},
'updated': True}}
host_state = host_manager.HostState('host1', cn1, uuids.cell)
self.assertFalse(host_state.instances)
mock_get_by_host.return_value = None
host_state.update(
inst_dict=hm._get_instance_info(context, cn1))
self.assertFalse(mock_get_by_host.called)
self.assertTrue(host_state.instances)
self.assertEqual(host_state.instances[uuids.instance], inst1)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_all_host_states_not_updated(self, mock_get_by_host,
mock_get_all_comp,
mock_get_svc_by_binary):
mock_get_all_comp.return_value = fakes.COMPUTE_NODES
mock_get_svc_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
hm = self.host_manager
inst1 = objects.Instance(uuid=uuids.instance)
cn1 = objects.ComputeNode(host='host1')
hm._instance_info = {'host1': {'instances': {uuids.instance: inst1},
'updated': False}}
host_state = host_manager.HostState('host1', cn1, uuids.cell)
self.assertFalse(host_state.instances)
mock_get_by_host.return_value = [uuids.instance]
host_state.update(
inst_dict=hm._get_instance_info(context, cn1))
mock_get_by_host.assert_called_once_with(context, cn1.host)
self.assertTrue(host_state.instances)
self.assertIn(uuids.instance, host_state.instances)
inst = host_state.instances[uuids.instance]
self.assertEqual(uuids.instance, inst.uuid)
self.assertIsNotNone(inst._context, 'Instance is orphaned.')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_recreate_instance_info(self, mock_get_by_host):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
mock_get_by_host.return_value = [uuids.instance_1, uuids.instance_2]
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': True,
}}
self.host_manager._recreate_instance_info('fake_context', host_name)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']),
len(mock_get_by_host.return_value))
self.assertFalse(new_info['updated'])
def test_update_instance_info(self):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
inst3 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_3,
host=host_name)
inst4 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_4,
host=host_name)
update = objects.InstanceList(objects=[inst3, inst4])
self.host_manager.update_instance_info('fake_context', host_name,
update)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']), 4)
self.assertTrue(new_info['updated'])
def test_update_instance_info_unknown_host(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
bad_host = 'bad_host'
inst3 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_3,
host=bad_host)
inst_list3 = objects.InstanceList(objects=[inst3])
self.host_manager.update_instance_info('fake_context', bad_host,
inst_list3)
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', bad_host)
self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
self.assertFalse(new_info['updated'])
@mock.patch('nova.objects.HostMapping.get_by_host',
side_effect=exception.HostMappingNotFound(name='host1'))
def test_update_instance_info_unknown_host_mapping_not_found(self,
get_by_host):
"""Tests that case that update_instance_info is called with an
unregistered host so the host manager attempts to recreate the
instance list, but there is no host mapping found for the given
host (it might have just started not be discovered for cells
v2 yet).
"""
ctxt = nova_context.RequestContext()
instance_info = objects.InstanceList()
self.host_manager.update_instance_info(ctxt, 'host1', instance_info)
self.assertDictEqual(
{}, self.host_manager._instance_info['host1']['instances'])
get_by_host.assert_called_once_with(ctxt, 'host1')
def test_delete_instance_info(self):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.delete_instance_info('fake_context', host_name,
inst1.uuid)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']), 1)
self.assertTrue(new_info['updated'])
def test_delete_instance_info_unknown_host(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
bad_host = 'bad_host'
self.host_manager.delete_instance_info('fake_context', bad_host,
uuids.instance_1)
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', bad_host)
self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
self.assertFalse(new_info['updated'])
def test_sync_instance_info(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.sync_instance_info('fake_context', host_name,
[uuids.instance_2,
uuids.instance_1])
new_info = self.host_manager._instance_info[host_name]
self.assertFalse(self.host_manager._recreate_instance_info.called)
self.assertTrue(new_info['updated'])
def test_sync_instance_info_fail(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.sync_instance_info('fake_context', host_name,
[uuids.instance_2,
uuids.instance_1, 'new'])
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', host_name)
self.assertFalse(new_info['updated'])
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells(self, mock_sl, mock_cn, mock_cm):
cells = [
            objects.CellMapping(uuid=uuids.cell1,
                                database_connection='none://1',
                                transport_url='none://'),
            objects.CellMapping(uuid=uuids.cell2,
                                database_connection='none://2',
                                transport_url='none://'),
]
mock_cm.return_value = cells
mock_sl.side_effect = [
[objects.ServiceList(host='foo')],
[objects.ServiceList(host='bar')],
]
mock_cn.side_effect = [
[objects.ComputeNode(host='foo')],
[objects.ComputeNode(host='bar')],
]
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, cells)
self.assertEqual({uuids.cell1: ['foo'],
uuids.cell2: ['bar']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['bar', 'foo'], sorted(list(srv.keys())))
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells_uuid(self, mock_sl, mock_cn, mock_cm):
cells = [
            objects.CellMapping(uuid=uuids.cell1,
                                database_connection='none://1',
                                transport_url='none://'),
            objects.CellMapping(uuid=uuids.cell2,
                                database_connection='none://2',
                                transport_url='none://'),
]
mock_cm.return_value = cells
mock_sl.side_effect = [
[objects.ServiceList(host='foo')],
[objects.ServiceList(host='bar')],
]
mock_cn.side_effect = [
[objects.ComputeNode(host='foo')],
[objects.ComputeNode(host='bar')],
]
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, cells,
[])
self.assertEqual({uuids.cell1: ['foo'],
uuids.cell2: ['bar']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['bar', 'foo'], sorted(list(srv.keys())))
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells_limit_to_cell(self, mock_sl,
mock_cn, mock_cm,
mock_target):
cells = [
objects.CellMapping(uuid=uuids.cell1,
database_connection='none://1',
transport_url='none://'),
objects.CellMapping(uuid=uuids.cell2,
database_connection='none://2',
transport_url='none://'),
]
mock_sl.return_value = [objects.ServiceList(host='foo')]
mock_cn.return_value = [objects.ComputeNode(host='foo')]
mock_cm.return_value = cells
@contextlib.contextmanager
def fake_set_target(context, cell):
yield mock.sentinel.cctxt
mock_target.side_effect = fake_set_target
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(
context, cells=cells[1:])
self.assertEqual({uuids.cell2: ['foo']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['foo'], list(srv.keys()))
# NOTE(danms): We have two cells, but we should only have
# targeted one if we honored the only-cell destination requirement,
# and only looked up services and compute nodes in one
mock_target.assert_called_once_with(context, cells[1])
mock_cn.assert_called_once_with(mock.sentinel.cctxt)
mock_sl.assert_called_once_with(mock.sentinel.cctxt, 'nova-compute',
include_disabled=True)
@mock.patch('nova.context.scatter_gather_cells')
def test_get_computes_for_cells_failures(self, mock_sg):
mock_sg.return_value = {
uuids.cell1: ([mock.MagicMock(host='a'), mock.MagicMock(host='b')],
[mock.sentinel.c1n1, mock.sentinel.c1n2]),
uuids.cell2: nova_context.did_not_respond_sentinel,
uuids.cell3: exception.ComputeHostNotFound(host='c'),
}
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, [])
self.assertEqual({uuids.cell1: [mock.sentinel.c1n1,
mock.sentinel.c1n2]}, cns)
self.assertEqual(['a', 'b'], sorted(srv.keys()))
class HostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(HostManagerChangedNodesTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
mock_get_by_binary):
mock_get_by_host.return_value = []
mock_get_all.return_value = fakes.COMPUTE_NODES
mock_get_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
self.host_manager.get_all_host_states(context)}
self.assertEqual(len(host_states_map), 4)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_all_host_states_after_delete_one(self, mock_get_by_host,
mock_get_all,
mock_get_by_binary):
getter = (lambda n: n.hypervisor_hostname
if 'hypervisor_hostname' in n else None)
running_nodes = [n for n in fakes.COMPUTE_NODES
if getter(n) != 'node4']
mock_get_by_host.return_value = []
mock_get_all.side_effect = [fakes.COMPUTE_NODES, running_nodes]
mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
context = 'fake_context'
# first call: all nodes
hosts = self.host_manager.get_all_host_states(context)
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 4)
# second call: just running nodes
hosts = self.host_manager.get_all_host_states(context)
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 3)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_all_host_states_after_delete_all(self, mock_get_by_host,
mock_get_all,
mock_get_by_binary):
mock_get_by_host.return_value = []
mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
context = 'fake_context'
# first call: all nodes
hosts = self.host_manager.get_all_host_states(context)
# get_all_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 4)
# second call: no nodes
hosts = self.host_manager.get_all_host_states(context)
host_states_map = {(state.host, state.nodename): state for state in
hosts}
self.assertEqual(len(host_states_map), 0)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
def test_get_host_states_by_uuids(self, mock_get_by_host, mock_get_all,
mock_get_by_binary):
mock_get_by_host.return_value = []
mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
# Request 1: all nodes can satisfy the request
hosts1 = self.host_manager.get_host_states_by_uuids(
mock.sentinel.ctxt1, mock.sentinel.uuids1, objects.RequestSpec())
# get_host_states_by_uuids returns a generator so convert the values
# into an iterator
host_states1 = iter(hosts1)
# Request 2: no nodes can satisfy the request
hosts2 = self.host_manager.get_host_states_by_uuids(
mock.sentinel.ctxt2, mock.sentinel.uuids2, objects.RequestSpec())
host_states2 = iter(hosts2)
# Fake a concurrent request that is still processing the first result
# to make sure all nodes are still available candidates to Request 1.
num_hosts1 = len(list(host_states1))
self.assertEqual(4, num_hosts1)
# Verify that no nodes are available to Request 2.
num_hosts2 = len(list(host_states2))
self.assertEqual(0, num_hosts2)
class HostStateTestCase(test.NoDBTestCase):
"""Test case for HostState class."""
# update_from_compute_node() and consume_from_request() are tested
# in HostManagerTestCase.test_get_all_host_states()
@mock.patch('nova.utils.synchronized',
side_effect=lambda a: lambda f: lambda *args: f(*args))
def test_stat_consumption_from_compute_node(self, sync_mock):
stats = {
'num_instances': '5',
'num_proj_12345': '3',
'num_proj_23456': '1',
'num_vm_%s' % vm_states.BUILDING: '2',
'num_vm_%s' % vm_states.SUSPENDED: '1',
'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
'num_task_%s' % task_states.MIGRATING: '2',
'num_os_type_linux': '4',
'num_os_type_windoze': '1',
'io_workload': '42',
}
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host_ip='127.0.0.1', hypervisor_type='htype',
hypervisor_hostname='hostname', cpu_info='cpu_info',
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
host.update(compute=compute)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
self.assertEqual(5, host.num_instances)
self.assertEqual(42, host.num_io_ops)
self.assertEqual(10, len(host.stats))
self.assertEqual('127.0.0.1', str(host.host_ip))
self.assertEqual('htype', host.hypervisor_type)
self.assertEqual('hostname', host.hypervisor_hostname)
self.assertEqual('cpu_info', host.cpu_info)
self.assertEqual([], host.supported_instances)
self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_stat_consumption_from_compute_node_non_pci(self):
stats = {
'num_instances': '5',
'num_proj_12345': '3',
'num_proj_23456': '1',
'num_vm_%s' % vm_states.BUILDING: '2',
'num_vm_%s' % vm_states.SUSPENDED: '1',
'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
'num_task_%s' % task_states.MIGRATING: '2',
'num_os_type_linux': '4',
'num_os_type_windoze': '1',
'io_workload': '42',
}
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host_ip='127.0.0.1', hypervisor_type='htype',
hypervisor_hostname='hostname', cpu_info='cpu_info',
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
host.update(compute=compute)
self.assertEqual([], host.pci_stats.pools)
self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_stat_consumption_from_compute_node_rescue_unshelving(self):
stats = {
'num_instances': '5',
'num_proj_12345': '3',
'num_proj_23456': '1',
'num_vm_%s' % vm_states.BUILDING: '2',
'num_vm_%s' % vm_states.SUSPENDED: '1',
'num_task_%s' % task_states.UNSHELVING: '1',
'num_task_%s' % task_states.RESCUING: '2',
'num_os_type_linux': '4',
'num_os_type_windoze': '1',
'io_workload': '42',
}
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host_ip='127.0.0.1', hypervisor_type='htype',
hypervisor_hostname='hostname', cpu_info='cpu_info',
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
host.update(compute=compute)
self.assertEqual(5, host.num_instances)
self.assertEqual(42, host.num_io_ops)
self.assertEqual(10, len(host.stats))
self.assertEqual([], host.pci_stats.pools)
self.assertEqual(hyper_ver_int, host.hypervisor_version)
@mock.patch('nova.utils.synchronized',
side_effect=lambda a: lambda f: lambda *args: f(*args))
@mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
@mock.patch('nova.objects.Instance')
@mock.patch('nova.virt.hardware.numa_fit_instance_to_host')
@mock.patch('nova.virt.hardware.host_topology_and_format_from_host')
def test_stat_consumption_from_instance(self, host_topo_mock,
numa_fit_mock,
instance_init_mock,
numa_usage_mock,
sync_mock):
fake_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell()])
fake_host_numa_topology = mock.Mock()
fake_instance = objects.Instance(numa_topology=fake_numa_topology)
host_topo_mock.return_value = (fake_host_numa_topology, True)
numa_usage_mock.return_value = fake_host_numa_topology
numa_fit_mock.return_value = fake_numa_topology
instance_init_mock.return_value = fake_instance
spec_obj = objects.RequestSpec(
instance_uuid=uuids.instance,
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
numa_topology=fake_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology,
limits=None, pci_requests=None,
pci_stats=None)
numa_usage_mock.assert_called_once_with(host, fake_instance)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
self.assertEqual(fake_host_numa_topology, host.numa_topology)
self.assertIsNotNone(host.updated)
second_numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell()])
spec_obj = objects.RequestSpec(
instance_uuid=uuids.instance,
flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
vcpus=0),
numa_topology=second_numa_topology,
pci_requests=objects.InstancePCIRequests(requests=[]))
second_host_numa_topology = mock.Mock()
numa_usage_mock.return_value = second_host_numa_topology
numa_fit_mock.return_value = second_numa_topology
host.consume_from_request(spec_obj)
self.assertEqual(2, host.num_instances)
self.assertEqual(2, host.num_io_ops)
self.assertEqual(2, numa_usage_mock.call_count)
self.assertEqual(((host, fake_instance),), numa_usage_mock.call_args)
self.assertEqual(second_host_numa_topology, host.numa_topology)
self.assertIsNotNone(host.updated)
def test_stat_consumption_from_instance_pci(self):
        inst_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0]),
                memory=512, id=0)])
fake_requests = [{'request_id': uuids.request_id, 'count': 1,
'spec': [{'vendor_id': '8086'}]}]
fake_requests_obj = objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(**r)
for r in fake_requests],
instance_uuid=uuids.instance)
req_spec = objects.RequestSpec(
instance_uuid=uuids.instance,
project_id='12345',
numa_topology=inst_topology,
pci_requests=fake_requests_obj,
flavor=objects.Flavor(root_gb=0,
ephemeral_gb=0,
memory_mb=512,
vcpus=1))
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
self.assertIsNone(host.updated)
host.pci_stats = pci_stats.PciDeviceStats(
[objects.PciDevicePool(vendor_id='8086',
product_id='15ed',
numa_node=1,
count=1)])
host.numa_topology = fakes.NUMA_TOPOLOGY
host.consume_from_request(req_spec)
self.assertIsInstance(req_spec.numa_topology,
objects.InstanceNUMATopology)
self.assertEqual(512, host.numa_topology.cells[1].memory_usage)
self.assertEqual(1, host.numa_topology.cells[1].cpu_usage)
self.assertEqual(0, len(host.pci_stats.pools))
self.assertIsNotNone(host.updated)
def test_stat_consumption_from_instance_with_pci_exception(self):
fake_requests = [{'request_id': uuids.request_id, 'count': 3,
'spec': [{'vendor_id': '8086'}]}]
fake_requests_obj = objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(**r)
for r in fake_requests],
instance_uuid=uuids.instance)
req_spec = objects.RequestSpec(
instance_uuid=uuids.instance,
project_id='12345',
numa_topology=None,
pci_requests=fake_requests_obj,
flavor=objects.Flavor(root_gb=0,
ephemeral_gb=0,
memory_mb=1024,
vcpus=1))
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
self.assertIsNone(host.updated)
fake_updated = mock.sentinel.fake_updated
host.updated = fake_updated
host.pci_stats = pci_stats.PciDeviceStats()
with mock.patch.object(host.pci_stats, 'apply_requests',
side_effect=exception.PciDeviceRequestFailed):
host.consume_from_request(req_spec)
self.assertEqual(fake_updated, host.updated)
def test_resources_consumption_from_compute_node(self):
_ts_now = datetime.datetime(2015, 11, 11, 11, 0, 0)
metrics = [
dict(name='cpu.frequency',
value=1.0,
source='source1',
timestamp=_ts_now),
dict(name='numa.membw.current',
numa_membw_values={"0": 10, "1": 43},
source='source2',
timestamp=_ts_now),
]
hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
compute = objects.ComputeNode(
uuid=uuids.cn1,
metrics=jsonutils.dumps(metrics),
memory_mb=0, free_disk_gb=0, local_gb=0,
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
disk_available_least=None,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host_ip='127.0.0.1', hypervisor_type='htype',
hypervisor_hostname='hostname', cpu_info='cpu_info',
supported_hv_specs=[],
hypervisor_version=hyper_ver_int,
numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
stats=None, pci_device_pools=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
host.update(compute=compute)
self.assertEqual(len(host.metrics), 2)
self.assertEqual(1.0, host.metrics.to_list()[0]['value'])
self.assertEqual('source1', host.metrics[0].source)
self.assertEqual('cpu.frequency', host.metrics[0].name)
self.assertEqual('numa.membw.current', host.metrics[1].name)
self.assertEqual('source2', host.metrics.to_list()[1]['source'])
self.assertEqual({'0': 10, '1': 43},
host.metrics[1].numa_membw_values)
self.assertIsInstance(host.numa_topology, six.string_types)
def test_stat_consumption_from_compute_node_not_ready(self):
compute = objects.ComputeNode(free_ram_mb=100,
uuid=uuids.compute_node_uuid)
host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
host._update_from_compute_node(compute)
        # Because the compute record is not ready, the free RAM update
        # will not happen and the value will remain 0
self.assertEqual(0, host.free_ram_mb)
| mikalstill/nova | nova/tests/unit/scheduler/test_host_manager.py | Python | apache-2.0 | 69,879 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetIamPolicy
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-connection
# [START bigqueryconnection_v1_generated_ConnectionService_GetIamPolicy_sync]
from google.cloud import bigquery_connection_v1
def sample_get_iam_policy():
# Create a client
client = bigquery_connection_v1.ConnectionServiceClient()
# Initialize request argument(s)
request = bigquery_connection_v1.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.get_iam_policy(request=request)
# Handle the response
print(response)
# [END bigqueryconnection_v1_generated_ConnectionService_GetIamPolicy_sync]
| googleapis/python-bigquery-connection | samples/generated_samples/bigqueryconnection_v1_generated_connection_service_get_iam_policy_sync.py | Python | apache-2.0 | 1,526 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
:samp:`Publish resources under the ResourceSync Framework`
The class :class:`ResourceSync` is the main entry point to the rspub-core library. It is in essence a one-method
class; its main method is :func:`~ResourceSync.execute`. This method takes as argument ``filenames``:
an iterable of files and/or directories to process. (Lists and, for instance, :class:`~rspub.core.selector.Selector` instances are iterables.)
Upon execution :class:`ResourceSync` will call the correct :class:`~rspub.core.executors.Executor` that
will walk all the files and directories named in ``filenames`` and that takes care of creating the right type
of sitemap: resourcelist, changelist etc. and complete the corresponding
sitemaps as capabilitylist and description.
Before you call :func:`~ResourceSync.execute` on :class:`ResourceSync` it may be advisable to set the proper
parameters for your synchronization. :class:`ResourceSync` is a subclass of :class:`~rspub.core.rs_paras.RsParameters`
and the description of ``parameters`` in that class is a good starting point to learn about the type, meaning and
function of these parameters. Here we will highlight some and discuss aspects of these parameters.
Selecting resources
-------------------
The algorithm for selecting resources can be shaped by you, the user of this library. If the default algorithm
suits you - so much the better - then you don't have to do anything and you can safely skip this paragraph.
The default algorithm is implemented
by the :class:`~rspub.util.gates.GateBuilder` class :class:`~rspub.pluggable.gate.ResourceGateBuilder`. This
default class builds a :func:`~rspub.util.gates.gate` that allows any file that is encountered in the list
of files and directories of the ``filenames`` argument. It will, however, exclude any file that
is not in :func:`~rspub.core.rs_paras.RsParameters.resource_dir` or any of its subdirectories, as well as
hidden files and files from the directories :func:`~rspub.core.rs_paras.RsParameters.metadata_dir`,
:func:`~rspub.core.rs_paras.RsParameters.description_dir` and :func:`~rspub.core.rs_paras.RsParameters.plugin_dir`
in case any of these directories are situated on the search-paths described in ``filenames``.
You can implement your own resource :func:`~rspub.util.gates.gate` by supplying a class named
`ResourceGateBuilder` in a directory you specify under the
:func:`~rspub.core.rs_paras.RsParameters.plugin_dir` ``parameter``. Your `ResourceGateBuilder` should subclass
:class:`~rspub.pluggable.gate.ResourceGateBuilder` or at least implement the methods
:func:`~rspub.util.gates.GateBuilder.build_includes` and :func:`~rspub.util.gates.GateBuilder.build_excludes`.
A detailed description of how to create your own `ResourceGateBuilder` can be found in
:doc:`rspub.pluggable.gate <rspub.pluggable.gate>`.
By shaping your own selection algorithm you could for instance say "include all the files from directory `x` but
exclude the subdirectory `y` and from directory `z` choose only those files whose filenames start with 'abc' and
from directory `z/b` choose only xml-files where the x-path expression `//such/and/so` yields 'foo' or 'bar'."
Anything goes, as long as you can express it as a predicate, that is, say 'yes' or 'no' to a resource, given
the filename of the resource.
.. seealso:: :doc:`rspub.util.gates <rspub.util.gates>`, :doc:`rspub.pluggable.gate <rspub.pluggable.gate>`
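For illustration only, a custom gate builder along the lines described above could look roughly like
this. It is a minimal sketch: the module location under ``plugin_dir``, the predicate bodies and the
exact method signatures are assumptions, not prescribed by rspub-core::

    # <plugin_dir>/gate.py -- hypothetical plugin module
    from rspub.pluggable.gate import ResourceGateBuilder as DefaultGateBuilder

    class ResourceGateBuilder(DefaultGateBuilder):

        def build_includes(self, includes):
            # only offer xml files to the publisher
            includes.append(lambda file_path: file_path.endswith(".xml"))
            return includes

        def build_excludes(self, excludes):
            # skip anything inside a directory named 'work'
            excludes.append(lambda file_path: "work" in file_path.split("/"))
            return excludes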
Strategies and executors
------------------------
The :class:`~rspub.core.rs_enum.Strategy` tells :class:`ResourceSync` in what way you want your resources processed.
Or better: :class:`ResourceSync` will choose the :class:`~rspub.core.executors.Executor` that fits your chosen strategy.
Do you want new resourcelists every time you call :func:`ResourceSync.execute`, new changelists, or
perhaps an incremental changelist? There are slots for other strategies in rspub-core,
such as resourcedump and changedump, but these strategies are not yet implemented.
If new changelist or incremental changelist is your strategy and there is no resourcelist.xml yet in your
:func:`~rspub.core.rs_paras.RsParameters.metadata_dir` then :class:`ResourceSync` will create a resourcelist.xml
the first time you call :func:`~ResourceSync.execute`.
The :class:`~rspub.core.rs_enum.Strategy` ``resourcelist`` does not require many system resources. Resources will
be processed one after the other and sitemap documents are written to disk once they are processed and
these sitemaps will at most take 50000 records. The strategies ``new_changelist`` and ``inc_changelist`` will
compare previous and present state of all your selected resources. In order to do so they collect metadata from
all the present resources in your selection and compare it to the previous state as recorded in resourcelists
and subsequent changelists.
This will be perfectly OK in most situations; however, if the number of resources is very large this
comparison might become infeasible. In that case, large amounts of resources will probably be managed by
some kind of repository system that can be queried for the requested data. It is perfectly alright to
write your own :class:`~rspub.core.executors.Executor` that handles the synchronisation of resources in
your repository system, and you are invited to share these executors. A suitable plugin mechanism to
accommodate such external executors could be added in a future version of rspub-core.
.. seealso:: :func:`rspub.core.rs_paras.RsParameters.strategy`, :class:`rspub.core.rs_enum.Strategy`, :doc:`rspub.core.executors <rspub.core.executors>`
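As a rough illustration (assuming ``strategy`` can be set as a parameter on :class:`ResourceSync`,
as the seealso above suggests; the directory names are made up), choosing a strategy before
execution could look like this::

    from rspub.core.rs import ResourceSync
    from rspub.core.rs_enum import Strategy

    resourcesync = ResourceSync()
    # compare previous and present state and publish only the changes
    resourcesync.strategy = Strategy.inc_changelist
    resourcesync.execute(["resources/green_spam", "resources/blue_spam"])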
Multiple collections
--------------------
:class:`ResourceSync` is a subclass of :class:`~rspub.core.rs_paras.RsParameters` and so the parameters set on
:class:`ResourceSync` can be saved and reinstituted later on. :class:`~rspub.core.config.Configurations` has
methods for listing and removing previously saved configurations. Multiple collections of resources
could be synchronized, each collection with its own configuration. Synchronizing the collection 'spam' could
go along these lines::
# get a list of previously saved configurations
[print(x) for x in Configurations.list_configurations()]
# rspub_core
# spam_config
# eggs_config
# prepare for synchronization of collection 'all about spam'
resourcesync = ResourceSync(config_name="spam_config")
# spam resources are in two directories
filenames = ["resources/green_spam", "resources/blue_spam"]
# do the synchronization
resourcesync.execute(filenames)
.. seealso:: :class:`rspub.core.rs_paras.RsParameters`, :class:`rspub.core.config.Configurations`, :func:`~rspub.core.rs_paras.RsParameters.save_configuration_as`
Observe execution
-----------------
:class:`ResourceSync` is a subclass of :class:`~rspub.util.observe.Observable`. The executor to which the execution
is delegated inherits all observers registered with :class:`ResourceSync`. :class:`ResourceSync` itself does not
fire events.
.. seealso:: :doc:`rspub.util.observe <rspub.util.observe>`, :class:`rspub.core.executors.ExecutorEvent`
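A minimal observer sketch (``pass_inform`` mirrors the catch-all used by :class:`ExecutionHistory`
below; treat the exact event hooks and configuration name as assumptions)::

    from rspub.core.rs import ResourceSync
    from rspub.util.observe import EventObserver

    class PrintingObserver(EventObserver):

        def pass_inform(self, *args, **kwargs):
            # called for events that have no more specific handler
            print(args, kwargs)

    resourcesync = ResourceSync(config_name="spam_config")
    resourcesync.register(PrintingObserver())
    resourcesync.execute(["resources/green_spam"])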
"""
import logging
import os
from glob import glob
from rspub.core.exe_changelist import NewChangeListExecutor, IncrementalChangeListExecutor
from rspub.core.exe_resourcelist import ResourceListExecutor
from rspub.core.rs_enum import Strategy
from rspub.core.rs_paras import RsParameters
from rspub.core.selector import Selector
from rspub.util import defaults
from rspub.util.observe import Observable, EventObserver
LOG = logging.getLogger(__name__)
class ResourceSync(Observable, RsParameters):
"""
:samp:`Main class for ResourceSync publishing`
"""
def __init__(self, **kwargs):
"""
:samp:`Initialization`
:param str config_name: the name of the configuration to read. If given, sets the current configuration.
:param kwargs: see :func:`rspub.core.rs_paras.RsParameters.__init__`
.. seealso:: :doc:`rspub.core.rs_paras <rspub.core.rs_paras>`
"""
Observable.__init__(self)
RsParameters.__init__(self, **kwargs)
def execute(self, filenames: iter=None, start_new=False):
"""
:samp:`Publish ResourceSync documents under conditions of current {parameters}`
Call appropriate executor and publish sitemap documents on the resources found in `filenames`.
        If no 'resourcelist_*.xml' files are found in the metadata directory, this method will always
        dispatch to the (new) ``resourcelist`` strategy.
        If the ``parameter`` :func:`~rspub.core.rs_paras.RsParameters.is_saving_sitemaps` is ``False`` this
        method will do a dry run: no existing sitemaps will be changed and no new sitemaps will be
        written to disk.
:param filenames: filenames and/or directories to scan
:param start_new: erase metadata directory and create new resourcelists
"""
# always start fresh publication with resourcelist
resourcelist_files = sorted(glob(self.abs_metadata_path("resourcelist_*.xml")))
start_new = start_new or len(resourcelist_files) == 0
# do we have filenames or look for a saved Selector?
if filenames is None and self.selector_file:
try:
filenames = Selector(self.selector_file)
LOG.info("Loaded selector from '%s'" % self.selector_file)
except Exception as err:
LOG.warning("Unable to load selector: {0}".format(err))
if filenames is None:
raise RuntimeError("Unable to execute: no filenames.")
paras = RsParameters(**self.__dict__)
executor = None
if self.strategy == Strategy.resourcelist or start_new:
executor = ResourceListExecutor(paras)
elif self.strategy == Strategy.new_changelist:
executor = NewChangeListExecutor(paras)
elif self.strategy == Strategy.inc_changelist:
executor = IncrementalChangeListExecutor(paras)
if executor:
executor.register(*self.observers)
executor.execute(filenames)
else:
raise NotImplementedError("Strategy not implemented: %s" % self.strategy)
# associate current parameters with a selector
if isinstance(filenames, Selector):
if filenames.location:
try:
filenames.write()
self.selector_file = filenames.abs_location()
LOG.info("Associated parameters '%s' with selector at '%s'"
% (self.configuration_name(), self.selector_file))
except Exception as err:
LOG.warning("Unable to save selector: {0}".format(err))
# set a timestamp
if self.is_saving_sitemaps:
self.last_execution = executor.date_start_processing
self.save_configuration(True)
class ExecutionHistory(EventObserver):
"""
:samp:`Execution report creator`
Currently not in use.
"""
def __init__(self, history_dir):
self.history_dir = history_dir
os.makedirs(self.history_dir, exist_ok=True)
self.history_file = os.path.join(self.history_dir, "his.txt")
def pass_inform(self, *args, **kwargs):
#print(args)
pass
def inform_execution_start(self, *args, **kwargs):
print(args, kwargs)
| cegesoma/rspub-core | rspub/core/rs.py | Python | apache-2.0 | 11,380 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
                raise ValueError(_('Cannot load an auth-plugin by class-name '
                                   'without a "method" attribute defined: %s')
                                 % plugin_class)
LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
plugin_name = driver.method
else:
plugin_name = plugin
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if plugin_name in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[plugin_name] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in global registry, then
# identity_api should be removed from this list.
@dependency.requires('identity_api', 'resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref, unscoped)
# project scope: (None, project_id, None, None)
# domain scope: (domain_id, None, None, None)
# trust scope: (None, None, trust_ref, None)
# unscoped: (None, None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, OS-TRUST:trust or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None, None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref, None)
else:
self._scope_data = (None, None, trust_ref, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref, unscoped).
                  If scoped to a project, (None, project_id, None, None)
                  will be returned.
                  If scoped to a domain, (domain_id, None, None, None)
                  will be returned.
                  If scoped to a trust, (None, project_id, trust_ref, None)
                  will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None, 'unscoped') will be
returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None,
unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust, unscoped)
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog,
parent_audit_id=token_audit_id)
# NOTE(wanghong): We consume a trust use only when we are using
# trusts and have successfully issued a token.
if trust:
self.trust_api.consume_use(trust['id'])
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
            # User has no default project; an unscoped token will be issued.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.resource_api.get_project(
default_project_id)
default_project_domain_ref = self.resource_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if context['environment'].get('REMOTE_USER'):
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
                # This will happen if there is no 'external' plugin registered
# and the container is performing authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
pass
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
pass
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return dict((x['id'], x) for x in a + b).values()
else:
return a or b
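    # Illustrative note (editor's addition, not part of the original Keystone
    # source): the dict-keyed-by-id trick above deduplicates refs coming from
    # the user and from its groups. For hypothetical inputs
    #   a = [{'id': 'p1'}, {'id': 'p2'}]
    #   b = [{'id': 'p2'}, {'id': 'p3'}]
    # dict((x['id'], x) for x in a + b).values() yields one ref each for
    # p1, p2 and p3, keeping the entry from b when ids collide.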
@controller.protected()
def get_auth_projects(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.ProjectV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_domains(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.DomainV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_catalog(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
project_id = auth_context.get('project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The V3Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(context, path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
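# Illustrative note (editor's addition): for a newly issued token the helper
# above responds with status (201, 'Created') and an X-Subject-Token header;
# token validation and check calls reuse it with (200, 'OK'). With
# hypothetical values:
#   render_token_data_response('abc123', {'token': {}}, created=True)
# returns a 201 response whose X-Subject-Token header is 'abc123'.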
|
UTSA-ICS/keystone-kerberos
|
keystone/auth/controllers.py
|
Python
|
apache-2.0
| 27,222
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph_converter import FunctionModifier
class IdentityModifier(FunctionModifier):
"""
    Identity conversion: every function is reproduced as-is in the new graph;
    optionally, the initial graph inputs are replaced according to `inputs`.
Args:
inputs (:obj:`dict`): Input variable mapping from the original input to another input. Default is the empty dictionary, so the new graph shares the original inputs.
Examples:
.. code-block:: python
pred = Model(...)
x = nn.Variable(...)
import nnabla.experimental.graph_converters as GC
modifiers = [GC.IdentityModifier({x0: x1})]
gc = GC.GraphConverter(modifiers)
pred = gc.convert(pred)
"""
def __init__(self, inputs={}, copy_value=False):
super(IdentityModifier, self).__init__()
self._input_dict = inputs
self._copy_value = copy_value
def modify(self, f, inputs):
# Replace only the initial inputs
if inputs[0].parent:
return
        # Nothing to do if no replacement inputs were given
if not self._input_dict:
return
if f.inputs[0] in self._input_dict:
inp_repl = self._input_dict[f.inputs[0]]
if not self._copy_value:
                inps = [inp_repl] + inputs[1:]
else:
if inp_repl.shape != f.inputs[0].shape:
raise ValueError("Shape between the replaced input ({}) and original input ({}) differs when copy_value=True".format(
inp_repl.shape, f.inputs[0].shape))
inp_repl.d = f.inputs[0].d.copy()
inp_repl.g = f.inputs[0].g.copy()
inps = [inp_repl] + inputs[1:]
self.init_map_func_inputs(f, inps)
o = self._call_function(
f.info.type_name, inps, f.info.args)
return o
def __finish__(self):
self._input_dict = None
self._copy_value = False
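# Illustrative sketch (editor's addition, not part of the original nnabla
# source). Variable names (x0, x1) and the `model` callable are hypothetical;
# it assumes a graph built on x0 and a replacement variable x1 of the same
# shape:
#
#   import nnabla as nn
#   import nnabla.experimental.graph_converters as GC
#
#   x0 = nn.Variable((1, 3, 32, 32))
#   x1 = nn.Variable((1, 3, 32, 32))
#   pred = model(x0)  # some network built on top of x0
#
#   # Rebuild the same graph on x1; with copy_value=True the data/grad arrays
#   # of x0 are copied into x1 before the graph is reconnected.
#   gc = GC.GraphConverter([GC.IdentityModifier({x0: x1}, copy_value=True)])
#   pred_on_x1 = gc.convert(pred)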
|
sony/nnabla
|
python/src/nnabla/experimental/graph_converters/identity.py
|
Python
|
apache-2.0
| 2,494
|
from sklearn.base import TransformerMixin
import pandas as pd
from time import time
class StaticTransformer(TransformerMixin):
def __init__(self, case_id_col, cat_cols, num_cols, fillna=True):
self.case_id_col = case_id_col
self.cat_cols = cat_cols
self.num_cols = num_cols
self.fillna = fillna
self.columns = None
self.fit_time = 0
self.transform_time = 0
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
start = time()
dt_first = X.groupby(self.case_id_col).first()
# transform numeric cols
dt_transformed = dt_first[self.num_cols]
# transform cat cols
if len(self.cat_cols) > 0:
dt_cat = pd.get_dummies(dt_first[self.cat_cols])
dt_transformed = pd.concat([dt_transformed, dt_cat], axis=1)
# fill NA with 0 if requested
if self.fillna:
dt_transformed = dt_transformed.fillna(0)
# add missing columns if necessary
if self.columns is not None:
missing_cols = [col for col in self.columns if col not in dt_transformed.columns]
for col in missing_cols:
dt_transformed[col] = 0
dt_transformed = dt_transformed[self.columns]
else:
self.columns = dt_transformed.columns
self.transform_time = time() - start
return dt_transformed
def get_feature_names(self):
return self.columns
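# --- Illustrative usage sketch (editor's addition, not part of the original
# repository). Column names ('case_id', 'channel', 'amount') are hypothetical;
# the transformer only assumes a case id column plus lists of categorical and
# numeric columns.
if __name__ == '__main__':
    log = pd.DataFrame({
        'case_id': [1, 1, 2, 2, 3],
        'channel': ['web', 'web', 'phone', 'phone', 'web'],
        'amount': [10.0, 10.0, 25.0, 25.0, None],
    })
    encoder = StaticTransformer(case_id_col='case_id',
                                cat_cols=['channel'],
                                num_cols=['amount'])
    # One row per case: numeric columns taken from the first event of the
    # case, categorical columns one-hot encoded, missing values filled with 0.
    print(encoder.fit_transform(log))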
|
irhete/predictive-monitoring-benchmark
|
transformers/StaticTransformer.py
|
Python
|
apache-2.0
| 1,584
|
import os
import unittest
import itk
import vtk
import qt
import ctk
import slicer
from slicer.ScriptedLoadableModule import *
import logging
class LabelMapToModel(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
# TODO make this more human readable by adding spaces
self.parent.title = "Label Map To Model"
self.parent.categories = ["Surface Models"]
self.parent.dependencies = []
# replace with "Firstname Lastname (Organization)"
self.parent.contributors = ["Matt McCormick (Kitware, Inc.)"]
self.parent.helpText = """
Convert a Slicer Label Map image into a Slicer Model (mesh).
"""
self.parent.acknowledgementText = """
This file was originally developed by Matthew McCormick, Kitware, Inc.
and was partially funded by NIH grant R41CA196565.
"""
class LabelMapToModelWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# Input label map selector
#
self.inputSelector = slicer.qMRMLNodeComboBox()
self.inputSelector.nodeTypes = ["vtkMRMLLabelMapVolumeNode"]
self.inputSelector.selectNodeUponCreation = True
self.inputSelector.addEnabled = False
self.inputSelector.removeEnabled = False
self.inputSelector.noneEnabled = False
self.inputSelector.showHidden = False
self.inputSelector.showChildNodeTypes = False
self.inputSelector.setMRMLScene(slicer.mrmlScene)
self.inputSelector.setToolTip("Pick the input label map.")
parametersFormLayout.addRow("Input Label Map: ", self.inputSelector)
#
# Output model selector
#
self.outputSelector = slicer.qMRMLNodeComboBox()
self.outputSelector.nodeTypes = ["vtkMRMLModelNode"]
self.outputSelector.selectNodeUponCreation = True
self.outputSelector.addEnabled = True
self.outputSelector.removeEnabled = True
self.outputSelector.noneEnabled = True
self.outputSelector.showHidden = False
self.outputSelector.showChildNodeTypes = False
self.outputSelector.setMRMLScene(slicer.mrmlScene)
self.outputSelector.setToolTip("Pick the output model.")
parametersFormLayout.addRow("Output Model: ", self.outputSelector)
#
# Apply button
#
self.applyButton = qt.QPushButton("Apply")
self.applyButton.toolTip = "Run the algorithm."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
# Connections
self.applyButton.connect('clicked(bool)', self.onApplyButton)
self.inputSelector.connect(
"currentNodeChanged(vtkMRMLNode*)", self.onSelect)
self.outputSelector.connect(
"currentNodeChanged(vtkMRMLNode*)", self.onSelect)
# Add vertical spacer
self.layout.addStretch(1)
# Refresh Apply button state
self.onSelect()
def cleanup(self):
pass
def onSelect(self):
self.applyButton.enabled = self.inputSelector.currentNode(
) and self.outputSelector.currentNode()
def onApplyButton(self):
logic = LabelMapToModelLogic()
logic.run(self.inputSelector.currentNode(),
self.outputSelector.currentNode())
class LabelMapToModelLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def hasImageData(self, labelMapNode):
"""Returns true if the passed in volume node has valid image data."""
if not labelMapNode:
logging.debug('hasImageData failed: no volume node')
return False
    if labelMapNode.GetImageData() is None:
logging.debug('hasImageData failed: no image data in volume node')
return False
return True
def isValidInputOutputData(self, inputLabelMap, outputModel):
"""Validates that the inputs and outputs are present."""
if not inputLabelMap:
logging.debug('isValidInputOutputData failed: no input label map node defined')
return False
if not outputModel:
logging.debug('isValidInputOutputData failed: no output model node defined')
return False
return True
def run(self, inputLabelMap, outputModel):
"""
Run the actual algorithm
"""
if not self.isValidInputOutputData(inputLabelMap, outputModel):
      slicer.util.errorDisplay(
        'Input label map and output model must both be specified.')
return False
logging.info('Processing started')
labelVtkData = inputLabelMap.GetImageData()
    if labelVtkData.GetScalarTypeAsString() == 'unsigned char':
PixelType = itk.UC
else:
slicer.util.errorDisplay('Pixel type of the label map is not yet supported.')
return False
Dimension = 3
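    # Editor's note: the conversion itself (building an ITK image of the
    # detected PixelType/Dimension from the label map and extracting a surface
    # into outputModel) is not implemented in this snippet; only the pixel
    # type check is performed before logging completion.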
logging.info('Processing completed')
return True
class LabelMapToModelTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_LabelMapToModel1()
def test_LabelMapToModel1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import urllib
downloads = (
('http://slicer.kitware.com/midas3/download?items=153172',
'BrainTumor_GBM_HG0003.mrb', slicer.util.loadScene),
)
for url, name, loader in downloads:
filePath = slicer.app.temporaryPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info(
'Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader:
logging.info('Loading %s...' % (name,))
loader(filePath)
self.delayDisplay('Finished with download and loading')
labelMapNode = slicer.util.getNode(pattern="Tissue Segmentation Volume")
logic = LabelMapToModelLogic()
self.assertTrue(logic.hasImageData(labelMapNode))
modelNode = slicer.vtkMRMLModelNode()
logic.run(labelMapNode, modelNode)
self.delayDisplay('Test passed!')
|
KitwareMedical/AugmentedLabelRegistration
|
LabelMapToModel/LabelMapToModel.py
|
Python
|
apache-2.0
| 8,658
|
#!/usr/bin/python
import sys, os
sys.path.append('..')
import json
import ConfigParser, string
from common import tool
def LoadClusterType():
global cluster_type
global role_type
f = open('cluster_type.json')
jstr = f.read()
js = json.loads(jstr)
cluster_type = {}
role_type = {}
for k, v in js.items():
name = v.get('name')
startModule = v.get('startModule')
stopModule = v.get('stopModule')
cleanModule = v.get('cleanModule')
cluster_type[k] = {'name': name,
'startModule': startModule,
'stopModule': stopModule,
'cleanModule': cleanModule}
roles = v.get('roles')
for k_, v_ in roles.items():
role_type[k + '_' + k_] = v_
def GetStartModule(name):
if not globals().has_key('cluster_type'):
LoadClusterType()
obj = cluster_type.get(name)
return obj.get('startModule')
def GetStopModule(name):
if not globals().has_key('cluster_type'):
LoadClusterType()
obj = cluster_type.get(name)
return obj.get('stopModule')
def GetCleanModule(name):
if not globals().has_key('cluster_type'):
LoadClusterType()
obj = cluster_type.get(name)
return obj.get('cleanModule')
def GetClusterType(name):
if not globals().has_key('cluster_type'):
LoadClusterType()
obj = cluster_type.get(name)
return obj.get('name')
def GetRoleType(name):
if not globals().has_key('role_type'):
LoadClusterType()
return role_type.get(name)
def LoadMachineStatus():
global machine_status
f = open('machine_status.json')
js = f.read()
machine_status = json.loads(js)
def GetMachineStatus(name):
if not globals().has_key('machine_status'):
LoadMachineStatus()
return machine_status.get(name)
def LoadOSType():
global os_type
f = open('os.json')
js = f.read()
os_type = json.loads(js)
def GetAllOSType():
if not globals().has_key('os_type'):
LoadOSType()
return os_type
def GetOSType(name):
if not globals().has_key('os_type'):
LoadOSType()
return os_type.get(name)
def LoadMysqlConfig():
global mysql_host
global mysql_user
global mysql_password
global mysql_database
cf = ConfigParser.ConfigParser()
cf.read('web.conf')
mysql_host = cf.get('mysql', 'host')
mysql_user = cf.get('mysql', 'user')
mysql_password = cf.get('mysql', 'password')
mysql_database = cf.get('mysql', 'database')
def MysqlConnector():
if not globals().has_key('mysql_host'):
LoadMysqlConfig()
if not globals().has_key('mysql_user'):
LoadMysqlConfig()
if not globals().has_key('mysql_password'):
LoadMysqlConfig()
if not globals().has_key('mysql_database'):
LoadMysqlConfig()
host = mysql_host
user = mysql_user
password = mysql_password
database = mysql_database
con = tool.connect_mysql(
host = host,
user = user,
password = password,
database = database
)
if con is None:
return None
connector = tool.Connector(con)
return connector
def Run():
LoadClusterType()
LoadMachineStatus()
LoadMysqlConfig()
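# Illustrative note (editor's addition): LoadClusterType above expects
# cluster_type.json to look roughly like the following (names are
# hypothetical; only the keys read by the code are shown):
#
#   {
#       "hadoop": {
#           "name": "Hadoop",
#           "startModule": "start_hadoop",
#           "stopModule": "stop_hadoop",
#           "cleanModule": "clean_hadoop",
#           "roles": {"master": "NameNode", "slave": "DataNode"}
#       }
#   }
#
# GetRoleType('hadoop_master') would then return "NameNode".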
|
codemeow5/deployment
|
web2/config.py
|
Python
|
apache-2.0
| 2,882
|
import mock
import pytest
import unittest
from datetime import datetime
from cloudify.models_states import VisibilityState, ExecutionState
from cloudify_rest_client.exceptions import (
CloudifyClientError,
IllegalExecutionParametersError,
)
from manager_rest.manager_exceptions import SQLStorageException, ConflictError
from manager_rest.storage import models, db
from manager_rest.rest.resources_v3_1.deployments import DeploymentGroupsId
from manager_rest.test import base_test
class DeploymentGroupsTestCase(base_test.BaseServerTestCase):
def setUp(self):
super(DeploymentGroupsTestCase, self).setUp()
self.blueprint = models.Blueprint(
id='blueprint',
creator=self.user,
tenant=self.tenant,
plan={'inputs': {}},
state='uploaded',
)
for dep_id in ['dep1', 'dep2']:
db.session.add(models.Deployment(
id=dep_id,
creator=self.user,
display_name='',
tenant=self.tenant,
blueprint=self.blueprint,
workflows={'install': {'operation': ''}}
))
def _deployment(self, **kwargs):
dep_params = {
'creator': self.user,
'tenant': self.tenant,
'blueprint': self.blueprint
}
dep_params.update(kwargs)
dep = models.Deployment(**dep_params)
db.session.add(dep)
return dep
def test_get_empty(self):
result = self.client.deployment_groups.list()
assert len(result) == 0
def test_add_empty_group(self):
result = self.client.deployment_groups.list()
assert len(result) == 0
result = self.client.deployment_groups.put('group1')
assert result['id'] == 'group1'
assert len(self.client.deployment_groups.list()) == 1
def test_add_to_group(self):
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
assert set(group.deployment_ids) == {'dep1', 'dep2'}
def test_overwrite_group(self):
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
def test_clear_group(self):
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
group = self.client.deployment_groups.put(
'group1',
deployment_ids=[]
)
assert group.deployment_ids == []
def test_update_attributes(self):
"""When deployment_ids is not provided, the group is not cleared"""
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
assert not group.description
assert not group.default_blueprint_id
assert not group.default_inputs
group = self.client.deployment_groups.put(
'group1',
description='descr',
blueprint_id='blueprint',
default_inputs={'inp1': 'value'}
)
assert group.description == 'descr'
assert group.deployment_ids == ['dep1']
assert group.default_blueprint_id == 'blueprint'
assert group.default_inputs == {'inp1': 'value'}
with self.assertRaises(CloudifyClientError) as cm:
self.client.deployment_groups.put(
'group1',
blueprint_id='nonexistent',
)
assert cm.exception.status_code == 404
def test_create_with_blueprint(self):
self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint',
default_inputs={'a': 'b'}
)
group = self.sm.get(models.DeploymentGroup, 'group1')
assert group.default_blueprint.id == 'blueprint'
assert group.default_inputs == {'a': 'b'}
def test_set_visibility(self):
self.client.deployment_groups.put(
'group1',
visibility=VisibilityState.PRIVATE
)
group = self.sm.get(models.DeploymentGroup, 'group1')
assert group.visibility == VisibilityState.PRIVATE
self.client.deployment_groups.put(
'group1',
visibility=VisibilityState.TENANT
)
assert group.visibility == VisibilityState.TENANT
with self.assertRaisesRegex(
CloudifyClientError, 'visibility_states') as cm:
self.client.deployment_groups.put(
'group1',
visibility='invalid visibility'
)
assert cm.exception.status_code == 409
def test_create_deployment(self):
self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint',
new_deployments=[{}]
)
group = self.sm.get(models.DeploymentGroup, 'group1')
assert len(group.deployments) == 1
dep = group.deployments[0]
assert dep.blueprint.id == 'blueprint'
assert dep.id.startswith('group1-')
def test_add_deployments(self):
group = self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint',
deployment_ids=['dep1']
)
assert set(group.deployment_ids) == {'dep1'}
group = self.client.deployment_groups.put(
'group1',
new_deployments=[{}]
)
assert len(group.deployment_ids) == 2
group = self.client.deployment_groups.put(
'group1',
new_deployments=[{}]
)
assert 'dep1' in group.deployment_ids
assert len(group.deployment_ids) == 3
def test_create_from_spec(self):
self.blueprint.plan['inputs'] = {'http_web_server_port': {}}
inputs = {'http_web_server_port': 1234}
labels = [{'label1': 'label-value'}]
group = self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint',
new_deployments=[
{
'id': 'spec_dep1',
'inputs': inputs,
'labels': labels,
}
]
)
assert set(group.deployment_ids) == {'spec_dep1'}
sm_group = self.sm.get(models.DeploymentGroup, 'group1')
assert sm_group.creation_counter == 1
deps = sm_group.deployments
assert len(deps) == 1
create_exec_params = deps[0].create_execution.parameters
assert create_exec_params['inputs'] == inputs
assert create_exec_params['labels'] == [('label1', 'label-value')]
def test_add_deployment_ids(self):
self.client.deployment_groups.put('group1')
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep2']
)
assert set(group.deployment_ids) == {'dep1', 'dep2'}
def test_add_twice(self):
self.client.deployment_groups.put('group1')
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep1']
def test_add_nonexistent(self):
self.client.deployment_groups.put('group1')
with self.assertRaisesRegex(CloudifyClientError, 'not found') as cm:
self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['nonexistent']
)
assert cm.exception.status_code == 404
with self.assertRaisesRegex(CloudifyClientError, 'not found') as cm:
self.client.deployment_groups.add_deployments(
'group1',
filter_id='nonexistent'
)
assert cm.exception.status_code == 404
def test_remove_nonexistent(self):
self.client.deployment_groups.put('group1')
with self.assertRaisesRegex(CloudifyClientError, 'not found') as cm:
self.client.deployment_groups.remove_deployments(
'group1',
deployment_ids=['nonexistent']
)
assert cm.exception.status_code == 404
with self.assertRaisesRegex(CloudifyClientError, 'not found') as cm:
self.client.deployment_groups.remove_deployments(
'group1',
filter_id='nonexistent'
)
assert cm.exception.status_code == 404
def test_remove_deployment_ids(self):
self.client.deployment_groups.put('group1')
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1', 'dep2']
)
assert set(group.deployment_ids) == {'dep1', 'dep2'}
group = self.client.deployment_groups.remove_deployments(
'group1',
deployment_ids=['dep1']
)
assert group.deployment_ids == ['dep2']
def test_add_deployment_count(self):
self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint'
)
group = self.client.deployment_groups.add_deployments(
'group1',
count=3
)
assert len(group.deployment_ids) == 3
def test_add_remove_same(self):
self.client.deployment_groups.put('group1')
group = self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1']
)
# add and remove the same deployment in a single call - it is
# removed; using the http client directly, because the restclient
# has no way to express such inconsistency
self.client.deployment_groups.api.patch(
'/deployment-groups/{0}'.format(group['id']),
data={
'add': {
'deployment_ids': ['dep2']
},
'remove': {
'deployment_ids': ['dep1', 'dep2']
},
}
)
group = self.client.deployment_groups.get(group['id'])
assert group.deployment_ids == []
def test_add_inputs(self):
self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint'
)
group = self.client.deployment_groups.add_deployments(
'group1',
new_deployments=[{}, {}]
)
assert len(group.deployment_ids) == 2
def test_get_deployment(self):
"""Group IDs are also in the deployment response"""
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep1']
)
dep = self.client.deployments.get('dep1')
assert set(dep.deployment_groups) == {'group1', 'group2'}
def test_get_deployment_include(self):
"""Group IDs are also in the deployment response"""
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep1']
)
dep = self.client.deployments.get(
'dep1',
_include=['id', 'deployment_groups'])
assert set(dep.deployment_groups) == {'group1', 'group2'}
def test_get_deployment_by_group(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
deployments = self.client.deployments.list(_group_id='group1')
assert len(deployments) == 1
assert deployments[0].id == 'dep1'
def test_group_delete(self):
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
assert len(group.deployment_ids) == 1
group = self.client.deployment_groups.delete('group1')
assert len(self.client.deployment_groups.list()) == 0
# deleting the group didn't delete the deployments themselves
assert len(self.client.deployments.list()) == 2
def test_group_delete_deployments(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
self.client.deployment_groups.delete(
'group1', delete_deployments=True)
assert len(self.client.deployment_groups.list()) == 0
# dep hasn't been deleted _yet_, but check that delete-dep-env for it
# was run
dep = self.sm.get(models.Deployment, 'dep1')
assert any(exc.workflow_id == 'delete_deployment_environment'
for exc in dep.executions)
def test_create_filters(self):
"""Create a group with filter_id to set the deployments"""
self.client.deployments.update_labels('dep1', [
{'label1': 'value1'}
])
self.client.deployments_filters.create('filter1', [
{'key': 'label1', 'values': ['value1'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployment_groups.put(
'group1',
filter_id='filter1'
)
group = self.client.deployment_groups.get('group1')
assert group.deployment_ids == ['dep1']
def test_add_from_filter_ids(self):
"""Extend a group providing filter_id"""
self.client.deployments.update_labels('dep1', [
{'label1': 'value1'}
])
self.client.deployments_filters.create('filter1', [
{'key': 'label1', 'values': ['value1'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployment_groups.put('group1')
self.client.deployment_groups.add_deployments(
'group1',
filter_id='filter1'
)
group = self.client.deployment_groups.get('group1')
assert group.deployment_ids == ['dep1']
def test_add_from_filter_rules(self):
"""Extend a group providing filter_rules"""
self.client.deployments.update_labels('dep1', [
{'label1': 'value1'}
])
self.client.deployment_groups.put('group1')
self.client.deployment_groups.add_deployments(
'group1',
filter_rules=[{'key': 'label1',
'values': ['value1'],
'operator': 'any_of',
'type': 'label'}]
)
group = self.client.deployment_groups.get('group1')
assert group.deployment_ids == ['dep1']
def test_add_from_filters(self):
"""Extend a group providing filter_id and filter_rules"""
self.client.deployments.update_labels('dep1', [
{'label1': 'value1'}
])
self.client.deployments.update_labels('dep2', [
{'label1': 'value2'}
])
self.client.deployments_filters.create('filter1', [
{'key': 'label1', 'values': ['value1'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployment_groups.put('group1')
self.client.deployment_groups.add_deployments(
'group1',
filter_id='filter1',
filter_rules=[{'key': 'label1',
'values': ['value2'],
'operator': 'any_of',
'type': 'label'}]
)
group = self.client.deployment_groups.get('group1')
assert set(group.deployment_ids) == {'dep2', 'dep1'}
def test_remove_from_filters(self):
"""Shrink a group providing filter_id and filter_rules"""
self.client.deployments.update_labels('dep1', [
{'label1': 'value1'}
])
self.client.deployments.update_labels('dep2', [
{'label1': 'value2'}
])
self.client.deployments_filters.create('filter1', [
{'key': 'label1', 'values': ['value1'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
self.client.deployment_groups.remove_deployments(
'group1',
filter_id='filter1',
filter_rules=[{'key': 'label1',
'values': ['value2'],
'operator': 'any_of',
'type': 'label'}]
)
group = self.client.deployment_groups.get('group1')
assert group.deployment_ids == []
def test_add_from_group(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep2']
)
group3 = self.client.deployment_groups.put(
'group3',
deployments_from_group='group1'
)
assert set(group3.deployment_ids) == {'dep1'}
group3 = self.client.deployment_groups.add_deployments(
'group3',
deployments_from_group='group2'
)
assert set(group3.deployment_ids) == {'dep1', 'dep2'}
def test_remove_by_group(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep1']
)
self.client.deployment_groups.put('group3') # empty group
group1 = self.client.deployment_groups.remove_deployments(
'group1',
deployments_from_group='group3'
)
assert set(group1.deployment_ids) == {'dep1', 'dep2'}
group1 = self.client.deployment_groups.remove_deployments(
'group1',
deployments_from_group='group2'
)
assert set(group1.deployment_ids) == {'dep2'}
# removing is idempotent
group1 = self.client.deployment_groups.remove_deployments(
'group1',
deployments_from_group='group2'
)
assert set(group1.deployment_ids) == {'dep2'}
def test_set_labels(self):
"""Create a group with labels"""
labels = [{'label1': 'value1'}]
updated_labels = [{'label1': 'value2'}, {'label2': 'value3'}]
group = self.client.deployment_groups.put(
'group1',
labels=labels,
)
self.assert_resource_labels(group.labels, labels)
group = self.client.deployment_groups.put(
'group1',
labels=updated_labels,
)
self.assert_resource_labels(group.labels, updated_labels)
def test_group_labels_for_deployments(self):
"""Group labels are applied to the newly-created deployments"""
group = self.client.deployment_groups.put(
'group1',
labels=[{'label1': 'value1'}, {'label2': 'value2'}],
blueprint_id='blueprint',
new_deployments=[{
'labels': [{'label1': 'value1'}, {'label1': 'value2'},
{'label3': 'value4'}]
}]
)
dep_id = group.deployment_ids[0]
dep = self.sm.get(models.Deployment, dep_id)
assert set(dep.create_execution.parameters['labels']) == {
# from new_deployments:
('label1', 'value1'),
('label1', 'value2'),
('label3', 'value4'),
# from the group:
# ('label1', 'value1') - not present - deduplicated
('label2', 'value2')
}
def test_delete_group_label(self):
"""Deleting a label from the group, deletes it from its deps"""
self.client.deployments.update_labels('dep1', [{'label1': 'value1'}])
self.client.deployment_groups.put(
'group1',
labels=[{'label1': 'value1'}],
)
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
group = self.client.deployment_groups.put(
'group1',
labels=[]
)
assert group.labels == []
client_dep = self.client.deployments.get('dep1')
assert client_dep.labels == []
def test_add_group_label(self):
"""Adding a label to a group with deps, adds it to the deps"""
self.client.deployments.update_labels('dep1', [{'label1': 'value1'}])
group = self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1'],
)
self.client.deployment_groups.put(
'group1',
labels=[{'label2': 'value2'}],
)
with self.assertRaises(CloudifyClientError) as cm:
self.client.deployment_groups.put(
'group1',
labels=[{'csys-invalid': 'xxx'}],
)
assert cm.exception.status_code == 400
with self.assertRaises(CloudifyClientError) as cm:
self.client.deployment_groups.put(
'group1',
labels=[{'łó-disallowed-characters': 'xxx'}],
)
assert cm.exception.status_code == 400
dep_id = group.deployment_ids[0]
client_dep = self.client.deployments.get(dep_id)
self.sm.get(models.Deployment, dep_id)
self.assert_resource_labels(client_dep.labels, [
{'label1': 'value1'}, {'label2': 'value2'}
])
def test_add_labels_already_exist(self):
labels = [{'label2': 'value2'}]
self.client.deployments.update_labels('dep1', labels)
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1'],
)
self.client.deployment_groups.put( # doesn't throw
'group1',
labels=labels,
)
dep = self.client.deployments.get('dep1')
self.assert_resource_labels(dep.labels, labels)
def test_add_labels_to_added_deployments(self):
"""Group labels are applied to deps added to the group"""
labels = [{'label1': 'value1'}]
self.client.deployment_groups.put(
'group1',
labels=labels,
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep1']
)
filter_labels = [{'label': 'filter'}]
self.client.deployments.update_labels('dep2', filter_labels)
self.client.deployments_filters.create('filter1', [
{'key': 'label', 'values': ['filter'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployments.create('blueprint', 'dep3')
self.client.deployment_groups.put(
'group1',
# add a deployment using all 3 ways: by id, by clone, by filter
deployments_from_group=['group2'], # dep1
filter_id='filter1', # dep2
deployment_ids=['dep3'],
)
dep1 = self.client.deployments.get('dep1')
self.assert_resource_labels(dep1.labels, labels)
dep2 = self.client.deployments.get('dep2')
self.assert_resource_labels(dep2.labels, labels + filter_labels)
dep3 = self.client.deployments.get('dep3')
self.assert_resource_labels(dep3.labels, labels)
def test_add_labels_deployments_added_twice(self):
"""Add a deployment twice, in two ways, to a group with labels.
Only adds the labels once to the deployment.
"""
labels = [{'label1': 'value1'}]
self.client.deployment_groups.put(
'group1',
labels=labels,
)
self.client.deployment_groups.put(
'group2',
deployment_ids=['dep1']
)
self.client.deployment_groups.put(
'group1',
deployments_from_group=['group2'], # dep1
deployment_ids=['dep1'],
)
dep1 = self.client.deployments.get('dep1')
self.assert_resource_labels(dep1.labels, labels)
def test_add_invalid_label_parent(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
with self.assertRaisesRegex(CloudifyClientError, 'not found'):
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'value2'}],
)
group = self.client.deployment_groups.get('group1')
dep1 = self.client.deployments.get('dep1')
dep2 = self.client.deployments.get('dep2')
assert len(group.labels) == 0
assert len(dep1.labels) == 0
assert len(dep2.labels) == 0
def test_add_cyclic_parent_labels_in_group(self):
self.client.deployments.update_labels(
'dep2', [{'csys-obj-parent': 'dep1'}])
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
with self.assertRaisesRegex(CloudifyClientError, 'cyclic'):
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'dep2'}],
)
group = self.client.deployment_groups.get('group1')
dep1 = self.client.deployments.get('dep1')
dep2 = self.client.deployments.get('dep2')
# Defining dep1 as a parent will add a consumer label to it
sanitized_dep1_labels = \
[lb for lb in dep1.labels if lb.key != 'csys-consumer-id']
assert len(group.labels) == 0
assert len(sanitized_dep1_labels) == 0
assert len(dep2.labels) == 1
def test_add_self_deployment_as_parent(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1']
)
with self.assertRaisesRegex(CloudifyClientError, 'cyclic'):
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'dep1'}],
)
group = self.client.deployment_groups.get('group1')
dep1 = self.client.deployments.get('dep1')
assert len(group.labels) == 0
assert len(dep1.labels) == 0
def test_add_single_parent(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
parent = self._deployment(id='parent_1')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'}],
)
assert parent.sub_services_count == 2
def test_add_multiple_parents(self):
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
parent1 = self._deployment(id='parent_1')
parent2 = self._deployment(id='parent_2')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-parent': 'parent_2'}],
)
assert parent1.sub_services_count == 2
assert parent2.sub_services_count == 2
def test_add_parents_before_adding_deployment(self):
parent1 = self._deployment(id='parent_1')
parent2 = self._deployment(id='parent_2')
self.client.deployment_groups.put('group1')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-parent': 'parent_2'}],
)
self.client.deployment_groups.put(
'group1',
deployment_ids=['dep1', 'dep2']
)
assert parent1.sub_services_count == 2
assert parent2.sub_services_count == 2
def test_add_parents_before_adding_deployments_from_groups(self):
parent1 = self._deployment(id='parent_1')
parent2 = self._deployment(id='parent_2')
parent3 = self._deployment(id='parent_3')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-parent': 'parent_2'},
{'csys-obj-parent': 'parent_3'}],
)
self.client.deployment_groups.put('group2', blueprint_id='blueprint')
self.client.deployment_groups.put('group3', blueprint_id='blueprint')
self.client.deployment_groups.add_deployments(
'group2',
count=4
)
self.client.deployment_groups.add_deployments(
'group3',
deployment_ids=['dep1', 'dep2']
)
self.client.deployment_groups.add_deployments(
'group1',
deployments_from_group='group2'
)
self.client.deployment_groups.add_deployments(
'group1',
deployments_from_group='group3'
)
assert parent1.sub_services_count == 6
assert parent2.sub_services_count == 6
assert parent3.sub_services_count == 6
def test_add_parents_to_multiple_source_of_deployments(self):
parent1 = self._deployment(id='parent_1')
self._deployment(id='dep3')
self._deployment(id='dep4')
self._deployment(id='dep5')
self.client.deployment_groups.put('group1', blueprint_id='blueprint')
self.client.deployment_groups.put('group2', blueprint_id='blueprint')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'}],
)
self.client.deployment_groups.add_deployments(
'group2',
deployment_ids=['dep1', 'dep2']
)
self.client.deployments.update_labels('dep3', [
{'label1': 'value1'}
])
self.client.deployments.update_labels('dep4', [
{'label1': 'value1'}
])
self.client.deployments_filters.create('filter1', [
{'key': 'label1', 'values': ['value1'],
'operator': 'any_of', 'type': 'label'}
])
self.client.deployment_groups.put(
'group1',
filter_id='filter1',
deployment_ids=['dep5'],
deployments_from_group='group2'
)
assert parent1.sub_services_count == 5
def test_add_parents_to_environment_deployments(self):
parent1 = self._deployment(id='parent_1')
self.client.deployment_groups.put('group1', blueprint_id='blueprint')
self.client.deployment_groups.add_deployments(
'group1',
count=4
)
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-type': 'environment'}],
)
assert parent1.sub_environments_count == 4
def test_convert_service_to_environment_for_deployments(self):
parent1 = self._deployment(id='parent_1')
self.client.deployment_groups.put('group1', blueprint_id='blueprint')
self.client.deployment_groups.add_deployments(
'group1',
count=4
)
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'}],
)
assert parent1.sub_services_count == 4
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-type': 'environment'}],
)
assert parent1.sub_environments_count == 4
def test_convert_environment_to_service_for_deployments(self):
parent1 = self._deployment(id='parent_1')
self.client.deployment_groups.put('group1', blueprint_id='blueprint')
self.client.deployment_groups.add_deployments(
'group1',
count=4
)
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-type': 'environment'}],
)
assert parent1.sub_environments_count == 4
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'}],
)
assert parent1.sub_services_count == 4
def test_delete_parents_labels_from_deployments(self):
parent1 = self._deployment(id='parent_1')
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'}],
blueprint_id='blueprint'
)
self.client.deployment_groups.add_deployments(
'group1',
deployment_ids=['dep1', 'dep2']
)
assert parent1.sub_services_count == 2
self.client.deployment_groups.put(
'group1',
labels=[],
blueprint_id='blueprint'
)
assert parent1.sub_services_count == 0
def test_validate_update_deployment_statuses_after_conversion(self):
parent1 = self._deployment(id='parent_1')
self.client.deployment_groups.put('group1', blueprint_id='blueprint')
self.client.deployment_groups.add_deployments(
'group1',
count=1
)
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-parent': 'parent_1'},
{'csys-obj-type': 'environment'}],
)
group_deployment = self.sm.get(
models.DeploymentGroup, 'group1').deployments[0]
assert parent1.sub_environments_count == 1
assert parent1.sub_services_count == 0
assert parent1.sub_services_status is None
assert parent1.sub_environments_status \
== group_deployment.deployment_status
self.client.deployment_groups.put(
'group1',
labels=[{'csys-obj-type': 'service'},
{'csys-obj-parent': 'parent_1'}],
)
assert parent1.sub_environments_count == 0
assert parent1.sub_services_count == 1
assert parent1.sub_environments_status is None
assert parent1.sub_services_status \
== group_deployment.deployment_status
def test_invalid_inputs(self):
self.blueprint.plan['inputs'] = {'http_web_server_port': {}}
self.client.deployment_groups.put(
'group1',
blueprint_id='blueprint',
new_deployments=[
{'inputs': {'http_web_server_port': 8080}}
])
with self.assertRaisesRegex(CloudifyClientError, 'unknown input'):
self.client.deployment_groups.put(
'group1',
new_deployments=[
{'inputs': {
'nonexistent': 42,
'http_web_server_port': 8080,
}}
])
class ExecutionGroupsTestCase(base_test.BaseServerTestCase):
def setUp(self):
super().setUp()
bp = models.Blueprint(
id='bp1',
creator=self.user,
tenant=self.tenant,
plan={'inputs': {}},
)
self.deployment = models.Deployment(
id='dep1',
creator=self.user,
display_name='',
tenant=self.tenant,
blueprint=bp,
workflows={'install': {'operation': ''}}
)
self.dep_group = models.DeploymentGroup(
id='group1',
default_blueprint=bp,
tenant=self.tenant,
creator=self.user,
)
self.dep_group.deployments.append(self.deployment)
db.session.add(self.deployment)
def test_get_empty(self):
result = self.client.execution_groups.list()
assert len(result) == 0
def test_create_from_group(self):
group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install'
)
assert len(group.execution_ids) == 1
execution = self.client.executions.get(group.execution_ids[0])
assert execution.workflow_id == 'install'
assert execution.deployment_id == 'dep1'
def test_get_events(self):
"""Get events by group id.
        Events attached to the execution group are returned, but not events
        attached to the individual executions (whether in the group or not).
"""
group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install'
)
non_group_execution = self.client.executions.start(
deployment_id='dep1',
workflow_id='install',
force=True, # force, because there's one already running
)
# refetch as ORM objects so we can pass them to Log/Event
execution_group = self.sm.get(models.ExecutionGroup, group.id)
group_execution = self.sm.get(models.Execution, group.execution_ids[0])
non_group_execution = self.sm.get(
models.Execution, non_group_execution.id
)
self.sm.put(
models.Log(
message='log1',
execution_group=execution_group,
reported_timestamp=datetime.utcnow()
)
)
self.sm.put(
models.Event(
message='event1',
execution_group=execution_group,
reported_timestamp=datetime.utcnow()
)
)
self.sm.put(
models.Log(
message='log2',
execution=group_execution,
reported_timestamp=datetime.utcnow()
)
)
self.sm.put(
models.Event(
message='event2',
execution=group_execution,
reported_timestamp=datetime.utcnow()
)
)
self.sm.put(
models.Log(
message='log3',
execution=non_group_execution,
reported_timestamp=datetime.utcnow()
)
)
self.sm.put(
models.Event(
message='event3',
execution=non_group_execution,
reported_timestamp=datetime.utcnow()
)
)
events = self.client.events.list(
execution_group_id=group['id'],
include_logs=True
)
assert len(events) == 2
assert all(e['execution_group_id'] == execution_group.id
for e in events)
def test_one_fk_not_null_constraint(self):
group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install'
)
# refetch as ORM objects so we can pass them to Log/Event
execution_group = self.sm.get(models.ExecutionGroup, group.id)
execution = self.sm.get(models.Execution, group.execution_ids[0])
with self.assertRaisesRegex(SQLStorageException,
'violates check constraint'):
self.sm.put(
models.Event(
message='event',
execution=execution,
execution_group=execution_group,
reported_timestamp=datetime.utcnow()
)
)
with self.assertRaisesRegex(SQLStorageException,
'violates check constraint'):
self.sm.put(
models.Event(
message='event',
reported_timestamp=datetime.utcnow()
)
)
with self.assertRaisesRegex(SQLStorageException,
'violates check constraint'):
self.sm.put(
models.Log(
message='log',
execution=execution,
execution_group=execution_group,
reported_timestamp=datetime.utcnow()
)
)
with self.assertRaisesRegex(SQLStorageException,
'violates check constraint'):
self.sm.put(
models.Log(
message='log',
reported_timestamp=datetime.utcnow()
)
)
def test_get_execution_by_group(self):
execution_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install'
)
self.client.executions.start(
deployment_id='dep1',
workflow_id='install',
force=True, # force, because there's one already running
)
executions = self.client.executions.list(
_group_id=execution_group['id'])
assert len(executions) == 1
def test_get_execution_group(self):
group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install'
)
execution = self.sm.get(models.Execution, group.execution_ids[0])
execution.status = ExecutionState.TERMINATED
self.sm.update(execution)
retrieved = self.client.execution_groups.get(group.id)
assert retrieved.id == group.id
assert len(retrieved.execution_ids) == 1
assert retrieved.status == ExecutionState.TERMINATED
listed = self.client.execution_groups.list()[0]
assert listed.id == group.id
assert listed.get('status') is None
assert listed.get('execution_ids') is None
def test_delete_deployment(self):
"""It's still possible to delete a deployment used in an exec-group"""
models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
exc1 = models.Execution(
id='gr1',
workflow_id='install',
deployment=self.deployment,
status=ExecutionState.QUEUED,
tenant=self.tenant,
creator=self.user,
)
with self.assertRaisesRegex(CloudifyClientError, 'running or queued'):
self.client.deployments.delete('dep1')
exc1.status = ExecutionState.TERMINATED
self.client.deployments.delete('dep1')
delete_exec = self.sm.get(models.Execution, None, filters={
'workflow_id': 'delete_deployment_environment',
'deployment_id': 'dep1'
})
# set the execution to started, so that we can update its status
# via the restclient to terminated, which actually deletes
# the deployment from the db
delete_exec.status = ExecutionState.STARTED
self.client.executions.update(
delete_exec.id, ExecutionState.TERMINATED)
assert db.session.query(models.Deployment).count() == 0
def test_queues_over_concurrency(self):
exc_group = models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
for _ in range(5):
exc_group.executions.append(models.Execution(
workflow_id='create_deployment_environment',
tenant=self.tenant,
creator=self.user,
status=ExecutionState.PENDING,
parameters={}
))
exc_group.concurrency = 3
messages = exc_group.start_executions(self.sm, self.rm)
assert len(messages) == exc_group.concurrency
assert sum(exc.status == ExecutionState.PENDING
for exc in exc_group.executions) == exc_group.concurrency
assert sum(exc.status == ExecutionState.QUEUED
for exc in exc_group.executions) == 2
def test_doesnt_start_finished(self):
exc_group = models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
for exc_status in [ExecutionState.FAILED, ExecutionState.TERMINATED]:
exc_group.executions.append(models.Execution(
workflow_id='create_deployment_environment',
tenant=self.tenant,
creator=self.user,
status=exc_status,
))
messages = exc_group.start_executions(self.sm, self.rm)
assert len(messages) == 0
def test_cancel_group(self):
exc_group = models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
ex1 = models.Execution(
workflow_id='',
tenant=self.tenant,
creator=self.user,
status=ExecutionState.STARTED,
)
ex2 = models.Execution(
workflow_id='',
tenant=self.tenant,
creator=self.user,
status=ExecutionState.STARTED,
)
exc_group.executions = [ex1, ex2]
self.client.execution_groups.cancel(exc_group.id)
for exc in exc_group.executions:
assert exc.status in (
ExecutionState.CANCELLED, ExecutionState.CANCELLING
)
@mock.patch('manager_rest.workflow_executor.execute_workflow')
def test_resume_group(self, mock_execute):
"""After all executions have been cancelled, resume them"""
exc_group = models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
ex1 = models.Execution(
workflow_id='create_deployment_environment',
parameters={},
tenant=self.tenant,
creator=self.user,
status=ExecutionState.CANCELLED,
)
ex2 = models.Execution(
workflow_id='create_deployment_environment',
parameters={},
tenant=self.tenant,
creator=self.user,
status=ExecutionState.CANCELLED,
)
exc_group.executions = [ex1, ex2]
self.client.execution_groups.resume(exc_group.id)
group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in group.executions:
assert exc.status in (
ExecutionState.PENDING, ExecutionState.QUEUED
)
mock_execute.assert_called()
def test_invalid_parameters(self):
with self.assertRaises(IllegalExecutionParametersError):
self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
parameters={
'dep1': {'invalid-input': 42}
}
)
with self.assertRaises(IllegalExecutionParametersError):
self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
default_parameters={'invalid-input': 42}
)
def test_group_status(self):
for execution_statuses, expected_group_status in [
([], None),
([ExecutionState.PENDING], ExecutionState.PENDING),
([ExecutionState.QUEUED], ExecutionState.QUEUED),
([ExecutionState.TERMINATED], ExecutionState.TERMINATED),
([ExecutionState.STARTED], ExecutionState.STARTED),
([ExecutionState.FAILED], ExecutionState.FAILED),
([ExecutionState.TERMINATED, ExecutionState.FAILED],
ExecutionState.FAILED),
([ExecutionState.STARTED, ExecutionState.PENDING,
ExecutionState.TERMINATED],
ExecutionState.STARTED),
([ExecutionState.TERMINATED, ExecutionState.STARTED],
ExecutionState.STARTED)
]:
with self.subTest():
exc_group = models.ExecutionGroup(
id='gr1',
workflow_id='',
tenant=self.tenant,
creator=self.user,
)
for exc_status in execution_statuses:
exc = models.Execution(
workflow_id='',
tenant=self.tenant,
creator=self.user,
status=exc_status
)
exc_group.executions.append(exc)
assert exc_group.status == expected_group_status
@mock.patch('manager_rest.workflow_executor.execute_workflow', mock.Mock())
def test_success_group(self):
# executions are already terminated when we add success_group, so
# they should be in the success group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
exc.status = ExecutionState.TERMINATED
self.sm.put(exc)
self.client.deployment_groups.put('group2')
self.client.execution_groups.set_target_group(
exc_group.id, success_group='group2')
target_group = self.sm.get(models.DeploymentGroup, 'group2')
assert len(target_group.deployments) == 1
# executions terminate after we add the success group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
self.client.deployment_groups.put('group3')
self.client.execution_groups.set_target_group(
exc_group.id, success_group='group3')
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
self.client.executions.update(
exc.id, status=ExecutionState.TERMINATED)
target_group = self.sm.get(models.DeploymentGroup, 'group3')
assert len(target_group.deployments) == 1
        # same as above, but the deployment is already in the target group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
self.client.deployment_groups.put('group3', deployment_ids=['dep1'])
self.client.execution_groups.set_target_group(
exc_group.id, success_group='group3')
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
self.client.executions.update(
exc.id, status=ExecutionState.TERMINATED)
target_group = self.sm.get(models.DeploymentGroup, 'group3')
assert len(target_group.deployments) == 1
@mock.patch('manager_rest.workflow_executor.execute_workflow', mock.Mock())
def test_failed_group(self):
# similar to test_success_group, but for the failed group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
exc.status = ExecutionState.FAILED
self.sm.put(exc)
self.client.deployment_groups.put('group2')
self.client.execution_groups.set_target_group(
exc_group.id, failed_group='group2')
target_group = self.sm.get(models.DeploymentGroup, 'group2')
assert len(target_group.deployments) == 1
        # executions fail after we add the failed group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
self.client.deployment_groups.put('group3')
self.client.execution_groups.set_target_group(
exc_group.id, failed_group='group3')
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
self.client.executions.update(
exc.id, status=ExecutionState.FAILED)
target_group = self.sm.get(models.DeploymentGroup, 'group3')
assert len(target_group.deployments) == 1
        # same as above, but the deployment is already in the target group
exc_group = self.client.execution_groups.start(
deployment_group_id='group1',
workflow_id='install',
)
self.client.deployment_groups.put('group3', deployment_ids=['dep1'])
self.client.execution_groups.set_target_group(
exc_group.id, failed_group='group3')
sm_exc_group = self.sm.get(models.ExecutionGroup, exc_group.id)
for exc in sm_exc_group.executions:
self.client.executions.update(
exc.id, status=ExecutionState.FAILED)
target_group = self.sm.get(models.DeploymentGroup, 'group3')
assert len(target_group.deployments) == 1
class TestGenerateID(unittest.TestCase):
def setUp(self):
self.endpoint = DeploymentGroupsId()
def _mock_blueprint(self, id_template=None):
bp = mock.MagicMock()
bp.id = 'blueprint_id'
bp.plan = {
'deployment_settings': {'id_template': id_template}
}
return bp
def _generate_id(self, group, new_dep_spec):
return self.endpoint._new_deployment_id(group, new_dep_spec)
def test_from_blueprint(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint('hello-{uuid}')
new_id, is_unique = self._generate_id(group, {})
assert is_unique
assert new_id.startswith('hello')
assert len(new_id) > 36
def test_from_blueprint_no_variable(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint('hello')
with pytest.raises(ConflictError):
self._generate_id(group, {})
def test_group_id(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, is_unique = self._generate_id(group, {})
assert is_unique
assert new_id.startswith('g1')
assert len(new_id) > 36
def test_spec_no_variable(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, is_unique = self._generate_id(group, {'id': 'hello'})
assert not is_unique
assert new_id == 'hello'
def test_spec_template(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, is_unique = self._generate_id(
group, {'id': 'hello-{group_id}'})
assert not is_unique
assert new_id == 'hello-g1'
def test_spec_uuid(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, is_unique = self._generate_id(group, {'id': 'hello-{uuid}'})
assert is_unique
assert new_id.startswith('hello')
assert len(new_id) > 36
def test_blueprint_id(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, _ = self._generate_id(
group, {'id': '{blueprint_id}-{uuid}'})
assert new_id.startswith(group.default_blueprint.id)
def test_creation_counter(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
group.creation_counter = 42
new_id, _ = self._generate_id(group, {'id': '{group_id}-{count}'})
assert new_id == 'g1-42'
def test_site_name(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
new_id, _ = self._generate_id(
group, {'id': '{site_name}-{uuid}', 'site_name': 'a'})
assert new_id.startswith('a-')
def test_display_name(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
dep_spec = {'display_name': '{group_id}'}
self._generate_id(group, dep_spec)
assert dep_spec['display_name'] == 'g1'
def test_display_name_same_uuid(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
dep_spec = {'id': '{group_id}-{uuid}',
'display_name': '{group_id}-{uuid}'}
new_id, _ = self._generate_id(group, dep_spec)
assert dep_spec['display_name'] == new_id
def test_display_name_from_dsl(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
group.default_blueprint.plan['deployment_settings']['display_name'] =\
'display-name-{uuid}'
dep_spec = {}
new_id, _ = self._generate_id(group, dep_spec)
assert dep_spec['display_name'].startswith('display-name')
assert len(dep_spec['display_name']) > 36
def test_display_name_from_dsl_function(self):
group = models.DeploymentGroup(id='g1')
group.default_blueprint = self._mock_blueprint()
group.default_blueprint.plan['deployment_settings']['display_name'] =\
{'concat': ['display', 'name']}
dep_spec = {}
new_id, _ = self._generate_id(group, dep_spec)
assert not dep_spec.get('display_name')
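# Illustrative sketch (not from the test module above): the TestGenerateID
# cases suggest that new deployment ids come from substituting {uuid},
# {group_id}, {blueprint_id}, {count} and {site_name} placeholders, and that
# an id counts as unique only when it contains {uuid}. A minimal stand-alone
# approximation of that behaviour, using a hypothetical helper name:
import uuid


def render_deployment_id(template, group_id, blueprint_id='', count=0,
                         site_name=''):
    """Render a deployment-id template; return (new_id, is_unique)."""
    is_unique = '{uuid}' in template
    new_id = template.format(
        uuid=uuid.uuid4(),
        group_id=group_id,
        blueprint_id=blueprint_id,
        count=count,
        site_name=site_name,
    )
    return new_id, is_unique
# e.g. render_deployment_id('hello-{uuid}', 'g1') -> ('hello-<36-char uuid>', True)
#      render_deployment_id('{group_id}-{count}', 'g1', count=42) -> ('g1-42', False)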
|
cloudify-cosmo/cloudify-manager
|
rest-service/manager_rest/test/endpoints/test_deployment_groups.py
|
Python
|
apache-2.0
| 58,027
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 09:55:46 2017
@author: slim
"""
import matplotlib.pyplot as pl
import numpy as np
import TracyWidom as TW
import math
from multiprocessing import Pool
p = Pool(4)
c=[]
cx=[]
s343=[(i,j) for i in range(45,51)+range(52,66)+range(68,79)+range(81,88) for j in['A','B','C','D','E','F','G','H','J','K'] ]
s303=[(i,j) for i in range(41,45)+[79,80,88] for j in['A','B','C','H','J','K'] ]
s040=[(i,j) for i in range(41,45) for j in['D','E','F','G'] ]
s242=[(i,j) for i in [67] for j in['B','C','D','E','F','G','H','J'] ]
s003=[(i,j) for i in [66] for j in['H','J','K'] ]
sieges=s343+s303+s040+s242+s003
def attend(a,b):
col1=['A','B','C','D','E']
    col2=['F','G','H','J','K']  # seats served by the right aisle (seat letter 'I' is skipped on the A380)
c1= a[0]>=b[0]
c11=a[1]in col1 and b[1]in col1
c12=a[1]in col2 and b[1]in col2
return c1 and (c11 or c12)
def attente(sec):
att=[]
for i in range(len(sec)):
s=0
for j in range(i):
if attend(sec[i],sec[j]):
s=max(s,att[j])
att.append(s+1)
return max(att)
def cdf(x):
return cl.cdf(x)**2
def pdf(x):
return (cdf(x+0.001)-cdf(x-0.001))*1000/2
def moy():
return sum([x*pdf(x) for x in np.array(range (-1000,1000))/100.0])/100.0
## cl=TW.TracyWidom(beta=2)
n1=len(sieges)
n=10000
cl=TW.TracyWidom(beta=2)
print moy()
res=[attente(np.random.permutation(sieges)) for i in xrange(n)]
correction = (moy()*(n1/2.0)**(1.0/6)+math.sqrt(2*n1))/np.average(res)
pl.hist(res, range = (min(res)-2, max(res)+2), bins=max(res)-min(res)+5,label='Histogramme simulation A380')
x=np.array(range(100*min(res)-200,100*(max(res))+200))/100.0
y=n*cdf((x*correction-(2*n1)**0.5)/(n1/2.0)**(1.0/6) )
y2=n*pdf((x*correction-(2*n1)**0.5)/(n1/2.0)**(1.0/6))/(n1/2.0)**(1.0/6)
pl.plot(x,y,label='Repartition:F2^2 ')
pl.plot(x,y2,label='Distribution:F2^2 ')
x=range(min(res),max(res))
y=[len([ j for j in res if j<=i]) for i in x ]
pl.plot(x,y,'b*',label='FCC simulation A380')
pl.legend(bbox_to_anchor=(1.1, 0.8), loc=2, borderaxespad=0.)
## estimation
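## Illustrative check (not part of the original simulation): `attente` returns
## the length of the longest chain of passengers in the boarding order in which
## each passenger must wait for the previous one, i.e. sits in the same
## aisle-side column group and in a row at or beyond the blocker's row.
demo = [(46, 'A'), (45, 'B'), (47, 'C')]
assert attente(demo) == 2  # (47,'C') must pass rows 46 and 45; (45,'B') is not blocked by (46,'A')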
|
kammmoun/PFE
|
codes/A380.py
|
Python
|
apache-2.0
| 2,200
|
# /*******************************************************
# *
# * Copyright (C) 2015-2016 Kyriakos Naziris <kyriakos@naziris.co.uk>
# * This is a thesis project of University of Portsmouth.
# *
# * This file is part of HomeSecPi.
# *
# * Feel free to use and modify the source code as long as
# * as you give credit to the original author of the
# * project (Kyriakos Naziris - kyriakos@naziris.co.uk).
# *
# *******************************************************/
#!/usr/bin/python
from Adafruit_MCP230xx import Adafruit_MCP230XX
import os
import urllib # URL functions
import urllib2 # URL functions
import ftplib #FTP Library
from urllib import urlopen
from time import sleep, strftime
import datetime
import time
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
import RPi.GPIO as GPIO
import subprocess
from subprocess import check_output
import datetime
import time
import sqlite3
import random
import string
import hashlib #For hashed passwords
from multiprocessing import Process, Queue
#e-Mail imports
from smtplib import SMTP
from smtplib import SMTPException
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
import sys
import smbus
GPIO.setwarnings(False)
#Initialization of database
db = sqlite3.connect('homesecpidb', check_same_thread=False)
cursor = db.cursor()
queue = Queue()
mcp2 = Adafruit_MCP230XX(address = 0x20, num_gpios = 16) # MCP23017
lcd = Adafruit_CharLCDPlate()
lcd.begin(16,1)
led_yellow = 1
led_green = 2
led_red = 0
buzzer = 3
def button_Pressed(self):
mcp2.output(led_yellow, 1)
mcp2.output(buzzer, 1)
sleep(0.3)
mcp2.output(led_yellow, 0)
mcp2.output(buzzer, 0)
class keypad():
# CONSTANTS
KEYPAD = [
[1,2,3],
[4,5,6],
[7,8,9],
["*",0,"#"]
]
ROW = [4,17,10,22]
COLUMN = [14,23,18]
def __init__(self):
GPIO.setmode(GPIO.BCM)
def getKey(self):
# Set all columns as output low
for j in range(len(self.COLUMN)):
GPIO.setup(self.COLUMN[j], GPIO.OUT)
GPIO.output(self.COLUMN[j], GPIO.LOW)
# Set all rows as input
for i in range(len(self.ROW)):
GPIO.setup(self.ROW[i], GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Scan rows for pushed key/button
# A valid key press should set "rowVal" between 0 and 3.
rowVal = -1
for i in range(len(self.ROW)):
tmpRead = GPIO.input(self.ROW[i])
if tmpRead == 0:
rowVal = i
# if rowVal is not 0 thru 3 then no button was pressed and we can exit
if rowVal < 0 or rowVal > 3:
self.exit()
return
# Convert columns to input
for j in range(len(self.COLUMN)):
GPIO.setup(self.COLUMN[j], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Switch the i-th row found from scan to output
GPIO.setup(self.ROW[rowVal], GPIO.OUT)
GPIO.output(self.ROW[rowVal], GPIO.HIGH)
# Scan columns for still-pushed key/button
# A valid key press should set "colVal" between 0 and 2.
colVal = -1
for j in range(len(self.COLUMN)):
tmpRead = GPIO.input(self.COLUMN[j])
if tmpRead == 1:
colVal=j
# if colVal is not 0 thru 2 then no button was pressed and we can exit
if colVal < 0 or colVal > 2:
self.exit()
return
# Return the value of the key pressed
self.exit()
return self.KEYPAD[rowVal][colVal]
def exit(self):
# Reinitialize all rows and columns as input at exit
for i in range(len(self.ROW)):
GPIO.setup(self.ROW[i], GPIO.IN, pull_up_down=GPIO.PUD_UP)
for j in range(len(self.COLUMN)):
GPIO.setup(self.COLUMN[j], GPIO.IN, pull_up_down=GPIO.PUD_UP)
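# Illustrative sketch (hypothetical helper, not part of the original file):
# getKey() returns a single key value or None, so callers are expected to poll
# it, then wait for the key to be released before reading the next press.
def _example_poll_keypad():
    kp = keypad()
    digit = None
    while digit is None:                # wait for a key press
        digit = kp.getKey()
        sleep(0.05)
    while kp.getKey() is not None:      # wait for the release (simple debounce)
        sleep(0.05)
    return digit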
class interact():
def clean(self):
GPIO.cleanup()
exit()
def initialize_Output(self):
for k in range (4):
mcp2.config(k, mcp2.OUTPUT)
#Beeper beeps twice and LED flashes
def indication(self):
mcp2.output(buzzer, 1)
mcp2.output(led_yellow, 1)
sleep(0.2)
mcp2.output(buzzer, 0)
mcp2.output(led_yellow, 0)
sleep(0.2)
mcp2.output(buzzer, 1)
mcp2.output(led_yellow, 1)
sleep(0.2)
mcp2.output(buzzer, 0)
mcp2.output(led_yellow, 0)
#When a button on the keypad is pressed,
    #the beeper sounds and the LED flashes
def button_Pressed(self):
mcp2.output(buzzer, 1)
mcp2.output(led_yellow, 1)
sleep(0.3)
mcp2.output(buzzer, 0)
mcp2.output(led_yellow, 0)
def GetInIP(self):
output = check_output(["ip","addr","show","eth0"])
inIP = output.split('\n')[2].strip().split(' ')[1].split('/')[0]
return inIP
    #Writes the passcode prompt on the LCD screen
def askPass(self):
lcd.clear()
lcd.message("Please type your\nPasscode:")
    #Sends an SMS to the user's phone number (fetched from the database)
    #using the txtlocal API
def sendSms(self, q, message):
username = 'your@emailaccount.com'
sender = 'HomeSecPi'
hash = 'your provided hash'
numbers = ("'{0}'".format(database().getmNumber()))
print numbers
test_flag = 1
values = {'test' : test_flag,
'uname' : username,
'hash' : hash,
'message' : message,
'from' : sender,
'selectednums' : numbers }
url = 'http://www.txtlocal.com/sendsmspost.php'
postdata = urllib.urlencode(values)
req = urllib2.Request(url, postdata)
print 'Attempt to send SMS ...'
while True:
try:
urlopen('https://www.google.com')
break
except:
print 'No active internet connection in order to send the SMS. Retry in 30 sec'
time.sleep(30)
print "Connection to TXTLocal established"
try:
response = urllib2.urlopen(req)
response_url = response.geturl()
if response_url==url:
print 'SMS has been sent!'
except urllib2.URLError, e:
print 'Send failed!'
print e.reason
    #Uploads the captured image to the FTP server
    #and then returns a live link to the image
def ftpSession(self, q, image_path, image_name):
while True:
try:
urlopen('https://www.google.com')
break
except:
print 'No active internet connection in order to upload the picture. Retry in 30 sec FTP'
time.sleep(30)
        session = ftplib.FTP('ftp.yourdomain.com','username','password') # Here you need to provide your FTP host, username and password
session.cwd('images') #Give the correct folder where to store the image
print "FTP Connection established"
file = open(image_path,'rb') # file to send
session.storbinary('STOR ' + image_name, file) # send the file
file.close() # close file and FTP
session.quit()
link = 'http://www.yourdomain.com/your_directory/images_directory/' + image_name # This line here generates a link of the uploaded picture based on your webserver
print "File has been uploaded!"
return link
    #Sends an email using Google's SMTP server.
def sendEmail(self, q, Subject, textBody, attachment, receiver):
"""This method sends an email"""
EMAIL_SUBJECT = Subject
EMAIL_USERNAME = 'youremail@gmail.com' # Here you need to fill your email address, in my case it was a gmail
EMAIL_FROM = 'HomeSecPi Project'
EMAIL_RECEIVER = receiver
GMAIL_SMTP = "smtp.gmail.com"
GMAIL_SMTP_PORT = 587
GMAIL_PASS = 'your_email_password' # And your email password
TEXT_SUBTYPE = "plain"
#Create the email.
msg = MIMEMultipart()
msg["Subject"] = EMAIL_SUBJECT
msg["From"] = EMAIL_FROM
msg["To"] = EMAIL_RECEIVER
body = MIMEMultipart('alternative')
body.attach(MIMEText(textBody, TEXT_SUBTYPE ))
#Attach the message
msg.attach(body)
        #Attach a picture.
if attachment != "NO":
msg.attach(MIMEImage(file(attachment).read()))
while True:
try:
urlopen('https://www.google.com')
break
except:
print 'No active internet connection in order to send the e-Mail. Retry in 30 sec'
time.sleep(30)
print "Connection to e-Mail server established"
try:
smtpObj = SMTP(GMAIL_SMTP, GMAIL_SMTP_PORT)
#Identify yourself to GMAIL ESMTP server.
smtpObj.ehlo()
#Put SMTP connection in TLS mode and call ehlo again.
smtpObj.starttls()
smtpObj.ehlo()
#Login to service
smtpObj.login(user=EMAIL_USERNAME, password=GMAIL_PASS)
#Send email
smtpObj.sendmail(EMAIL_FROM, EMAIL_RECEIVER, msg.as_string())
#close connection and session.
smtpObj.quit()
print 'e-Mail has been sent!'
except SMTPException as error:
print "Error: unable to send email : {err}".format(err=error)
    #Captures an image using the USB PS3 camera. The picture is taken
    #with fswebcam and watermarked with the current date and time
def grabPicture(self):
grab_cam = subprocess.Popen("sudo fswebcam --timestamp '%d-%m-%Y %H:%M:%S (%Z)' -r 640x480 -d /dev/v4l/by-id/usb-OmniVision_Technologies__Inc._USB_Camera-B4.09.24.1-video-index0 -q /home/pi/HomeSecPi/pictures/%m-%d-%y-%H%M.jpg", shell=True)
grab_cam.wait()
todays_date = datetime.datetime.today()
image_name = todays_date.strftime('%m-%d-%y-%H%M') + '.jpg'
return image_name
    #Calls the required functions to upload the captured image to the server
    #and to send an email and SMS to the user, alerting them about the intrusion
def takePicture(self, image_name):
image_path = '/home/pi/HomeSecPi/pictures/' + image_name
#interact().ftpSession(image_path, image_name)
Process(target=interact().ftpSession, args=(queue, image_path, image_name)).start()
rLink = 'http://www.yourdomain.com/your_directory/images_directory/' + image_name
#interact().sendEmail("Intruder Detected", "Here is your intruder:", image_path, database().getEmail()) #Dynamic get receiver
Process(target=interact().sendEmail, args=(queue, "Intruder Detected", "Here is your intruder:", image_path, database().getEmail())).start()
print rLink
#interact().sendSms("Here is your intruder: " + rLink)
Process(target=interact().sendSms, args=(queue, "Here is your intruder: " + rLink)).start()
#These functions arm and disarm the system accordingly,
#show the right indications on the LCD screen, LEDs,
#beeper and voice prompts, and at the end ask again for the passcode
class status():
def disarm(self):
database().switchOff()
lcd.clear()
lcd.message("System Disarmed!")
ir.indication()
mcp2.output(led_red, 0)
database().writeLogs("System has been Disarmed")
os.system('mpg321 -q /home/pi/HomeSecPi/Speech/disarmed.mp3 &')
sleep(3)
attempt = ""
ir.askPass()
def arm(self):
lcd.clear()
lcd.message("Initializing\nSystem...")
attempt = ""
sleep(2)
os.system('mpg321 -q /home/pi/HomeSecPi/Speech/systemset.mp3 &')
sleep(5)
lcd.clear()
lcd.message('Please Exit Now!')
finish_time = datetime.datetime.now() + datetime.timedelta(seconds=20)
while datetime.datetime.now() < finish_time:
sleep(5)
ir.indication()
database().switchOn()
database().writeLogs("System has been Armed")
lcd.clear()
lcd.message("System Armed!")
ir.indication()
os.system('mpg321 -q /home/pi/HomeSecPi/Speech/armed.mp3 &')
mcp2.output(led_red, 1)
ir.askPass()
    #Returns the powering mode of the UPiS power supply
def pwr_mode(self):
i2c = smbus.SMBus(1)
row = i2c.read_word_data(0x6a, 0x00)
row = "%02x "%(row,)
output = (row[-2:]).strip()
if (output == '1'):
return "External Cable Powering (EPR)"
elif (output == '2'):
return "USB Cable Powering"
elif (output == '3'):
return "Raspberry Pi Powering"
elif (output == '4'):
return "Battery Powering"
elif (output == '5'):
return "Battery Low Powering"
elif (output == '6'):
return "CPR Mode"
elif (output =='7'):
return "BPR Mode"
else:
return "Reading Error"
class database():
#Change the password with the given one
def changePass(self, password):
cursor.execute('''UPDATE users SET password = ? WHERE id = 1 ''', (password,))
db.commit()
database().writeLogs("User's password has been changed")
#Change the 4 digit passcode with the given one
def changeFdigit(self, digit):
cursor.execute('''UPDATE users SET fdigit = ? WHERE id = 1 ''', (digit,))
database().writeLogs("Four-Digit password has been changed")
db.commit()
#Generating a random password
def generatePass(self):
passwd = ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(8)])
return passwd
    #Calls the generatePass function to generate a random password,
    #encrypts it and then stores it in the database
def resetPass(self):
randomPass = database().generatePass()
hashedPass = database().getHashed(randomPass)
database().changePass(hashedPass)
return randomPass
#Encrypt the password using the hash method
def getHashed(self, password):
return hashlib.sha256(password).hexdigest()
    #Transforms the given password into a hash and checks
    #if it is the same as the password stored in the database
def checkHashedPass(self, password, digest):
return database().getHashed(password) == digest
#Change email with the given one
def changeEmail(self, email):
cursor.execute('''UPDATE users SET email = ? WHERE id = 1 ''', (email,))
db.commit()
database().writeLogs("e-Mail has been changed")
#Change username with the given one
def changeUsername(self, newUsername):
cursor.execute('''UPDATE users SET Username = ? WHERE id = 1 ''', (newUsername,))
db.commit()
database().writeLogs("Username has been changed")
#Change Phone Number with the given one
def changePhoneNumber(self, newPhonenumber):
cursor.execute('''UPDATE users SET mNumber = ? WHERE id = 1 ''', (newPhonenumber,))
db.commit()
database().writeLogs("Phone number has been changed")
#Returns the actual state of the system (Armed or Disarmed)
def getState(self):
cursor.execute('''SELECT enabled FROM functions''')
output = cursor.fetchone()
state = output[0]
return state
    #Sets the enabled field in the database to True,
    #which means the system is armed
def switchOn(self):
cursor.execute('''UPDATE functions SET enabled = ? ''', ('True',))
db.commit()
    #Sets the enabled field in the database to False,
    #which means the system is disarmed
def switchOff(self):
cursor.execute('''UPDATE functions SET enabled = ? ''', ('False',))
db.commit()
#Returns the user's email
def getEmail(self):
cursor.execute('''SELECT email FROM users WHERE id = 1''')
userEmail = cursor.fetchone()
return userEmail[0]
#Returns the user's phone number
def getmNumber(self):
cursor.execute('''SELECT mNumber FROM users WHERE id = 1''')
mNumber = cursor.fetchone()
return mNumber[0]
    #Returns the user's username
def getUsername(self):
cursor.execute('''SELECT username FROM users WHERE id = 1''')
output = cursor.fetchone()
currentUsername = str(output[0])
return currentUsername
#Returns the users encrypted password
def getPassword(self):
cursor.execute('''SELECT password FROM users WHERE id = 1''')
output = cursor.fetchone()
currentPassword = str(output[0])
return currentPassword
#Returns the list of users, which for now is just one user
def getUsers(self):
users = {}
cursor.execute('''SELECT username, password FROM users ''')
all_userDetails = cursor.fetchall()
for userDetails in all_userDetails:
users.update({str(userDetails[0]):str(userDetails[1])})
        return users
#Returns the four digit passcode of the user
def getFourDigit(self):
cursor.execute('''SELECT fdigit FROM users WHERE id = 1''')
output = cursor.fetchone()
fdigit = str(output[0])
return fdigit
def getLogs(self):
cursor.execute('''SELECT * FROM logs ORDER BY dateTime DESC
LIMIT 15''')
output = [dict(dateTime=row[0], message=row[1]) for row in cursor.fetchall()]
return output
def writeLogs(self, cMessage):
DateTime = datetime.datetime.now()
cDateTime = DateTime.strftime('%d/%m/%Y %H:%M:%S')
cursor.execute('''INSERT INTO logs(dateTime, message) VALUES(?, ?)''',(cDateTime, cMessage))
db.commit()
#Initialization of the classes
kp = keypad()
ir = interact()
st = status()
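# Illustrative sketch (hypothetical, not part of this module): a caller such as
# the main alarm loop would typically combine the singletons created above --
# collect keypad digits into an attempt string, compare it against the stored
# four-digit passcode, and arm or disarm the system.
def _example_check_passcode(attempt):
    if attempt == database().getFourDigit():
        if database().getState() == 'True':
            st.disarm()
        else:
            st.arm()
        return True
    ir.indication()
    return False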
|
naziris/HomeSecPi
|
functions.py
|
Python
|
apache-2.0
| 17,039
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from util import ctx2version, ctx2options
def build(ctx,image):
ctx.run("docker build -t {network}/{image}:{version} {image}".format(network=ctx.network,image=image,version=ctx2version(ctx,image)))
def run(ctx,image):
network=ctx.network
version=ctx2version(ctx,image)
options=ctx2options(ctx,image)
print ("DEBUG",image, network, version, options)
ctx.run("docker run -i -t --name {network}-{image} {options} {network}/{image}:{version}".format(network=network,image=image,version=version,options=options),pty=True)
def attach(ctx,image):
ctx.run("docker attach {network}-{image}".format(network=ctx.network,image=image),pty=True)
def bash(ctx,image):
ctx.run("docker exec -i -t {network}-{image} /bin/bash".format(network=ctx.network,image=image),pty=True)
def spawn(ctx,image):
ctx.run("docker run -i -t --hostname {image} {options} {network}/{image}:{version} /bin/bash".format(network=ctx.network,image=image,version=ctx2version(ctx,image),options=ctx2options(ctx,image)),pty=True)
def image(ctx,image):
ctx.run("docker images {network}/{image}:{version}".format(network=ctx.network,image=image,version=ctx2version(ctx,image)))
def start(ctx,image):
ctx.run("docker run -d --name {network}-{image} --hostname {image} {options} {network}/{image}:{version}".format(network=ctx.network,image=image,version=ctx2version(ctx,image),options=ctx2options(ctx,image)))
def stop(ctx,image):
ctx.run("docker stop {network}-{image}".format(network=ctx.network,image=image))
def rm(ctx,image):
ctx.run("docker rm {network}-{image}".format(network=ctx.network,image=image))
def status(ctx,image):
ctx.run("docker ps -a -f name={network}-{image}".format(network=ctx.network,image=image))
def inspect(ctx,image):
ctx.run("docker inspect {network}-{image}".format(network=ctx.network,image=image))
def systemd_reload(ctx,image):
ctx.run("docker exec -i {network}-{image} /usr/bin/systemctl reload {image}".format(network=ctx.network,image=image))
def containers_rm(ctx):
ctx.run("docker rm $(docker ps -a -q)")
ctx.run("docker rmi --force $(docker images -q)")
def volumes_rm(ctx):
ctx.run("docker volume rm $(docker volume ls -q --filter dangling=true)")
def cmd(ctx,image,cmd):
ctx.run("docker exec -i -t {network}-{image} {cmd}".format(network=ctx.network,image=image,cmd=cmd),pty=True)
def network_create(ctx):
ctx.run("docker network create {network}".format(network=ctx.network))
|
PixelDragon/pixeldragon
|
docker.py
|
Python
|
apache-2.0
| 2,539
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
from builtins import range
from past.builtins import basestring
import logging
import time
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from apiclient.discovery import build, HttpError
from pandas.io.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
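# Illustrative sketch (hypothetical helper; the connection id and query are
# placeholders): with a configured "bigquery_default" Airflow connection the
# hook above can hand query results straight to pandas.
def _example_query_to_dataframe():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    return hook.get_pandas_df('SELECT 1 AS one')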
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=','):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single
            wildcard per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
"""
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'schema': {
'fields': schema_fields
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Wait for query to finish.
while not job['status']['state'] == 'DONE':
logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id)
time.sleep(5)
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Check if job had errors.
if 'errorResult' in job['status']:
            raise Exception(
                'BigQuery job failed. Final error was: {}'.format(
                    job['status']['errorResult']))
return job_id
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
logging.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
logging.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
logging.info('table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
logging.info('table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self._buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
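# Illustrative sketch (hypothetical helper): the classes above combine into the
# usual PEP 249 flow -- connection -> cursor -> execute -> fetch.
def _example_pep249_roundtrip(hook):
    conn = hook.get_conn()
    cursor = conn.cursor()
    cursor.execute('SELECT 1 AS one')
    return cursor.fetchall()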
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
cmpt = table_input.split(':')
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
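# Illustrative examples (derived from the branches of _split_tablename above):
#   _split_tablename('my-project:my_dataset.my_table', 'default-project')
#       -> ('my-project', 'my_dataset', 'my_table')
#   _split_tablename('my_dataset.my_table', 'default-project')
#       -> ('default-project', 'my_dataset', 'my_table')
#   _split_tablename('a:b:c', 'default-project')   # raises: too many ':' components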
|
plypaul/airflow
|
airflow/contrib/hooks/bigquery_hook.py
|
Python
|
apache-2.0
| 34,940
|
import json
from pathlib import Path
from typing import Optional, List, Dict, Text, Type
import tensorflow as tf
import numpy as np
import pytest
from _pytest.monkeypatch import MonkeyPatch
from _pytest.logging import LogCaptureFixture
import logging
from rasa.core.featurizers.single_state_featurizer import (
IntentTokenizerSingleStateFeaturizer,
)
from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import IntentMaxHistoryTrackerFeaturizer
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.shared.core.generator import TrackerWithCachedStates
from rasa.core.policies.ted_policy import PREDICTION_FEATURES
from rasa.core.policies.unexpected_intent_policy import UnexpecTEDIntentPolicy
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.shared.core.constants import ACTION_UNLIKELY_INTENT_NAME, ACTION_LISTEN_NAME
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import (
ActionExecuted,
UserUttered,
EntitiesAdded,
SlotSet,
ActionExecutionRejected,
ActiveLoop,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.utils.tensorflow.constants import (
IGNORE_INTENTS_LIST,
LABEL,
MASK,
SENTENCE,
IDS,
POSITIVE_SCORES_KEY,
NEGATIVE_SCORES_KEY,
RANKING_KEY,
SCORE_KEY,
THRESHOLD_KEY,
SEVERITY_KEY,
QUERY_INTENT_KEY,
NAME,
RANKING_LENGTH,
)
from rasa.shared.nlu.constants import INTENT
from rasa.shared.core.events import Event
from rasa.utils.tensorflow import model_data_utils
from tests.core.test_policies import train_trackers
from tests.core.policies.test_ted_policy import TestTEDPolicy
class TestUnexpecTEDIntentPolicy(TestTEDPolicy):
@staticmethod
def _policy_class_to_test() -> Type[UnexpecTEDIntentPolicy]:
return UnexpecTEDIntentPolicy
@pytest.fixture(scope="class")
def featurizer(self) -> TrackerFeaturizer:
featurizer = IntentMaxHistoryTrackerFeaturizer(
IntentTokenizerSingleStateFeaturizer(), max_history=self.max_history
)
return featurizer
@staticmethod
def persist_and_load_policy(
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
):
return trained_policy.__class__.load(
trained_policy.config, model_storage, resource, execution_context
)
def test_ranking_length(self, trained_policy: UnexpecTEDIntentPolicy):
assert trained_policy.config[RANKING_LENGTH] == LABEL_RANKING_LENGTH
def test_ranking_length_and_renormalization(
self,
trained_policy: UnexpecTEDIntentPolicy,
tracker: DialogueStateTracker,
default_domain: Domain,
):
precomputations = None
prediction_metadata = trained_policy.predict_action_probabilities(
tracker, default_domain, precomputations,
).action_metadata
assert (
prediction_metadata is None
or len(prediction_metadata[RANKING_KEY])
== trained_policy.config[RANKING_LENGTH]
)
def test_label_data_assembly(
self, trained_policy: UnexpecTEDIntentPolicy, default_domain: Domain
):
# Construct input data
state_featurizer = trained_policy.featurizer.state_featurizer
encoded_all_labels = state_featurizer.encode_all_labels(
default_domain, precomputations=None
)
attribute_data, _ = model_data_utils.convert_to_data_format(encoded_all_labels)
assembled_label_data = trained_policy._assemble_label_data(
attribute_data, default_domain
)
assembled_label_data_signature = assembled_label_data.get_signature()
assert list(assembled_label_data_signature.keys()) == [
f"{LABEL}_{INTENT}",
LABEL,
]
assert assembled_label_data.num_examples == len(default_domain.intents)
assert list(assembled_label_data_signature[f"{LABEL}_{INTENT}"].keys()) == [
MASK,
SENTENCE,
]
assert list(assembled_label_data_signature[LABEL].keys()) == [IDS]
assert assembled_label_data_signature[f"{LABEL}_{INTENT}"][SENTENCE][
0
].units == len(default_domain.intents)
def test_training_with_no_intent(
self,
featurizer: Optional[TrackerFeaturizer],
default_domain: Domain,
tmp_path: Path,
caplog: LogCaptureFixture,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
):
stories = tmp_path / "stories.yml"
stories.write_text(
"""
version: "3.0"
stories:
- story: test path
steps:
- action: utter_greet
"""
)
policy = self.create_policy(
featurizer=featurizer,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
)
import tests.core.test_policies
training_trackers = tests.core.test_policies.train_trackers(
default_domain, str(stories), augmentation_factor=20
)
with pytest.warns(UserWarning):
policy.train(training_trackers, default_domain, precomputations=None)
def test_prepared_data_for_threshold_prediction(
self,
trained_policy: UnexpecTEDIntentPolicy,
default_domain: Domain,
stories_path: Path,
):
training_trackers = train_trackers(
default_domain, stories_path, augmentation_factor=0
)
training_model_data, _ = trained_policy._prepare_for_training(
training_trackers, default_domain, precomputations=None,
)
data_for_prediction = trained_policy._prepare_data_for_prediction(
training_model_data
)
assert set(data_for_prediction.data.keys()).issubset(PREDICTION_FEATURES)
def test_similarities_collection_for_label_ids(self):
label_ids = np.array([[0, 1], [1, -1], [2, -1]])
outputs = {
"similarities": np.array(
[[[1.2, 0.3, 0.2]], [[0.5, 0.2, 1.6]], [[0.01, 0.1, 1.7]],]
)
}
label_id_similarities = UnexpecTEDIntentPolicy._collect_label_id_grouped_scores(
outputs, label_ids
)
# Should contain similarities for all label ids except padding token.
assert sorted(list(label_id_similarities.keys())) == [0, 1, 2]
# Cross-check that the collected similarities are correct for each label id.
assert label_id_similarities[0] == {
POSITIVE_SCORES_KEY: [1.2],
NEGATIVE_SCORES_KEY: [0.5, 0.01],
}
assert label_id_similarities[1] == {
POSITIVE_SCORES_KEY: [0.3, 0.2],
NEGATIVE_SCORES_KEY: [0.1],
}
assert label_id_similarities[2] == {
POSITIVE_SCORES_KEY: [1.7],
NEGATIVE_SCORES_KEY: [0.2, 1.6],
}
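        # Illustration (sketch, restating the asserts above): label id 0 is a ground
        # truth label only in the first example, so its similarity there (1.2) is
        # collected as a positive score, while its similarities in the remaining
        # examples (0.5 and 0.01) are collected as negative scores.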
def test_label_quantiles_computation(self):
label_id_scores = {
0: {
POSITIVE_SCORES_KEY: [1.3, 0.2],
NEGATIVE_SCORES_KEY: [
-0.1,
-1.2,
-2.3,
-4.1,
-0.5,
0.2,
0.8,
0.9,
-3.2,
-2.7,
],
},
3: {POSITIVE_SCORES_KEY: [1.3, 0.2], NEGATIVE_SCORES_KEY: [-0.1]},
6: {POSITIVE_SCORES_KEY: [1.3, 0.2], NEGATIVE_SCORES_KEY: []},
}
expected_thresholds = {
0: [
0.2,
0.2,
0.2,
0.2,
0.2,
-0.1,
-0.1,
-0.5,
-0.5,
-1.2,
-1.2,
-1.2,
-2.3,
-2.3,
-2.7,
-2.7,
-3.2,
-3.2,
-4.1,
-4.1,
],
3: [
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
],
6: [
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
],
}
thresholds = UnexpecTEDIntentPolicy._compute_label_quantiles(label_id_scores)
assert sorted(list(thresholds.keys())) == sorted(
list(expected_thresholds.keys())
)
for label_id, tolerance_thresholds in thresholds.items():
assert expected_thresholds[label_id] == tolerance_thresholds
def test_post_training_threshold_computation(
self,
trained_policy: UnexpecTEDIntentPolicy,
default_domain: Domain,
stories_path: Path,
):
training_trackers = train_trackers(
default_domain, stories_path, augmentation_factor=0
)
training_model_data, label_ids = trained_policy._prepare_for_training(
training_trackers, default_domain, precomputations=None,
)
trained_policy.compute_label_quantiles_post_training(
training_model_data, label_ids
)
computed_thresholds = trained_policy.label_quantiles
# -1 is used for padding and hence is not expected in the keys
expected_keys = list(np.unique(label_ids))
expected_keys.remove(-1)
assert sorted(list(computed_thresholds.keys())) == sorted(expected_keys)
@pytest.mark.parametrize(
"tolerance, expected_thresholds",
[
(0.0, [0.2, -0.1, 0.2]),
(0.75, [-2.9, -0.1, -4.3]),
(0.72, [-2.7, -0.1, -4.0]),
(0.78, [-2.9, -0.1, -4.3]),
(1.0, [-4.1, -0.1, -5.5]),
],
)
def test_pick_thresholds_for_labels(
self, tolerance: float, expected_thresholds: List[float]
):
label_id_tolerance_thresholds = {
0: [
0.2,
0.2,
0.2,
0.2,
0.2,
0.2,
-0.1,
-0.1,
-0.5,
-0.5,
-1.2,
-1.2,
-2.3,
-2.3,
-2.7,
-2.9,
-3.2,
-3.2,
-4.1,
-4.1,
],
3: [
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
-0.1,
],
4: [0.2 - (index * 0.3) for index in range(20)],
}
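        # Note on label 4 (added sketch): the comprehension above yields
        # 0.2, -0.1, -0.4, ... down to -5.5 in steps of 0.3, so a tolerance of
        # 0.75 lands around index 15, i.e. -4.3, matching the parametrized
        # expectations (the exact indexing rule lives in `_pick_thresholds`).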
thresholds = UnexpecTEDIntentPolicy._pick_thresholds(
label_id_tolerance_thresholds, tolerance
)
assert sorted(list(thresholds.keys())) == sorted(
list(label_id_tolerance_thresholds.keys())
)
computed_values = list(thresholds.values())
assert expected_thresholds == computed_values
@pytest.mark.parametrize(
"predicted_similarity, threshold_value, is_unlikely",
[(1.2, 0.2, False), (0.3, -0.1, False), (-1.5, 0.03, True)],
)
def test_unlikely_intent_check(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
predicted_similarity: float,
threshold_value: float,
is_unlikely: bool,
tmp_path: Path,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
# Construct dummy similarities
similarities = np.array([[0.0] * len(default_domain.intents)])
dummy_intent_index = 4
similarities[0, dummy_intent_index] = predicted_similarity
loaded_policy.label_thresholds[dummy_intent_index] = threshold_value
query_intent = default_domain.intents[dummy_intent_index]
unlikely_intent_prediction = loaded_policy._check_unlikely_intent(
default_domain, similarities, query_intent
)
assert is_unlikely == unlikely_intent_prediction
def test_should_check_for_intent(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
tmp_path: Path,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
intent_index = 0
assert (
loaded_policy._should_check_for_intent(
default_domain.intents[intent_index], default_domain
)
is False
)
intent_index = 4
assert loaded_policy._should_check_for_intent(
default_domain.intents[intent_index], default_domain
)
loaded_policy.config[IGNORE_INTENTS_LIST] = [
default_domain.intents[intent_index]
]
assert (
loaded_policy._should_check_for_intent(
default_domain.intents[intent_index], default_domain
)
is False
)
def test_no_action_unlikely_intent_prediction(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
tmp_path: Path,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
expected_probabilities = [0] * default_domain.num_actions
precomputations = None
tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations
)
assert prediction.probabilities == expected_probabilities
tracker.update_with_events(
[
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted(action_name="utter_greet"),
],
default_domain,
)
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations
)
assert prediction.probabilities == expected_probabilities
loaded_policy.model = None
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations
)
assert prediction.probabilities == expected_probabilities
@pytest.mark.parametrize(
"predicted_similarity, threshold_value, is_unlikely",
[(1.2, 0.2, False), (0.3, -0.1, False), (-1.5, 0.03, True)],
)
def test_action_unlikely_intent_prediction(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
predicted_similarity: float,
threshold_value: float,
is_unlikely: bool,
monkeypatch: MonkeyPatch,
tmp_path: Path,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
similarities = np.array([[[0.0] * len(default_domain.intents)]])
dummy_intent_index = 4
similarities[0, 0, dummy_intent_index] = predicted_similarity
query_intent = default_domain.intents[dummy_intent_index]
loaded_policy.label_thresholds[dummy_intent_index] = threshold_value
precomputations = None
tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
tracker.update_with_events(
[UserUttered(text="hello", intent={"name": query_intent})], default_domain,
)
# Preset the model predictions to the similarity values
# so that we don't need to hardcode for particular model predictions.
monkeypatch.setattr(
loaded_policy.model,
"run_inference",
lambda data: {"similarities": similarities},
)
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations
)
if not is_unlikely:
assert prediction.probabilities == [0.0] * default_domain.num_actions
else:
assert (
prediction.probabilities[
default_domain.index_for_action(ACTION_UNLIKELY_INTENT_NAME)
]
== 1.0
)
# Make sure metadata is set. The exact structure
# of the metadata is tested separately and
# not as part of this test.
assert prediction.action_metadata is not None
# Assert metadata is serializable
assert json.dumps(prediction.action_metadata)
@pytest.mark.parametrize(
"tracker_events, should_skip",
[
([], True),
([ActionExecuted("action_listen")], True),
(
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "greet"}),
],
False,
),
(
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "greet"}),
EntitiesAdded([{"name": "dummy"}]),
],
False,
),
(
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "greet"}),
SlotSet("name"),
],
False,
),
(
[
ActiveLoop("loop"),
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "greet"}),
ActionExecutionRejected("loop"),
],
False,
),
(
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
],
True,
),
],
)
def test_skip_predictions_to_prevent_loop(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
caplog: LogCaptureFixture,
tracker_events: List[Event],
should_skip: bool,
tmp_path: Path,
):
"""Skips predictions to prevent loop."""
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
precomputations = None
tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
tracker.update_with_events(tracker_events, default_domain)
with caplog.at_level(logging.DEBUG):
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations
)
assert (
"Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text
) == should_skip
if should_skip:
assert prediction.probabilities == loaded_policy._default_predictions(
default_domain
)
@pytest.mark.parametrize(
"tracker_events",
[
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "inexistent_intent"}),
],
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "inexistent_intent"}),
EntitiesAdded([{"name": "dummy"}]),
],
[
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "inexistent_intent"}),
SlotSet("name"),
],
[
ActiveLoop("loop"),
ActionExecuted("action_listen"),
UserUttered("hi", intent={"name": "inexistent_intent"}),
ActionExecutionRejected("loop"),
],
],
)
def test_skip_predictions_if_new_intent(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
caplog: LogCaptureFixture,
tracker_events: List[Event],
):
"""Skips predictions if there's a new intent created."""
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
tracker.update_with_events(tracker_events, default_domain)
with caplog.at_level(logging.DEBUG):
prediction = loaded_policy.predict_action_probabilities(
tracker, default_domain, precomputations=None,
)
assert "Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text
assert prediction.probabilities == loaded_policy._default_predictions(
default_domain
)
@pytest.mark.parametrize(
"tracker_events_with_action, tracker_events_without_action",
[
(
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("utter_greet"),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
),
(
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("utter_greet"),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
EntitiesAdded(entities=[{"entity": "name", "value": "Peter"},]),
ActionExecuted("utter_greet"),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
),
(
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("some_form"),
ActiveLoop("some_form"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="default", intent={"name": "default"}),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("some_form"),
ActiveLoop("some_form"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="default", intent={"name": "default"}),
UserUttered(text="sad", intent={"name": "thank_you"}),
],
),
],
)
def test_ignore_action_unlikely_intent(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
tracker_events_with_action: List[Event],
tracker_events_without_action: List[Event],
tmp_path: Path,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
precomputations = None
tracker_with_action = DialogueStateTracker.from_events(
"test 1", evts=tracker_events_with_action
)
tracker_without_action = DialogueStateTracker.from_events(
"test 2", evts=tracker_events_without_action
)
prediction_with_action = loaded_policy.predict_action_probabilities(
tracker_with_action, default_domain, precomputations
)
prediction_without_action = loaded_policy.predict_action_probabilities(
tracker_without_action, default_domain, precomputations
)
# If the weights didn't change then both trackers
# should result in same prediction. For `UnexpecTEDIntentPolicy`, the real
# prediction is inside action metadata.
assert (
prediction_with_action.action_metadata
== prediction_without_action.action_metadata
)
def test_label_embedding_collection(self, trained_policy: UnexpecTEDIntentPolicy):
label_ids = tf.constant([[[2], [-1]], [[1], [2]], [[0], [-1]]], dtype=tf.int32)
all_label_embeddings = np.random.random((10, 20))
# `-1` is used as padding label id. The embedding for it
# will be the same as `label_id=0`
expected_extracted_label_embeddings = tf.constant(
np.concatenate(
[
all_label_embeddings[2],
all_label_embeddings[0],
all_label_embeddings[1],
all_label_embeddings[2],
all_label_embeddings[0],
all_label_embeddings[0],
]
).reshape((3, 2, 20)),
dtype=tf.float32,
)
actual_extracted_label_embeddings = trained_policy.model._get_labels_embed(
label_ids, tf.constant(all_label_embeddings, dtype=tf.float32)
)
assert np.all(
expected_extracted_label_embeddings == actual_extracted_label_embeddings
)
@pytest.mark.parametrize(
"query_intent_index, ranking_length", [(0, 0), (1, 3), (2, 1), (5, 0)]
)
def test_collect_action_metadata(
self,
trained_policy: UnexpecTEDIntentPolicy,
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
default_domain: Domain,
tmp_path: Path,
query_intent_index: int,
ranking_length: int,
):
loaded_policy = self.persist_and_load_policy(
trained_policy, model_storage, resource, execution_context
)
def test_individual_label_metadata(
label_metadata: Dict[Text, Optional[float]],
all_thresholds: Dict[int, float],
all_similarities: np.array,
label_index: int,
):
expected_score = all_similarities[0][label_index]
expected_threshold = (
all_thresholds[label_index] if label_index in all_thresholds else None
)
expected_severity = (
expected_threshold - expected_score if expected_threshold else None
)
assert label_metadata.get(SCORE_KEY) == expected_score
assert label_metadata.get(THRESHOLD_KEY) == expected_threshold
assert label_metadata.get(SEVERITY_KEY) == expected_severity
# Monkey-patch certain attributes of the policy to make the testing easier.
label_thresholds = {0: 1.2, 1: -0.3, 4: -2.3, 5: 0.2}
loaded_policy.label_thresholds = label_thresholds
loaded_policy.config[RANKING_LENGTH] = ranking_length
# Some dummy similarities
similarities = np.array([[3.2, 0.2, -1.2, -4.3, -5.1, 2.3]])
query_intent = default_domain.intents[query_intent_index]
metadata = loaded_policy._collect_action_metadata(
default_domain, similarities, query_intent=query_intent
)
# Expected outer-most keys
assert sorted(list(metadata.keys())) == sorted([QUERY_INTENT_KEY, RANKING_KEY])
# Schema validation for query intent key
assert sorted(list(metadata[QUERY_INTENT_KEY].keys())) == sorted(
[NAME, SCORE_KEY, THRESHOLD_KEY, SEVERITY_KEY]
)
# Test all elements of metadata for query intent
assert metadata[QUERY_INTENT_KEY].get(NAME) == query_intent
test_individual_label_metadata(
metadata.get(QUERY_INTENT_KEY),
label_thresholds,
similarities,
query_intent_index,
)
# Check if ranking is sorted correctly and truncated to `ranking_length`
sorted_label_similarities = sorted(
[(index, score) for index, score in enumerate(similarities[0])],
key=lambda x: -x[1],
)
sorted_label_similarities = (
sorted_label_similarities[:ranking_length]
if ranking_length
else sorted_label_similarities
)
expected_label_rankings = [
default_domain.intents[index] for index, _ in sorted_label_similarities
]
collected_label_rankings = [
label_metadata.get(NAME) for label_metadata in metadata.get(RANKING_KEY)
]
assert collected_label_rankings == expected_label_rankings
# Test all elements of metadata for all labels in ranking
for label_metadata in metadata.get(RANKING_KEY):
label_index = default_domain.intents.index(label_metadata.get(NAME))
test_individual_label_metadata(
label_metadata, label_thresholds, similarities, label_index
)
@pytest.mark.parametrize(
"tracker_events_for_training, expected_trackers_with_events",
[
# Filter because of no intent and action name
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="happy to make it work"),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
),
# Filter because of no action name
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
),
# Filter because of no intent
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="happy to make it work"),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
),
# No filter needed
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
),
# Filter to return empty list of trackers
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
[],
),
],
)
def test_filter_training_trackers(
self,
tracker_events_for_training: List[List[Event]],
expected_trackers_with_events: List[List[Event]],
domain: Domain,
):
trackers_for_training = [
TrackerWithCachedStates.from_events(
sender_id=f"{tracker_index}", evts=events, domain=domain
)
for tracker_index, events in enumerate(tracker_events_for_training)
]
filtered_trackers = UnexpecTEDIntentPolicy._get_trackers_for_training(
trackers_for_training
)
assert len(filtered_trackers) == len(expected_trackers_with_events)
for collected_tracker, expected_tracker_events in zip(
filtered_trackers, expected_trackers_with_events
):
collected_tracker_events = list(collected_tracker.events)
assert collected_tracker_events == expected_tracker_events
@pytest.mark.parametrize(
"tracker_events, skip_training",
[
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello", intent={"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="happy to make it work"),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
False,
),
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="happy to make it work"),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
True,
),
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="happy to make it work"),
ActionExecuted("utter_goodbye"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
True,
),
(
[
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(text="hello"),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
text="happy to make it work", intent={"name": "goodbye"}
),
ActionExecuted(action_text="Great!"),
ActionExecuted(ACTION_LISTEN_NAME),
],
],
True,
),
],
)
def test_train_with_e2e_data(
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
tracker_events: List[List[Event]],
skip_training: bool,
domain: Domain,
):
policy = UnexpecTEDIntentPolicy(
UnexpecTEDIntentPolicy.get_default_config(),
default_model_storage,
Resource("UnexpecTEDIntentPolicy"),
default_execution_context,
featurizer=IntentMaxHistoryTrackerFeaturizer(
IntentTokenizerSingleStateFeaturizer()
),
)
trackers_for_training = [
TrackerWithCachedStates.from_events(
sender_id=f"{tracker_index}", evts=events, domain=domain
)
for tracker_index, events in enumerate(tracker_events)
]
if skip_training:
with pytest.warns(UserWarning):
policy.train(trackers_for_training, domain, precomputations=None)
else:
policy.train(trackers_for_training, domain, precomputations=None)
|
RasaHQ/rasa_nlu
|
tests/core/policies/test_unexpected_intent_policy.py
|
Python
|
apache-2.0
| 43,992
|
from flask import Blueprint
blueprint = Blueprint('home', __name__)
from . import errors
from . import views
|
shuhari/yuhao.space
|
src/app/blueprints/home/__init__.py
|
Python
|
apache-2.0
| 113
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
from _objects import DatabaseObject
from _pgsql import pgQuery
from wh import xlt
class Schema(DatabaseObject):
typename=xlt("Schema")
shortname=xlt("Schema")
refreshOid="nsp.oid"
@staticmethod
def InstancesQuery(parentNode):
sql=pgQuery("pg_namespace nsp")
sql.AddCol("nsp.oid, nspacl, nspname as name, pg_get_userbyid(nspowner) AS owner, description")
sql.AddLeft("pg_description des ON des.objoid=nsp.oid")
sql.AddWhere("(nsp.oid=2200 OR nsp.oid > %d)" % parentNode.GetServer().GetLastSysOid())
sql.AddOrder("nspname")
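    # The builder above corresponds roughly to:
    #   SELECT nsp.oid, nspacl, nspname as name, pg_get_userbyid(nspowner) AS owner, description
    #   FROM pg_namespace nsp LEFT JOIN pg_description des ON des.objoid=nsp.oid
    #   WHERE (nsp.oid=2200 OR nsp.oid > <lastSysOid>) ORDER BY nspname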
return sql
def GetIcon(self):
icons=[]
icons.append("Schema")
oid=self.GetOid()
if oid <= self.GetServer().GetLastSysOid() and oid != 2200:
icons.append('pg')
return self.GetImageId(icons)
def GetProperties(self):
if not len(self.properties):
self.properties = [
(xlt("Name"), self.name),
( "OID" , self.info['oid']),
(xlt("Owner"), self.info['owner']),
(xlt("ACL"), self.info['nspacl'])
]
self.AddProperty(xlt("Description"), self.info['description'])
return self.properties
nodeinfo= [ { "class" : Schema, "parents": ["Database"], "sort": 4, "collection": xlt("Schemas"), } ]
|
andreas-p/admin4
|
modPg/Schema.py
|
Python
|
apache-2.0
| 1,423
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import argparse
import sys
import code
import re
import os
import functools
from ansicolor import blue, red, black, green
from multiprocessing import cpu_count
from YakDB.Dump import dumpYDF, importYDFDump
import YakDB
def checkConnection(args):
import YakDB
#Check request/reply connection
print (blue("Checking request/reply connection...", bold=True))
conn = YakDB.Connection()
conn.connect(args.req_endpoint)
#Request server info
print((conn.serverInfo()).decode("utf-8"))
print(green("REQ/REP connection attempt successful"))
#Check push/pull connection
print (blue("Checking push/pull connection...", bold=True))
print(green("PUSH/PULL connection attempt successful"))
conn = YakDB.Connection()
conn.usePushMode()
conn.connect(args.push_endpoint)
def index(args):
from Translatron.Indexing.NLTKIndexer import runIndexerCLITool
runIndexerCLITool(args)
def importDocuments(args):
from Translatron.DocumentImport.PMC import runPMCImporterCLITool
runPMCImporterCLITool(args)
def importEntities(args):
for infile in args.infile:
basename = os.path.basename(infile)
if re.match(r"uniprot_[a-z]+\.dat\.gz", basename):
print(blue("Importing UniProt file..."))
from Translatron.Entities.UniProtImporter import importUniprot
importUniprot(args, infile)
elif re.match(r"d\d{4}.bin", basename):
print(blue("Importing MeSH file..."))
from Translatron.Entities.MeSHImporter import importMeSH
importMeSH(args, infile)
elif re.match(r"[a-z][a-z]wiki.+titles.+\.gz", basename):
print(blue("Importing Wikipedia page title file..."))
from Translatron.Entities.WikipediaImporter import importWikimediaPagelist
importWikimediaPagelist(args, infile)
else:
print (red("Can't interpret entity input file (uniprot_sprot.dat.gz - UniProt) %s " % basename))
def runServer(args):
"Run the main translatron server. Does not terminate."
from Translatron.Server import startTranslatron
startTranslatron(http_port=args.http_port)
def repl(dbargs):
code.InteractiveConsole(locals={}).interact("Translatron REPL (prototype)")
def __getDumpFilenames(args):
"Generate a tuple of filenames to dump to / restore from"
suffix = ".xz" if hasattr(args, "xz") and args.xz else ".gz"
prefix = args.outprefix if hasattr(args, "outprefix") else args.inprefix
documentsFilename = prefix + ".documents.ydf" + suffix
entitiesFilename = prefix + ".entities.ydf" + suffix
docidxFilename = prefix + ".docidx.ydf" + suffix
entityidxidxFilename = prefix + ".entityidx.ydf" + suffix
return (documentsFilename, entitiesFilename, docidxFilename, entityidxidxFilename)
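# Example (sketch, using the CLI defaults below): with outprefix "translatron-dump"
# and no --xz flag this returns
#   ("translatron-dump.documents.ydf.gz", "translatron-dump.entities.ydf.gz",
#    "translatron-dump.docidx.ydf.gz", "translatron-dump.entityidx.ydf.gz")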
def exportDump(args):
#Setup raw YakDB connection
conn = YakDB.Connection()
conn.connect(args.req_endpoint)
#Filenames to dump to
filenames = __getDumpFilenames(args)
#Dump every table
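    # Table ids used throughout this CLI: 1 = documents, 2 = entities,
    # 3 = document index, 4 = entity index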
if not args.no_documents:
print (blue("Dumping document table to " + filenames[0], bold=True))
dumpYDF(conn, filenames[0], 1)
if not args.no_entities:
print (blue("Dumping entity table to " + filenames[1], bold=True))
dumpYDF(conn, filenames[1], 2)
if not args.no_document_idx:
print (blue("Dumping document index table to " + filenames[2], bold=True))
dumpYDF(conn, filenames[2], 3)
if not args.no_entity_idx:
print (blue("Dumping entity index table to " + filenames[3], bold=True))
dumpYDF(conn, filenames[3], 4)
def restoreDump(args):
#Setup raw YakDB connection
conn = YakDB.Connection()
conn.connect(args.req_endpoint)
    #Filenames to restore from
filenames = __getDumpFilenames(args)
#NOTE: Partial & incremental restore is supported
    #Restore every table if the corresponding file exists
if not args.no_documents:
if not os.path.isfile(filenames[0]):
print (red("Can't find document table file " + filenames[0], bold=True))
else: #It's a regular file
print (blue("Restoring document table from " + filenames[0], bold=True))
importYDFDump(conn, filenames[0], 1)
if not args.no_entities:
if not os.path.isfile(filenames[1]):
print (red("Can't find entity table file " + filenames[1], bold=True))
else: #It's a regular file
print (blue("Restoring entity table from " + filenames[1], bold=True))
importYDFDump(conn, filenames[1], 2)
if not args.no_document_idx:
if not os.path.isfile(filenames[2]):
print (red("Can't find document index table file " + filenames[2], bold=True))
else: #It's a regular file
print (blue("Restoring document index table from " + filenames[2], bold=True))
importYDFDump(conn, filenames[2], 3)
if not args.no_entity_idx:
if not os.path.isfile(filenames[3]):
print (red("Can't find document index table file " + filenames[3], bold=True))
else: #It's a regular file
print (blue("Restoring entity index table from " + filenames[3], bold=True))
importYDFDump(conn, filenames[3], 4)
def compact(args):
"Compact one ore more table"
#Setup raw YakDB connection
conn = YakDB.Connection()
conn.connect(args.req_endpoint)
    #Compact every table unless it was excluded on the command line
if not args.no_documents:
print (blue("Compacting document table... ", bold=True))
conn.compactRange(1)
if not args.no_entities:
print (blue("Compacting entity table... ", bold=True))
conn.compactRange(2)
if not args.no_document_idx:
print (blue("Compacting document index table... ", bold=True))
conn.compactRange(3)
if not args.no_entity_idx:
print (blue("Compacting entity index table... ", bold=True))
conn.compactRange(4)
def truncate(args):
"Delete data from one or more tables"
#Check if the user is sure
if not args.yes_i_know_what_i_am_doing:
print (red("This will delete all your Translatron data. If you are sure, please use --yes-i-know-what-i-am-doing ", bold=True))
return
#Setup raw YakDB connection
conn = YakDB.Connection()
conn.connect(args.req_endpoint)
#
    #Truncate every table unless it was excluded on the command line
if not args.no_documents:
print (blue("Truncating document table... ", bold=True))
if args.hard: conn.truncateTable(1)
else: conn.deleteRange(1, None, None, None)
if not args.no_entities:
print (blue("Truncating entity table... ", bold=True))
if args.hard: conn.truncateTable(2)
else: conn.deleteRange(2, None, None, None)
if not args.no_document_idx:
print (blue("Truncating document index table... ", bold=True))
if args.hard: conn.truncateTable(3)
else: conn.deleteRange(3, None, None, None)
if not args.no_entity_idx:
print (blue("Truncating entity index table... ", bold=True))
if args.hard: conn.truncateTable(4)
else: conn.deleteRange(4, None, None, None)
def initializeTranslatron(args):
import nltk
nltk.download("all")
def runTranslatronCLI():
"""
    Call this function to run the Translatron CLI on sys.argv.
"""
if sys.version_info.major < 3:
print(red("Translatron requires Python 3 to run.\nPlease run translatron using a python3k interpreter!", bold=True))
parser = argparse.ArgumentParser(description="Translatron client tool")
# Database options
    serverArgsGroup = parser.add_argument_group("Translatron connection options")
serverArgsGroup.add_argument(
"-r", "--request-reply-endpoint",
help="The endpoint for request-reply connections",
action="store",
default="ipc:///tmp/yakserver-rep",
dest="req_endpoint")
serverArgsGroup.add_argument(
"-p", "--push-pull-endpoint",
help="The endpoint for push-pull connections",
action="store",
default="ipc:///tmp/yakserver-pull",
dest="push_endpoint")
# CLI options
    cliOptsGroup = parser.add_argument_group("CLI options")
# Data is remapped in connection class
cliOptsGroup.add_argument(
"-q", "--quiet",
help="Don't print verbose info",
action="store_true",
dest="quiet")
###
# Create parsers for the individual commands
###
subparsers = parser.add_subparsers(title="Commands")
# Connection check
parserConnCheck = subparsers.add_parser("conncheck", description="Verify that a connection to YakDB is possible")
parserConnCheck.set_defaults(func=checkConnection)
# Run server
parserRun = subparsers.add_parser("run", description="Run the Translatron server")
parserRun.add_argument("--http-port", type=int, default=8080, help="Which port to listen on for HTTP requests")
parserRun.set_defaults(func=runServer)
# Indexer
parserIndex = subparsers.add_parser("index", description="Run the indexer for previously imported documents")
parserIndex.add_argument("--no-documents", action="store_true", help="Do not index documents")
parserIndex.add_argument("--no-entities", action="store_true", help="Do not index entities")
parserIndex.add_argument("-s", "--statistics", action="store_true", help="Print token frequency statistics")
parserIndex.set_defaults(func=index)
# Dump tables
parserDump = subparsers.add_parser("dump", description="Export database dump")
parserDump.add_argument("outprefix", default="translatron-dump", nargs='?', help="The file prefix to dump to. Table name and .xz is automatically appended")
parserDump.add_argument("--no-documents", action="store_true", help="Do not dump the documents table")
parserDump.add_argument("--no-entities", action="store_true", help="Do not dump the entity table")
parserDump.add_argument("--no-document-idx", action="store_true", help="Do not dump the document index table")
parserDump.add_argument("--no-entity-idx", action="store_true", help="Do not dump the entity index table")
parserDump.add_argument("-x", "--xz", action="store_true", help="Use XZ compression instead of the default GZ")
parserDump.set_defaults(func=exportDump)
# Restore tables
parserRestore = subparsers.add_parser("restore", description="Restore database dump (incremental)")
parserRestore.add_argument("inprefix", default="translatron-dump", nargs='?', help="The file prefix to restore from. Table name and .xz is automatically appended")
parserRestore.add_argument("--no-documents", action="store_true", help="Do not restore the documents table")
parserRestore.add_argument("--no-entities", action="store_true", help="Do not restore the entity table")
parserRestore.add_argument("--no-document-idx", action="store_true", help="Do not restore the document index table")
parserRestore.add_argument("--no-entity-idx", action="store_true", help="Do not restore the entity index table")
parserRestore.set_defaults(func=restoreDump)
# Compact all tables
parserCompact = subparsers.add_parser("compact", description="Perform a database compaction. Increases speed, but might take some time.")
parserCompact.add_argument("--no-documents", action="store_true", help="Do not compact the documents table")
parserCompact.add_argument("--no-entities", action="store_true", help="Do not compact the entity table")
parserCompact.add_argument("--no-document-idx", action="store_true", help="Do not compact the document index table")
parserCompact.add_argument("--no-entity-idx", action="store_true", help="Do not compact the entity index table")
parserCompact.set_defaults(func=compact)
# Truncate tables
parserTruncate = subparsers.add_parser("truncate", description="Delete data from one or more tables")
parserTruncate.add_argument("--no-documents", action="store_true", help="Do not truncate the documents table")
parserTruncate.add_argument("--no-entities", action="store_true", help="Do not truncate the entity table")
parserTruncate.add_argument("--no-document-idx", action="store_true", help="Do not truncate the document index table")
parserTruncate.add_argument("--no-entity-idx", action="store_true", help="Do not truncate the entity index table")
parserTruncate.add_argument("--yes-i-know-what-i-am-doing", action="store_true", help="Use this option if you are really sure you want to delete your data")
parserTruncate.add_argument("--hard", action="store_true", help="Hard truncation (YakDB truncate instead of delete-range). Unsafe but faster and avoids required compaction. Server restart might be required")
parserTruncate.set_defaults(func=truncate)
    # Import documents from PMC archives
parserImportDocuments = subparsers.add_parser("import-documents", description="Import documents")
parserImportDocuments.add_argument("infile", nargs="+", help="The PMC articles.X-Y.tar.gz input file(s)")
parserImportDocuments.add_argument("-w", "--workers", type=int, default=cpu_count(), help="The number of worker processes to use")
parserImportDocuments.add_argument("-f", "--filter", default="", help="Prefix filter for PMC TARs. For example, use ACS_Nano here to import only that journal")
parserImportDocuments.add_argument("-c", "--content-filter", default="", help="Case-insensitive content filter for. For example, use Coxiella here to import only documents containing the string coxiella. Applied on the raw document.")
parserImportDocuments.set_defaults(func=importDocuments)
    # Import entities from UniProt / MeSH / Wikipedia dumps
parserImportEntities = subparsers.add_parser("import-entities", description="Import entities")
parserImportEntities.add_argument("infile", nargs="+", help="The PMC articles.X-Y.tar.gz input file(s)")
parserImportEntities.add_argument("-w", "--workers", type=int, default=cpu_count(), help="The number of worker processes to use")
parserImportEntities.set_defaults(func=importEntities)
    # Initialize
parserInitialize = subparsers.add_parser("initialize", description="Initialize translatron (download NLTK data)")
parserInitialize.set_defaults(func=initializeTranslatron)
# REPL
parserREPL = subparsers.add_parser("repl", description="Start a Read-eval-print loop (REPL) for interactive DB usage")
parserREPL.set_defaults(func=repl)
###
# Parse and call the function
###
args = parser.parse_args()
# For some reason, the default=info setting only works with Python2
if "func" not in args:
print(red("No command specified, see help as listed below."))
print(red("Example: translatron conncheck"))
parser.print_help()
sys.exit(1)
args.func(args)
|
ulikoehler/Translatron
|
Translatron/CLI.py
|
Python
|
apache-2.0
| 14,998
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2012 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Trivial example of how to update container meta data. See:
http://docs.rackspace.com/files/api/v1/cf-devguide/content/Update_Container_Metadata-d1e1900.html
'''
import os, sys
# make sure our local copy of txcloudfiles is in sys.path
PATH_TO_TXCF = '../txcloudfiles/'
try:
import txcloudfiles
except ImportError:
txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF))
if txcfpath not in sys.path:
sys.path.insert(0, txcfpath)
from twisted.internet import reactor
from txcloudfiles import get_auth, UK_ENDPOINT, US_ENDPOINT, DataUsage
def _got_session(session):
print '> got session: %s' % session
container_name = 'some_test_container'
def _ok((response, v)):
'''
'response' is a transport.Response() instance.
'v' is boolean True.
'''
print '> got response: %s' % response
print '> set container error page: %s -> error.html' % container_name
reactor.stop()
print '> sending request'
# 'container' here is any name of an existing empty container. Can be a Container() object if you like.
session.set_cdn_container_error(container=container_name, error_file='error.html').addCallback(_ok).addErrback(_error)
def _error(e):
'''
'e' here will be a twisted.python.failure.Failure() instance wrapping
a ResponseError() object. ResponseError() instances contain information
about the request to help you find out why it errored through its
ResponseError().request attribute.
'''
print 'error!'
print e.printTraceback()
reactor.stop()
auth = get_auth(UK_ENDPOINT, os.environ.get('TXCFUSR', ''), os.environ.get('TXCFAPI', ''))
auth.get_session().addCallback(_got_session).addErrback(_error)
reactor.run()
'''
EOF
'''
|
meeb/txcloudfiles
|
examples/cdn_container_error.py
|
Python
|
apache-2.0
| 2,457
|
import setuptools
setuptools.setup(
name = "opencontrail-netns",
version = "0.2",
packages = setuptools.find_packages(),
entry_points = {
'console_scripts': [
'netns-daemon-start = opencontrail_netns.daemon_start:daemon_start',
'netns-daemon-stop = opencontrail_netns.daemon_stop:daemon_stop'
],
}
)
|
DreamLab/opencontrail-netns
|
setup.py
|
Python
|
apache-2.0
| 366
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.version import get_major_version
from resource_management.core.utils import PasswordString
from ambari_commons.credential_store_helper import get_password_from_credential_store
from urlparse import urlparse
import status_params
import os
import re
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
architecture = get_architecture()
# Needed since this writes out the Atlas Hive Hook config file.
cluster_name = config['clusterName']
hostname = config["hostname"]
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
stack_root = status_params.stack_root
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
major_stack_version = get_major_version(stack_version_formatted)
version_for_stack_feature_checks = get_stack_feature_version(config)
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
#spark_conf
spark_conf_dir = format("{stack_root}/current/spark-client/conf")
#hadoop params
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,stack_version_formatted):
stack_version = None
upgrade_stack = stack_select._get_upgrade_stack()
if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
stack_version = upgrade_stack[1]
# oozie-server or oozie-client, depending on role
oozie_root = status_params.component_directory
# using the correct oozie root dir, format the correct location
oozie_lib_dir = format("{stack_root}/current/{oozie_root}")
oozie_setup_sh = format("{stack_root}/current/{oozie_root}/bin/oozie-setup.sh")
oozie_webapps_dir = format("{stack_root}/current/{oozie_root}/oozie-server/webapps")
oozie_webapps_conf_dir = format("{stack_root}/current/{oozie_root}/oozie-server/conf")
oozie_libext_dir = format("{stack_root}/current/{oozie_root}/libext")
oozie_server_dir = format("{stack_root}/current/{oozie_root}/oozie-server")
oozie_shared_lib = format("{stack_root}/current/{oozie_root}/share")
oozie_home = format("{stack_root}/current/{oozie_root}")
oozie_bin_dir = format("{stack_root}/current/{oozie_root}/bin")
oozie_examples_regex = format("{stack_root}/current/{oozie_root}/doc")
# set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
# matches the version of oozie
falcon_home = format("{stack_root}/current/falcon-client")
if stack_version is not None:
falcon_home = '{0}/{1}/falcon'.format(stack_root, stack_version)
conf_dir = format("{stack_root}/current/{oozie_root}/conf")
hive_conf_dir = format("{conf_dir}/action-conf/hive")
else:
oozie_lib_dir = "/var/lib/oozie"
oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
oozie_libext_dir = "/usr/lib/oozie/libext"
oozie_server_dir = "/var/lib/oozie/oozie-server"
oozie_shared_lib = "/usr/lib/oozie/share"
oozie_home = "/usr/lib/oozie"
oozie_bin_dir = "/usr/bin"
falcon_home = '/usr/lib/falcon'
conf_dir = "/etc/oozie/conf"
hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
oozie_examples_regex = "/usr/share/doc/oozie-*"
execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
oozie_user = config['configurations']['oozie-env']['oozie_user']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smoke_hdfs_user_mode = 0770
service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
# This config actually contains {oozie_user}
oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
user_group = config['configurations']['cluster-env']['user_group']
jdk_location = config['hostLevelParams']['jdk_location']
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
oozie_tmp_dir = default("configurations/oozie-env/oozie_tmp_dir", "/var/tmp/oozie")
oozie_hdfs_user_dir = format("/user/{oozie_user}")
oozie_pid_dir = status_params.oozie_pid_dir
pid_file = status_params.pid_file
hadoop_jar_location = "/usr/lib/hadoop/"
java_share_dir = "/usr/share/java"
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
ext_js_file = "ext-2.2.zip"
ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
security_enabled = config['configurations']['cluster-env']['security_enabled']
oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
limits_conf_dir = "/etc/security/limits.d"
oozie_user_nofile_limit = default('/configurations/oozie-env/oozie_user_nofile_limit', 32000)
oozie_user_nproc_limit = default('/configurations/oozie-env/oozie_user_nproc_limit', 16000)
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
oozie_site = config['configurations']['oozie-site']
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
credential_store_enabled = False
if 'credentialStoreEnabled' in config:
credential_store_enabled = config['credentialStoreEnabled']
if security_enabled:
oozie_site = dict(config['configurations']['oozie-site'])
oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
# If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
# use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
# that any special principal name needed for HA is used rather than the Ambari-generated value
if "oozie.ha.authentication.kerberos.principal" in oozie_site:
oozie_site['oozie.authentication.kerberos.principal'] = oozie_site['oozie.ha.authentication.kerberos.principal']
http_principal = oozie_site['oozie.authentication.kerberos.principal']
# If a user-supplied oozie.ha.authentication.kerberos.keytab property exists in oozie-site,
# use it to replace the existing oozie.authentication.kerberos.keytab value. This is to ensure
# that any special keytab file needed for HA is used rather than the Ambari-generated value
if "oozie.ha.authentication.kerberos.keytab" in oozie_site:
oozie_site['oozie.authentication.kerberos.keytab'] = oozie_site['oozie.ha.authentication.kerberos.keytab']
if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
#older versions of oozie have problems when using _HOST in principal
oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
oozie_env_sh_template = config['configurations']['oozie-env']['content']
oracle_driver_jar_name = "ojdbc6.jar"
oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
if credential_store_enabled:
if 'hadoop.security.credential.provider.path' in config['configurations']['oozie-site']:
cs_lib_path = config['configurations']['oozie-site']['credentialStoreClassPath']
java_home = config['hostLevelParams']['java_home']
alias = 'oozie.service.JPAService.jdbc.password'
provider_path = config['configurations']['oozie-site']['hadoop.security.credential.provider.path']
oozie_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
else:
raise Exception("hadoop.security.credential.provider.path property should be set")
else:
oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or 'oozie.https.port' in config['configurations']['oozie-site'] or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
oozie_secure = '-secure'
else:
oozie_secure = ''
https_port = None
# try to get https port from oozie-env content
for line in oozie_env_sh_template.splitlines():
result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
if result is not None:
https_port = result.group(1)
# or from oozie-site.xml
if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
https_port = config['configurations']['oozie-site']['oozie.https.port']
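# e.g. (illustrative) a line "export OOZIE_HTTPS_PORT=11443" in the oozie-env content
# would set https_port to "11443"; otherwise oozie.https.port from oozie-site is used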
oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
service_check_job_name = default("/configurations/oozie-env/service_check_job_name", "no-op")
# construct proper url for https
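# (swap the scheme to https, then append the https port if the base url had no explicit
#  port, otherwise replace the existing port)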
if https_port is not None:
parsed_url = urlparse(oozie_base_url)
oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
if parsed_url.port is None:
    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
else:
oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
oozie_setup_sh_current = oozie_setup_sh
hdfs_site = config['configurations']['hdfs-site']
fs_root = config['configurations']['core-site']['fs.defaultFS']
if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_SETUP_SHARED_LIB, stack_version_formatted):
put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
# for older stack versions without the sharelib setup command
else:
put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
"com.mysql.jdbc.Driver":"mysql-connector-java.jar",
"org.postgresql.Driver":"postgresql-jdbc.jar",
"oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
"sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
# Note: keying on the fully qualified JDBC driver class name is fragile because class paths
# can change; keying on the database type would probably be more robust.
sqla_db_used = False
previous_jdbc_jar_name = None
if jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
jdbc_driver_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif jdbc_driver_name == "com.mysql.jdbc.Driver":
jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif jdbc_driver_name == "org.postgresql.Driver":
jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar") #oozie using it's own postgres jdbc
previous_jdbc_jar_name = None
elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
jdbc_driver_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif jdbc_driver_name == "sap.jdbc4.sqlanywhere.IDriver":
jdbc_driver_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
sqla_db_used = True
else:
jdbc_driver_jar = ""
jdbc_symlink_name = ""
previous_jdbc_jar_name = None
default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
driver_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
if jdbc_driver_name == "org.postgresql.Driver":
target = jdbc_driver_jar
previous_jdbc_jar = None
else:
target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
previous_jdbc_jar = format("{oozie_libext_dir}/{previous_jdbc_jar_name}")
#constants for type2 jdbc
jdbc_libs_dir = format("{oozie_libext_dir}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
has_falcon_host = not len(falcon_host) == 0
oozie_server_hostnames = default("/clusterHostInfo/oozie_server", [])
oozie_server_hostnames = sorted(oozie_server_hostnames)
oozie_log_maxhistory = default('configurations/oozie-log4j/oozie_log_maxhistory',720)
#oozie-log4j.properties
if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
log4j_props = config['configurations']['oozie-log4j']['content']
else:
log4j_props = None
oozie_hdfs_user_mode = 0775
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks needed by Hive on Oozie
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
if has_atlas_in_cluster():
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
# The logic for LZO also exists in HDFS' params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
all_lzo_packages = get_lzo_packages(stack_version_unformatted)
|
radicalbit/ambari
|
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
|
Python
|
apache-2.0
| 19,215
|
import os, sys, subprocess
def envIsInstalled():
currentPath = os.path.dirname(os.path.abspath(__file__))
if os.path.isdir(os.path.join(currentPath,"..","..", "miniconda2")):
return True
return False
def envInstall():
currentPath = os.path.dirname(os.path.abspath(__file__))
dir_install = os.path.join(currentPath,"..","..")
py_version = str(sys.version_info.major)+"."+str(sys.version_info.minor)
os_type = sys.platform
if os_type == "linux2": #Linux
import struct
nbit = struct.calcsize("P") * 8
path_env_install = os.path.join(currentPath,"..","TraficLib","envInstallTFLinux.sh")
cmd_env_install = path_env_install + " " + str(dir_install) + " " + str(py_version) + " " + str(nbit)
elif os_type == "darwin":
path_env_install = os.path.join(currentPath,"..","TraficLib","envInstallTFMacOS.sh") #MacOS
cmd_env_install = path_env_install + " " + str(dir_install) + " " + str(py_version)
cmd = ["bash", "-c", str(cmd_env_install)]
subprocess.Popen(cmd)
def runMaybeEnvInstallTF():
if not envIsInstalled():
envInstall()
if __name__ == '__main__':
runMaybeEnvInstallTF()
|
PrinceNgattaiLam/Trafic
|
TraficLib/envInstallTF.py
|
Python
|
apache-2.0
| 1,188
|
# config.py
host = "10.222.138.163"
port = 8080
mport = 1889
|
miaolujing/python_script
|
hawkeye_autotest/rtp/config.py
|
Python
|
apache-2.0
| 60
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that help with accept proposals status."""
from soc.modules.gsoc.models import accept_proposals_status
def getOrCreateStatusForProgram(program_entity):
"""Returns the AcceptProposalsStatus entity belonging to the given
program or creates a new one.
Args:
program_entity: Program entity to get or create the
AcceptProposalsStatus for.
"""
query = accept_proposals_status.GSoCAcceptProposalsStatus.all().filter(
'program', program_entity)
aps_entity = query.get()
if not aps_entity:
aps_entity = accept_proposals_status.GSoCAcceptProposalsStatus(
program=program_entity)
aps_entity.put()
return aps_entity
|
rhyolight/nupic.son
|
app/soc/modules/gsoc/logic/accept_proposals.py
|
Python
|
apache-2.0
| 1,256
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./data/", one_hot=True)
# Parameters
learning_rate = 0.1
training_epochs = 100
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
X = tf.placeholder("float", [None, n_input])
# Write your code here
# 1- Initialize your network weights and biases with a random distribution
# 2- Use Sigmoid as the activation function for the hidden and output layers
# 3- Uncomment the y_pred line after finishing your TensorFlow code
# Prediction
# y_pred = hidden_to_output_decode
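# --- Illustrative sketch (one possible completion of the exercise above) --------------
# Assumes a 784 -> 256 -> 128 -> 256 -> 784 autoencoder with sigmoid activations; the
# names `weights`, `biases`, `encoder_op` and `hidden_to_output_decode` are choices of
# this sketch, not part of the original template.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Encoder: 784 -> 256 -> 128, sigmoid activations
encoder_op = tf.nn.sigmoid(tf.add(tf.matmul(
    tf.nn.sigmoid(tf.add(tf.matmul(X, weights['encoder_h1']), biases['encoder_b1'])),
    weights['encoder_h2']), biases['encoder_b2']))
# Decoder: 128 -> 256 -> 784, sigmoid activations
hidden_to_output_decode = tf.nn.sigmoid(tf.add(tf.matmul(
    tf.nn.sigmoid(tf.add(tf.matmul(encoder_op, weights['decoder_h1']), biases['decoder_b1'])),
    weights['decoder_h2']), biases['decoder_b2']))
y_pred = hidden_to_output_decode
# ---------------------------------------------------------------------------------------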
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
f.show()
plt.draw()
plt.waitforbuttonpress()
|
AhmedHani/FCIS-Machine-Learning-2017
|
Session6/Practical/Template/mnist_autoencoder.py
|
Python
|
apache-2.0
| 2,278
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import PartitionAssignmentServiceClient
from .async_client import PartitionAssignmentServiceAsyncClient
__all__ = (
"PartitionAssignmentServiceClient",
"PartitionAssignmentServiceAsyncClient",
)
|
googleapis/python-pubsublite
|
google/cloud/pubsublite_v1/services/partition_assignment_service/__init__.py
|
Python
|
apache-2.0
| 817
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import numpy as np
import random
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from collections import defaultdict
from copy import deepcopy
from lego.cpp_sampler.online_sampler import OnlineSampler
from smore.evaluation.dataloader import TestDataset
from lego.common.utils import query_name_dict, flatten, get_edge_index, get_powerset, list2tuple, recursive_main, flatten_list
from scipy.special import softmax  # used by ProgramDataloader.__next__ for reward-weighted sampling
class RelationSampler(OnlineSampler):
def batch_generator(self, batch_size):
super_batch_gen = super(RelationSampler, self).batch_generator(batch_size)
while True:
pos_ans, _, _, _, q_args, q_structs = next(super_batch_gen)
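            # Augment each batch with random single entities, treated as trivial queries of
            # structure ('e', ()) alongside the sampled positive answers.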
single_ents = np.random.randint(low=0, high=self.kg.num_ent, size=(batch_size))
pos_ans = torch.cat([pos_ans, torch.LongTensor(single_ents)], dim=0)
q_args = q_args + np.expand_dims(single_ents, axis=1).tolist()
q_structs = q_structs + [('e',())]*batch_size
yield pos_ans, q_args, q_structs
class BranchSampler(OnlineSampler):
def batch_generator(self, batch_size):
super_batch_gen = super(BranchSampler, self).batch_generator(batch_size)
while True:
pos_ans, _, _, _, q_args, q_structs = next(super_batch_gen)
labels = [1]*len(q_structs)
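            # Flatten each intersection-style query into its per-branch sub-queries, and add
            # deliberately wrong splits (e.g. truncating a projection chain) as extra query
            # structures labelled 0; the original structures keep label 1.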
q_args_flatten = []
fake_q_args = []
fake_q_structs = []
for idx, q_struct in enumerate(q_structs):
if query_name_dict[q_struct] == '2i':
q_args_flatten.append(q_args[idx][:2])
q_args_flatten.append(q_args[idx][2:4])
elif query_name_dict[q_struct] == '3i':
q_args_flatten.append(q_args[idx][:2])
q_args_flatten.append(q_args[idx][2:4])
q_args_flatten.append(q_args[idx][4:6])
elif query_name_dict[q_struct] == 'pi':
q_args_flatten.append(q_args[idx][:3])
q_args_flatten.append(q_args[idx][3:5])
fake_q = q_args[idx]
fake_q_args.append(fake_q[:2])
fake_q_args.append(fake_q[3:5])
fake_q_structs.append((('e', ('r',)), ('e', ('r',))))
labels.append(0)
elif query_name_dict[q_struct] == '2pi':
q_args_flatten.append(q_args[idx][:3])
q_args_flatten.append(q_args[idx][3:6])
fake_q = q_args[idx]
fake_q_args.append(fake_q[:2])
fake_q_args.append(fake_q[3:6])
fake_q_structs.append((('e', ('r',)), ('e', ('r', 'r'))))
labels.append(0)
fake_q = q_args[idx]
fake_q_args.append(fake_q[:3])
fake_q_args.append(fake_q[3:5])
fake_q_structs.append((('e', ('r', 'r')), ('e', ('r',))))
labels.append(0)
elif query_name_dict[q_struct] == 'p3i':
q_args_flatten.append(q_args[idx][:3])
q_args_flatten.append(q_args[idx][3:5])
q_args_flatten.append(q_args[idx][5:7])
fake_q = q_args[idx]
fake_q_args.append(fake_q[:2])
fake_q_args.append(fake_q[3:5])
fake_q_args.append(fake_q[5:7])
fake_q_structs.append((('e', ('r',)), ('e', ('r',)), ('e', ('r',))))
labels.append(0)
q_args = q_args_flatten + fake_q_args
q_structs += fake_q_structs
q_structs_flatten = [x for y in q_structs for x in y]
assert len(q_args) == len(q_structs_flatten)
assert len(q_structs) == len(labels)
labels = torch.FloatTensor(labels).unsqueeze(1)
yield q_args, q_structs, q_structs_flatten, labels
class EvalRelationDataset(Dataset):
def __init__(self, data, nentity, nrelation):
self.queries = data['query']
self.structures = data['structure']
self.rels = data['rel']
self.len = len(self.queries)
self.nentity = nentity
self.nrelation = nrelation
def __len__(self):
return self.len
def __getitem__(self, idx):
return flatten(self.queries[idx]), self.queries[idx], self.structures[idx], self.rels[idx]
@staticmethod
def collate_fn(data):
query = [_[0] for _ in data]
query_unflatten = [_[1] for _ in data]
query_structure = [_[2] for _ in data]
rel = torch.LongTensor([_[3] for _ in data]).unsqueeze(1)
return query, query_unflatten, query_structure, rel
class EvalBranchDataset(Dataset):
def __init__(self, data, nentity, nrelation):
self.queries = data['query']
self.structures = data['structure']
self.labels = data['label']
self.len = len(self.queries)
self.nentity = nentity
self.nrelation = nrelation
def __len__(self):
return self.len
def __getitem__(self, idx):
return self.queries[idx], self.structures[idx], self.labels[idx]
@staticmethod
def collate_fn(data):
query_unflatten = [_[0] for _ in data]
query = [flatten(x) for y in query_unflatten for x in y]
query_structure_unflatten = [_[1] for _ in data]
query_structure = [x for y in query_structure_unflatten for x in y]
label = torch.LongTensor([_[2] for _ in data]).unsqueeze(1)
return query, query_unflatten, query_structure, query_structure_unflatten, label
class EvalQuestionDataset(TestDataset):
def __init__(self, data, answers, nentity, nrelation):
super(EvalQuestionDataset, self).__init__(nentity, nrelation)
self.data = data
self.answers = answers
self.test_all = True
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
question = self.data[idx][0]
query = self.data[idx][1]
query_structure = self.data[idx][2]
easy_answers = set()
hard_answers = self.answers[question]
if self.test_all:
neg_samples = None
else:
neg_samples = torch.LongTensor(list(hard_answers) + list(self.data[idx][4]))
return neg_samples, flatten(query), query, query_structure, easy_answers, hard_answers
def subset(self, pos, num):
data = self.data[pos : pos + num]
return EvalQuestionDataset(data, self.answers, self.nentity, self.nrelation)
class UnfoldedProgramDataloader(Dataset):
def __init__(self, data, nentity, nrelation, batch_size, query2box,
supervised_batch_size=0, supervised_minimum_reward=1.,
supervised_update_strictly_better=False,
max_nentity=4, shuffle=True, eval=False,
reduce='sum', weighted_sample=False, temperature=1.,
skip_indices=[], supervised_candidate_batch_size=1,):
self.len = len(data)
self.data = data
self.nentity = nentity
self.nrelation = nrelation
self.batch_size = batch_size
self.query2box = query2box
assert self.batch_size == 1, "batching not supported"
self.max_nentity = max_nentity
assert max_nentity > 1
self.max_y_score_len = int(pow(2, max_nentity)) - 1
self.i = 0
self.idxs = list(range(self.len))
self.shuffle = shuffle
self.eval = eval
self.all_edge_indices = []
self.all_powersets = []
self.reduce = reduce
self.supervised_batch_size = supervised_batch_size
self.supervised_minimum_reward = supervised_minimum_reward
self.supervised_update_strictly_better = supervised_update_strictly_better
self.supervised_candidate_batch_size = supervised_candidate_batch_size
self.skip_indices = skip_indices
for i in range(max_nentity + 1):
if i == 0:
self.all_edge_indices.append([])
self.all_powersets.append([])
else:
if i == 1:
self.all_edge_indices.append(np.array(get_edge_index(i)).T)
self.all_powersets.append(get_powerset(i))
else:
edge_index = np.array(get_edge_index(i)).T
edge_index[1] -= 1
self.all_edge_indices.append(edge_index)
self.all_powersets.append(get_powerset(i)[1:])
self.max_rewards = np.zeros((self.len))
self.avg_rewards = 0.01*np.ones((self.len))
self.n_sampled = np.ones((self.len))
self.weighted_sample = weighted_sample
self.temperature = temperature
self.best_solutions = [[] for _ in range(self.len)]
if shuffle:
np.random.shuffle(self.idxs)
def __len__(self):
return self.len
@staticmethod
def collate_fn(data):
x_scores = np.concatenate([_[0] for _ in data], 0)
x_relations = np.concatenate([_[1] for _ in data], 0)
y_scores = np.concatenate([_[2] for _ in data], 0)
y_relations = np.concatenate([_[3] for _ in data], 0)
mask_relations = np.concatenate([_[4] for _ in data], 0)
w_scores = np.concatenate([_[5] for _ in data], 0)
w_relations = np.concatenate([_[6] for _ in data], 0)
berts = np.concatenate([_[7] for _ in data], 0)
max_y_score_len = data[0][11]
mask_relations_class = np.concatenate([_[12] for _ in data], 0)
noisy_mask_relations = np.concatenate([_[15] for _ in data], 0)
nrelation = data[0][22]
idx_list = [_[23] for _ in data]
edge_indices, additional_edge_indices, question_indices, step_indices, softmax_edge_indices = [], [], [], [], []
n_program, n_data, n_powerset, n_question, n_candidate = 0, 0, 0, 0, 0
for i in range(len(data)):
edge_index = data[i][8]
edge_index[0] += n_data
edge_index[1] += n_powerset
edge_indices.append(edge_index)
additional_edge_index = data[i][16]
if len(additional_edge_index) > 0:
additional_edge_index += n_powerset
additional_edge_indices.append(additional_edge_index)
question_index = data[i][13]
question_index[0] += n_candidate
question_index[1] += n_question
question_indices.append(question_index)
step_index = data[i][14]
step_index[0] += n_program
step_index[1] += n_candidate
step_indices.append(step_index)
softmax_edge_index = data[i][9]
softmax_edge_index[0] += n_powerset
softmax_edge_index[1] += n_program * max_y_score_len
softmax_edge_indices.append(softmax_edge_index)
n_program += data[i][17]
n_data += data[i][18]
n_powerset += data[i][19]
n_question += data[i][20]
n_candidate += data[i][21]
if len(additional_edge_indices) > 0:
additional_edge_indices = np.concatenate(additional_edge_indices, 0)
additional_edge_indices = np.stack([[n_data]*len(additional_edge_indices), additional_edge_indices])
edge_indices.append(additional_edge_indices)
edge_indices = np.concatenate(edge_indices, axis=1)
softmax_edge_indices = np.concatenate(softmax_edge_indices, axis=1)
question_indices = np.concatenate(question_indices, axis=1)
step_indices = np.concatenate(step_indices, axis=1)
return x_scores, x_relations, y_scores, y_relations, mask_relations, w_scores, w_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len, mask_relations_class, question_indices, step_indices, noisy_mask_relations, nrelation, idx_list
def __getitem__(self, idx):
'''
x_scores: (n_data, center dim + offset dim)
x_relations: (n_data, center dim + offset dim + bert dim)
y_scores: (n_program, max_y_score_len)
y_relations: (n_data, nrelation)
mask_relations: (n_data)
w_scores: (n_program)
w_relations: (n_data)
berts: (n_powerset, bert dim)
edge_indices: (2, n_message_passing)
softmax_edge_indices: (2, n_powerset)
mask_relations_class: (n_data, nrelation)
note that n_powerset != n_data * max_y_score_len.
n_powerset = \sum_i 2^n_i (e.g. 2+8+4+16), n_data * max_y_score_len = 4*16,
n_message_passing = \sum n_i * 2^(n_i - 1),
n_program = args.batch_size
'''
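        # Worked example of the bookkeeping as implemented below: for branch counts
        # n_i = [1, 3, 2, 4] this code builds 2 + 7 + 3 + 15 = 27 powerset rows
        # (the n_i == 1 case keeps both subsets, larger n_i drop the empty set),
        # while n_data = 1 + 3 + 2 + 4 = 10 and n_program equals the number of steps.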
x_scores, x_relations, y_scores, y_relations = [], [], [], []
mask_relations, w_scores, w_relations, berts = [], [], [], []
noisy_mask_relations = []
edge_indices, softmax_edge_indices = [], []
mask_relations_class = []
        additional_edge_indices = []  # only populated when self.reduce == 'max'
n_data = 0
n_powerset = 0
n_program = 0
n_question = 0
n_candidate = 0
n_all, n_negative_one, n_str = 0, 0, 0
question_indices, step_indices = [], []
i = 0
num_cand = len(self.data[idx])
if num_cand < self.supervised_candidate_batch_size:
cand_indices = np.arange(num_cand)
else:
cand_indices = np.random.choice(num_cand, size=self.supervised_candidate_batch_size, replace=False)
for cand_idx in cand_indices:
for step_idx in range(len(self.data[idx][cand_idx])):
bert, center, offset, action, w, query, query_structure, question, mask_relation_class = self.data[idx][cand_idx][step_idx]
bert = np.array(bert) # (1, 768)
if len(center) == 1:
cur_powerset_len = 2
else:
cur_powerset_len = int(pow(2, len(center))-1)
berts.append(np.tile(bert, (cur_powerset_len, 1)))
powerset = self.all_powersets[len(center)]
assert len(powerset) == cur_powerset_len
target = tuple(np.arange(len(action[0]))[np.array(action[0]) == 1]) #(0,1) or (1) or (0,1,2)
y_scores.append(powerset.index(target))
edge_index = deepcopy(self.all_edge_indices[len(center)])
edge_index[0] += n_data
edge_index[1] += n_powerset
edge_indices.append(edge_index)
if self.reduce == 'max' and len(center) == 1:
additional_edge_indices.append(n_powerset)
softmax_edge_index = np.stack([n_powerset + np.arange(cur_powerset_len), n_program * self.max_y_score_len + np.arange(cur_powerset_len)])
softmax_edge_indices.append(softmax_edge_index)
w_scores.append(1/w)
step_indices.append(np.array([[n_program], [n_candidate]]))
n_program += 1
n_data += len(center)
n_powerset += cur_powerset_len
no_relation = type(action[-1]) == str
for j in range(len(center)):
x_scores.append(np.concatenate([center[j], offset[j]]))
x_relations.append(np.concatenate([center[j], offset[j], bert[0]]))
y_relations.append(action[-1] if not no_relation else 0)
mask_relations_class.append(mask_relation_class)
if no_relation:
mask_relations.append(0.)
if j == 0:
noisy_mask_relations.append(1.)
else:
noisy_mask_relations.append(0.)
else:
                        if self.eval: # when evaluating, keep every picked branch so all required predictions are made
mask_relations.append(1. if action[0][j] == 1 else 0.)
noisy_mask_relations.append(1. if action[0][j] == 1 else 0.)
                        else: # when training, drop datapoints whose mask_relation_class lacks the ground-truth relation
mask_relations.append(1. if action[0][j] == 1 and mask_relation_class[action[-1]] else 0.)
noisy_mask_relations.append(1. if action[0][j] == 1 and mask_relation_class[action[-1]] else 0.)
w_relations.append(1/w)
question_indices.append(np.array([[n_candidate], [n_question]]))
n_candidate += 1
n_question += 1
berts = np.concatenate(berts, axis=0)
edge_indices = np.concatenate(edge_indices, axis=1)
softmax_edge_indices = np.concatenate(softmax_edge_indices, axis=1)
question_indices = np.concatenate(question_indices, axis=1)
step_indices = np.concatenate(step_indices, axis=1)
if len(additional_edge_indices) > 0:
additional_edge_indices = np.array(additional_edge_indices)
return x_scores, x_relations, y_scores, y_relations, mask_relations, w_scores, w_relations, berts, edge_indices, softmax_edge_indices, n_program, self.max_y_score_len, mask_relations_class, question_indices, step_indices, noisy_mask_relations, additional_edge_indices, n_program, n_data, n_powerset, n_question, n_candidate, self.nrelation, idx
class SingledirectionalOneShotIterator(object):
def __init__(self, dataloader):
self.iterator = self.one_shot_iterator(dataloader)
self.step = 0
self.max_y_score_len = dataloader.dataset.max_y_score_len
self.eval = dataloader.dataset.eval
self.len = dataloader.dataset.len
def __len__(self):
return self.len
def __next__(self):
self.step += 1
data = next(self.iterator)
x_scores, x_relations, y_scores, y_relations, mask_relations, w_scores, w_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len, mask_relations_class, question_indices, step_indices, noisy_mask_relations, nrelation, idx_list = data
x_scores = torch.Tensor(x_scores)
x_relations = torch.Tensor(x_relations)
y_scores = torch.LongTensor(y_scores)
y_relations = torch.LongTensor(y_relations)
# y_relations = F.one_hot(torch.LongTensor(y_relations), nrelation)
mask_relations = torch.Tensor(mask_relations)
w_scores = torch.Tensor(w_scores)
w_relations = torch.Tensor(w_relations)
berts = torch.Tensor(berts)
mask_relations_class = torch.Tensor(mask_relations_class).bool()
noisy_mask_relations = torch.Tensor(noisy_mask_relations).bool()
edge_indices = torch.LongTensor(edge_indices)
softmax_edge_indices = torch.LongTensor(softmax_edge_indices)
question_indices = torch.LongTensor(question_indices)
step_indices = torch.LongTensor(step_indices)
return x_scores, x_relations, y_scores, y_relations, mask_relations, w_scores, w_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len, mask_relations_class, question_indices, step_indices, noisy_mask_relations
@staticmethod
def one_shot_iterator(dataloader):
while True:
for data in dataloader:
yield data
def train_supervised(self, maxlen, ground_truth=False):
return True
def next_supervised(self):
return self.__next__()
class ProgramDataloader(object):
def __init__(self, data, nentity, nrelation, batch_size, query2box,
supervised_batch_size=0, supervised_minimum_reward=1.,
supervised_update_strictly_better=False,
max_nentity=4, shuffle=True, eval=False,
reduce='sum', weighted_sample=False, temperature=1.,
skip_indices=[]):
# print (data, batch_size)
self.len = len(data)
self.data = data
self.nentity = nentity
self.nrelation = nrelation
self.batch_size = batch_size
self.query2box = query2box
assert self.batch_size == 1, "batching not supported"
self.max_nentity = max_nentity
assert max_nentity > 1
self.max_y_score_len = int(pow(2, max_nentity)) - 1
self.i = 0
self.idxs = list(range(self.len))
self.shuffle = shuffle
self.eval = eval
self.all_edge_indices = []
self.all_powersets = []
self.reduce = reduce
self.supervised_batch_size = supervised_batch_size
self.supervised_minimum_reward = supervised_minimum_reward
self.supervised_update_strictly_better = supervised_update_strictly_better
self.skip_indices = skip_indices
for i in range(max_nentity + 1):
if i == 0:
self.all_edge_indices.append([])
self.all_powersets.append([])
else:
if i == 1:
self.all_edge_indices.append(np.array(get_edge_index(i)).T)
self.all_powersets.append(get_powerset(i))
else:
edge_index = np.array(get_edge_index(i)).T
edge_index[1] -= 1
self.all_edge_indices.append(edge_index)
self.all_powersets.append(get_powerset(i)[1:])
self.max_rewards = np.zeros((self.len))
self.avg_rewards = 0.01*np.ones((self.len))
self.n_sampled = np.ones((self.len))
self.weighted_sample = weighted_sample
self.temperature = temperature
self.best_solutions = [[] for _ in range(self.len)]
if shuffle:
np.random.shuffle(self.idxs)
def train_supervised(self, maxlen, ground_truth=False):
if ground_truth:
try:
tmp = len(self.best_solutions_over_bar)
            except AttributeError:
self.best_solutions_over_bar = [item for i in range(self.len) for item in self.data[i]]
else:
self.best_solutions_over_bar = [item for i in range(self.len) if self.max_rewards[i] > self.supervised_minimum_reward and len(self.best_solutions[i]) <= maxlen+1 for item in self.best_solutions[i]]
return len(self.best_solutions_over_bar) > 10
def next_supervised(self):
cur_idxs = np.random.choice(len(self.best_solutions_over_bar), size=self.supervised_batch_size, replace=self.supervised_batch_size>len(self.best_solutions_over_bar))
return self.prepare_supervised_batch(cur_idxs)
def __len__(self):
return self.len
def __iter__(self):
return self
def __next__(self):
if self.weighted_sample:
cur_idxs = np.random.choice(self.len, size=self.batch_size, p=softmax(-self.temperature * self.avg_rewards))
return self.prepare_batch(cur_idxs)
else:
if self.i == self.len and self.eval:
return [None]*21
if self.i + self.batch_size > self.len:
cur_idxs = self.idxs[self.i:]
if self.eval:
self.i = self.len
else:
if self.shuffle:
np.random.shuffle(self.idxs)
cur_idxs += self.idxs[:self.i + self.batch_size - self.len]
if self.i + self.batch_size - self.len >= self.len:
self.i = 0
else:
self.i = self.i + self.batch_size - self.len
else:
cur_idxs = self.idxs[self.i:self.i + self.batch_size]
self.i += self.batch_size
cur_data = self.prepare_batch(cur_idxs)
return cur_data
def update_weight_and_solutions(self, cur_idxs, rewards, tmp_solutions, update_solution):
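        # Maintain a running average reward per question and, whenever a sampled program
        # beats (or, depending on supervised_update_strictly_better, ties) the best reward
        # seen so far, store it as that question's best solution for supervised replay.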
assert len(cur_idxs) == len(rewards)
for i, (idx, reward) in enumerate(zip(cur_idxs, rewards)):
self.avg_rewards[idx] = (self.avg_rewards[idx] * self.n_sampled[idx] + reward) / (self.n_sampled[idx] + 1)
self.n_sampled[idx] += 1
update_flag = (self.supervised_update_strictly_better and reward > self.max_rewards[idx]) \
or (not self.supervised_update_strictly_better and reward >= self.max_rewards[idx])
self.max_rewards[idx] = max(self.max_rewards[idx], reward)
if update_flag and update_solution:
for ii in range(len(tmp_solutions[i])):
tmp_solutions[i][ii][0] = self.data[idx][0][0] # bert
tmp_solutions[i][ii][4] = len(tmp_solutions[i]) # w
tmp_solutions[i][ii][5] = self.data[idx][0][5] # query
tmp_solutions[i][ii][6] = self.data[idx][0][6] # query structure
tmp_solutions[i][ii][7] = self.data[idx][0][7] # question
self.best_solutions[idx] = tmp_solutions[i]
def prepare_batch(self, cur_idxs):
'''
x_scores: (n_data, center dim + offset dim)
x_relations: (n_data, center dim + offset dim + bert dim)
y_scores: (n_program, max_y_score_len)
y_relations: (n_data, nrelation)
mask_relations: (n_data)
w_scores: (n_program)
w_relations: (n_data)
berts: (n_powerset, bert dim)
edge_indices: (2, n_message_passing)
softmax_edge_indices: (2, n_powerset)
mask_relations_class: (n_data, nrelation)
note that n_powerset != n_data * max_y_score_len.
n_powerset = \sum_i 2^n_i (e.g. 2+8+4+16), n_data * max_y_score_len = 4*16,
n_message_passing = \sum n_i * 2^(n_i - 1),
n_program = args.batch_size
'''
x_scores, x_relations, y_scores, y_relations = [], [], [], []
mask_relations, w_scores, w_relations, berts = [], [], [], []
edge_indices, softmax_edge_indices = [], []
mask_relations_class = []
queries = []
questions = []
powersets = []
value_edge_indices = []
sampled_score, sampled_relation, branches_picked = [], [], []
if self.reduce == 'max':
additional_edge_indices = []
n_data = 0
n_powerset = 0
n_program = 0
n_all, n_negative_one, n_str = 0, 0, 0
for i, idx in enumerate(cur_idxs):
bert, _, _, action, w, query, query_structure, question, mask_relation_class = self.data[idx][0]
# bert, center, offset, action, w, query, query_structure, question, mask_relation_class = self.data[idx][0]
if type(query) == list:
query = list2tuple(query)
tmp_structure, _, _, _ = recursive_main(flatten(query), query_structure, 0, [], 0, 0)
if self.query2box.geo == 'box':
center = self.query2box.entity_embedding(flatten_list(tmp_structure))
offset = torch.zeros_like(center)
elif self.query2box.geo == 'rotate':
embedding = self.query2box.entity_embedding(flatten_list(tmp_structure))
center, offset = torch.chunk(embedding, 2, dim=1)
assert len(center) == len(action[0])
'''
key difference here is that we further have query structure
'''
queries.append(query)
questions.append(question)
bert = np.array(bert) # (1, 768)
if len(center) == 1:
cur_powerset_len = 2
else:
cur_powerset_len = int(pow(2, len(center))-1)
berts.append(np.tile(bert, (cur_powerset_len, 1)))
powerset = self.all_powersets[len(center)]
powersets.append(powerset)
assert len(powerset) == cur_powerset_len
target = tuple(np.arange(len(action[0]))[np.array(action[0]) == 1]) #(0,1) or (1) or (0,1,2)
y = np.zeros(self.max_y_score_len)
y[powerset.index(target)] = 1
y_scores.append(y)
for j in self.data[idx]:
tmp_powerset = self.all_powersets[len(j[3][0])]
tmp_action = j[3]
tmp_target = tuple(np.arange(len(tmp_action[0]))[np.array(tmp_action[0]) == 1]) #(0,1) or (1) or (0,1,2)
sampled_score.append(torch.LongTensor([tmp_powerset.index(tmp_target)]))
branches_picked.append(tmp_target)
if len(tmp_target) != 1:
assert type(tmp_action[-1]) == str, tmp_action[-1]
sampled_relation.append(None)
else:
assert type(tmp_action[-1]) == int or type(tmp_action[-1]) == np.int32, tmp_action[-1]
sampled_relation.append([tmp_action[-1]])
edge_index = deepcopy(self.all_edge_indices[len(center)])
edge_index[0] += n_data
edge_index[1] += n_powerset
edge_indices.append(edge_index)
if self.reduce == 'max' and len(center) == 1:
additional_edge_indices.append(n_powerset)
softmax_edge_index = np.stack([n_powerset + np.arange(cur_powerset_len), i * self.max_y_score_len + np.arange(cur_powerset_len)])
softmax_edge_indices.append(softmax_edge_index)
w_scores.append(1/w)
value_edge_indices.append(n_powerset + cur_powerset_len - 1)
n_program += 1
n_data += len(center)
n_powerset += cur_powerset_len
if type(action[-1]) == str:
no_relation = True
else:
no_relation = False
for j in range(len(center)):
if type(center) == np.ndarray:
x_scores.append(np.concatenate([center[j], offset[j]]))
x_relations.append(np.concatenate([center[j], offset[j], bert[0]]))
elif type(center) == torch.Tensor:
x_scores.append(torch.cat([center[j], offset[j]], dim=-1))
x_relations.append(torch.cat([center[j], offset[j], torch.Tensor(bert[0]).to(center[j].device)], dim=-1))
else:
assert False
y_relations.append(action[-1] if type(action[-1]) == int else 0)
mask_relations_class.append(mask_relation_class)
if no_relation:
mask_relations.append(0.)
else:
                    if self.eval: # when evaluating, keep every picked branch so all required predictions are made
mask_relations.append(1. if action[0][j] == 1 else 0.)
                    else: # when training, drop datapoints whose mask_relation_class lacks the ground-truth relation
mask_relations.append(1. if action[0][j] == 1 and mask_relation_class[action[-1]] else 0.)
w_relations.append(1/w)
if self.reduce == 'max':
additional_edge_indices = np.stack([[n_data]*len(additional_edge_indices), additional_edge_indices])
edge_indices.append(additional_edge_indices)
x_scores = torch.stack(x_scores)
x_relations = torch.stack(x_relations).to(x_scores.device)
y_scores = torch.LongTensor(y_scores).to(x_scores.device)
y_relations = F.one_hot(torch.LongTensor(y_relations), self.nrelation).to(x_scores.device)
mask_relations = torch.Tensor(mask_relations).to(x_scores.device)
w_scores = torch.Tensor(w_scores).to(x_scores.device)
w_relations = torch.Tensor(w_relations).to(x_scores.device)
berts = torch.Tensor(np.concatenate(berts, axis=0)).to(x_scores.device)
edge_indices = torch.LongTensor(np.concatenate(edge_indices, axis=1)).to(x_scores.device)
softmax_edge_indices = torch.LongTensor(np.concatenate(softmax_edge_indices, axis=1)).to(x_scores.device)
mask_relations_class = torch.Tensor(mask_relations_class).bool().to(x_scores.device)
return x_scores, x_relations, y_scores, y_relations, mask_relations, \
w_scores, w_relations, berts, edge_indices, softmax_edge_indices, \
n_program, self.max_y_score_len, mask_relations_class, queries, powersets, \
value_edge_indices, sampled_score, sampled_relation, branches_picked, questions, \
cur_idxs
|
google-research/google-research
|
lego/lego/data_process/dataloader.py
|
Python
|
apache-2.0
| 33,137
|
#
# =============================================================================
# PYTHON SCRIPT FOR PLOTTING AREA RATIO v MN CHART
# creates figure 6
# =============================================================================
import pylab
import numpy
import scipy
# valid color names
# http://w3schools.com/html/html_colornames.asp
execfile( 'size_plot.dat' )
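# size_plot.dat is expected to define the MNbyp, AR and MNpod arrays used by tricontour below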
MN = [ 0.1001, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.8999 ]
pylab.figure( figsize=(12,12), facecolor='lightgrey' )
axes = [ 0.00, 1.2001, 0.0, 1.0 ]
pylab.axis( axes )
pylab.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
pylab.tick_params(axis='both', which='major', labelsize=16)
pylab.ylabel('Area Ratio (Bypass/Tube)', fontsize=18)
pylab.xlabel('Bypass Mach Number', fontsize=18)
pylab.grid(b=True, which='major', color='grey', linestyle='--')
#pylab.title( 'hyperloop' )
pylab.legend(loc="best")
CS = pylab.tricontour( MNbyp, AR, MNpod, MN, colors = ['darkblue','red','darkgreen','purple', 'grey','darkorange', 'black', 'lightblue'] )
fmt = {} #tricontour labels
strs = [ 'Pod M=0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8', '0.9' ]
for l,s in zip( CS.levels, strs ):
fmt[l] = s
xx = [0.928, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97]
yy = [0.18, 0.34, 0.49, 0.62, 0.75, 0.83, 0.92, 0.96, 0.985]
labelpts = zip(xx,yy)
pylab.clabel( CS, inline=1,fontsize=14, fmt = fmt , manual=labelpts )
pylab.annotate(" Available pod area \ndecreasing relative to tube", fontsize=16, xy=(1.09, 0.486), xycoords='data', xytext=(0., 0),
rotation=90, textcoords='offset points', bbox=dict(boxstyle="square", edgecolor='lightgrey',facecolor='lightgrey') )
# x, y, dx, dy,
pylab.arrow( 1.121, 0.458, 0.0, 0.10, fc='lightgrey', ec='lightgrey', head_width=0.16, head_length=0.070 )
pylab.vlines(1.0, 0, 1.0, colors='darkgrey', linestyles='dashed',lw= 3)#, label="limit")
pylab.gcf().set_size_inches(11,8)
#pylab.show()
pylab.tight_layout()
pylab.savefig('../output/areaPlot.pdf', dpi=300)
|
jcchin/Hyperloop
|
src/hyperloop/plot/size_plot.py
|
Python
|
apache-2.0
| 2,035
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use AWS Identity and Access Management (IAM) policies.
"""
# snippet-start:[python.example_code.iam.policy_wrapper.imports]
import json
import logging
import operator
import pprint
import time
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
iam = boto3.resource('iam')
# snippet-end:[python.example_code.iam.policy_wrapper.imports]
# snippet-start:[python.example_code.iam.CreatePolicy]
def create_policy(name, description, actions, resource_arn):
"""
Creates a policy that contains a single statement.
:param name: The name of the policy to create.
:param description: The description of the policy.
:param actions: The actions allowed by the policy. These typically take the
form of service:action, such as s3:PutObject.
:param resource_arn: The Amazon Resource Name (ARN) of the resource this policy
applies to. This ARN can contain wildcards, such as
'arn:aws:s3:::my-bucket/*' to allow actions on all objects
in the bucket named 'my-bucket'.
:return: The newly created policy.
"""
policy_doc = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": actions,
"Resource": resource_arn
}
]
}
try:
policy = iam.create_policy(
PolicyName=name, Description=description,
PolicyDocument=json.dumps(policy_doc))
logger.info("Created policy %s.", policy.arn)
except ClientError:
logger.exception("Couldn't create policy %s.", name)
raise
else:
return policy
# snippet-end:[python.example_code.iam.CreatePolicy]
# snippet-start:[python.example_code.iam.DeletePolicy]
def delete_policy(policy_arn):
"""
Deletes a policy.
:param policy_arn: The ARN of the policy to delete.
"""
try:
iam.Policy(policy_arn).delete()
logger.info("Deleted policy %s.", policy_arn)
except ClientError:
logger.exception("Couldn't delete policy %s.", policy_arn)
raise
# snippet-end:[python.example_code.iam.DeletePolicy]
# snippet-start:[python.example_code.iam.CreatePolicyVersion]
def create_policy_version(policy_arn, actions, resource_arn, set_as_default):
"""
Creates a policy version. Policies can have up to five versions. The default
version is the one that is used for all resources that reference the policy.
:param policy_arn: The ARN of the policy.
:param actions: The actions to allow in the policy version.
:param resource_arn: The ARN of the resource this policy version applies to.
:param set_as_default: When True, this policy version is set as the default
version for the policy. Otherwise, the default
is not changed.
:return: The newly created policy version.
"""
policy_doc = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': actions,
'Resource': resource_arn
}
]
}
try:
policy = iam.Policy(policy_arn)
policy_version = policy.create_version(
PolicyDocument=json.dumps(policy_doc), SetAsDefault=set_as_default)
logger.info(
"Created policy version %s for policy %s.",
policy_version.version_id, policy_version.arn)
except ClientError:
logger.exception("Couldn't create a policy version for %s.", policy_arn)
raise
else:
return policy_version
# snippet-end:[python.example_code.iam.CreatePolicyVersion]
# snippet-start:[python.example_code.iam.ListPolicies]
def list_policies(scope):
"""
Lists the policies in the current account.
:param scope: Limits the kinds of policies that are returned. For example,
'Local' specifies that only locally managed policies are returned.
:return: The list of policies.
"""
try:
policies = list(iam.policies.filter(Scope=scope))
logger.info("Got %s policies in scope '%s'.", len(policies), scope)
except ClientError:
logger.exception("Couldn't get policies for scope '%s'.", scope)
raise
else:
return policies
# snippet-end:[python.example_code.iam.ListPolicies]
# snippet-start:[python.example_code.iam.GetPolicy]
# snippet-start:[python.example_code.iam.GetPolicyVersion]
def get_default_policy_statement(policy_arn):
"""
Gets the statement of the default version of the specified policy.
:param policy_arn: The ARN of the policy to look up.
:return: The statement of the default policy version.
"""
try:
policy = iam.Policy(policy_arn)
# To get an attribute of a policy, the SDK first calls get_policy.
policy_doc = policy.default_version.document
policy_statement = policy_doc.get('Statement', None)
logger.info("Got default policy doc for %s.", policy.policy_name)
logger.info(policy_doc)
except ClientError:
logger.exception("Couldn't get default policy statement for %s.", policy_arn)
raise
else:
return policy_statement
# snippet-end:[python.example_code.iam.GetPolicyVersion]
# snippet-end:[python.example_code.iam.GetPolicy]
# snippet-start:[python.example_code.iam.Scenario_RollbackPolicyVersion]
def rollback_policy_version(policy_arn):
"""
Rolls back to the previous default policy, if it exists.
1. Gets the list of policy versions in order by date.
2. Finds the default.
3. Makes the previous policy the default.
4. Deletes the old default version.
:param policy_arn: The ARN of the policy to roll back.
:return: The default version of the policy after the rollback.
"""
try:
policy_versions = sorted(
iam.Policy(policy_arn).versions.all(),
key=operator.attrgetter('create_date'))
logger.info("Got %s versions for %s.", len(policy_versions), policy_arn)
except ClientError:
logger.exception("Couldn't get versions for %s.", policy_arn)
raise
default_version = None
rollback_version = None
try:
while default_version is None:
ver = policy_versions.pop()
if ver.is_default_version:
default_version = ver
rollback_version = policy_versions.pop()
rollback_version.set_as_default()
logger.info("Set %s as the default version.", rollback_version.version_id)
default_version.delete()
logger.info("Deleted original default version %s.", default_version.version_id)
except IndexError:
if default_version is None:
logger.warning("No default version found for %s.", policy_arn)
elif rollback_version is None:
logger.warning(
"Default version %s found for %s, but no previous version exists, so "
"nothing to roll back to.", default_version.version_id, policy_arn)
except ClientError:
logger.exception("Couldn't roll back version for %s.", policy_arn)
raise
else:
return rollback_version
# snippet-end:[python.example_code.iam.Scenario_RollbackPolicyVersion]
# snippet-start:[python.example_code.iam.AttachRolePolicy_Policy]
def attach_to_role(role_name, policy_arn):
"""
Attaches a policy to a role.
:param role_name: The name of the role. **Note** this is the name, not the ARN.
:param policy_arn: The ARN of the policy.
"""
try:
iam.Policy(policy_arn).attach_role(RoleName=role_name)
logger.info("Attached policy %s to role %s.", policy_arn, role_name)
except ClientError:
logger.exception("Couldn't attach policy %s to role %s.", policy_arn, role_name)
raise
# snippet-end:[python.example_code.iam.AttachRolePolicy_Policy]
# snippet-start:[python.example_code.iam.DetachRolePolicy_Policy]
def detach_from_role(role_name, policy_arn):
"""
Detaches a policy from a role.
:param role_name: The name of the role. **Note** this is the name, not the ARN.
:param policy_arn: The ARN of the policy.
"""
try:
iam.Policy(policy_arn).detach_role(RoleName=role_name)
logger.info("Detached policy %s from role %s.", policy_arn, role_name)
except ClientError:
logger.exception(
"Couldn't detach policy %s from role %s.", policy_arn, role_name)
raise
# snippet-end:[python.example_code.iam.DetachRolePolicy_Policy]
# snippet-start:[python.example_code.iam.Scenario_PolicyManagement]
def usage_demo():
"""Shows how to use the policy functions."""
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
print('-'*88)
print("Welcome to the AWS Identity and Account Management policy demo.")
print('-'*88)
print("Policies let you define sets of permissions that can be attached to "
"other IAM resources, like users and roles.")
    bucket_arn = 'arn:aws:s3:::made-up-bucket-name'
policy = create_policy(
'demo-iam-policy', 'Policy for IAM demonstration.',
['s3:ListObjects'], bucket_arn)
print(f"Created policy {policy.policy_name}.")
policies = list_policies('Local')
print(f"Your account has {len(policies)} managed policies:")
print(*[pol.policy_name for pol in policies], sep=', ')
time.sleep(1)
policy_version = create_policy_version(
policy.arn, ['s3:PutObject'], bucket_arn, True)
print(f"Added policy version {policy_version.version_id} to policy "
f"{policy.policy_name}.")
default_statement = get_default_policy_statement(policy.arn)
print(f"The default policy statement for {policy.policy_name} is:")
pprint.pprint(default_statement)
rollback_version = rollback_policy_version(policy.arn)
print(f"Rolled back to version {rollback_version.version_id} for "
f"{policy.policy_name}.")
default_statement = get_default_policy_statement(policy.arn)
print(f"The default policy statement for {policy.policy_name} is now:")
pprint.pprint(default_statement)
delete_policy(policy.arn)
print(f"Deleted policy {policy.policy_name}.")
print("Thanks for watching!")
# snippet-end:[python.example_code.iam.Scenario_PolicyManagement]
if __name__ == '__main__':
usage_demo()
|
awsdocs/aws-doc-sdk-examples
|
python/example_code/iam/iam_basics/policy_wrapper.py
|
Python
|
apache-2.0
| 10,603
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Giacomo Cariello. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import os
import platform
import re
from subprocess import Popen, PIPE, STDOUT
from builtins import object # pylint: disable=redefined-builtin
from future import standard_library
from dockeroo import BaseRecipe, BaseSubRecipe
from dockeroo.utils import ExternalProcessError
from dockeroo.utils import reify
standard_library.install_aliases()
DEFAULT_TIMEOUT = 180
SEPARATOR = '|'
FNULL = open(os.devnull, 'w')
class DockerMachineProcess(Popen):
def __init__(self, args, stdin=None, stdout=None):
args = ['docker-machine'] + args
super(DockerMachineProcess, self).__init__(
args, stdin=stdin, stdout=stdout, stderr=PIPE, close_fds=True)
class DockerMachine(object):
def __init__(self, name, logger):
self.name = name
self.logger = logger
@property
@reify
def platform(self):
return self.run_cmd("uname -m", quiet=True, return_output=True)
@property
@reify
def url(self):
proc = DockerMachineProcess(['url', self.name], stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error requesting \"docker-machine url {}\"".format(self.name), proc)
return proc.stdout.read().rstrip(os.linesep)
@property
@reify
def inspect(self):
proc = DockerMachineProcess(['inspect', self.name], stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error requesting \"docker-machine inspect {}\"".format(self.name), proc)
return json.loads(proc.stdout.read())
@classmethod
def machines(cls, **filters):
params = ['Name', 'Active', 'ActiveHost', 'ActiveSwarm', 'DriverName', 'State', 'URL',
'Swarm', 'Error', 'DockerVersion', 'ResponseTime']
args = ['ls', '--format',
SEPARATOR.join(['{{{{.{}}}}}'.format(x) for x in params])]
for key, value in filters.items():
args += ['--filter', '{}={}'.format(key, value)]
proc = DockerMachineProcess(args, stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error running \"docker-machine {}\"".format(' '.join(args)), proc)
params_map = dict([(x, re.sub(
'((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', x).lower()) for x in params])
ret = []
for line in proc.stdout.read().splitlines():
record = {}
values = line.split(SEPARATOR)
if len(values) < 2:
continue
for num, param in enumerate(params):
record[params_map[param]] = values[num] \
if values[num] and values[num] != '<none>' else None
ret.append(record)
return ret
@classmethod
def create(cls, name, engine_driver, engine_options):
args = ['create', '-d', engine_driver]
for k, v in engine_options:
args += ["--{}".format(k), v]
args.append(name)
proc = DockerMachineProcess(args)
if proc.wait() != 0:
raise ExternalProcessError(
"Error running \"docker-machine {}\"".format(' '.join(args)), proc)
@classmethod
def remove(cls, name):
args = ['rm', '-y', name]
proc = DockerMachineProcess(args)
if proc.wait() != 0:
raise ExternalProcessError(
"Error running \"docker-machine {}\"".format(' '.join(args)), proc)
def run_cmd(self, cmd, quiet=False, return_output=False):
if not quiet:
self.logger.info("Running command \"%s\" on machine \"%s\"", cmd, self.name)
args = ['ssh', self.name, cmd]
proc = DockerMachineProcess(args, stdout=PIPE if return_output else None)
if proc.wait() != 0:
raise ExternalProcessError(
"Error running command \"{}\" on machine \"{}\"".format(cmd, self.name), proc)
if return_output:
return proc.stdout.read().strip()
def config_binfmt(self, arch):
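        # Make sure binfmt_misc is mounted on the machine, then register a qemu user-mode
        # interpreter for the requested foreign architecture (using the magic/mask entries
        # below) so that its binaries can be executed transparently.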
self.run_cmd('[ -f /proc/sys/fs/binfmt_misc/register ] || '
'sudo mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc')
self.run_cmd(
'[ -f /proc/sys/fs/binfmt_misc/{arch} ] || '
'sudo /bin/sh -c "echo \'{binfmt}\' >/proc/sys/fs/binfmt_misc/register"'.format(arch=arch, binfmt={
'aarch64':
r':{arch}:M::'
r'\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7:'
r'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'arm':
r':{arch}:M::'
r'\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:'
r'\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'armeb':
r':{arch}:M::'
r'\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28:'
r'\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'alpha':
r':{arch}:M::'
r'\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90:'
r'\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'mips':
r':{arch}:M::'
r'\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:'
r'\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'mipsel':
r':{arch}:M::'
r'\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:'
r'\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'ppc':
r':{arch}:M::'
r'\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14:'
r'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'sh4':
r':{arch}:M::'
r'\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:'
r'\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xfb\xff\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'sh4eb':
r':{arch}:M::'
r'\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:'
r'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
'sparc':
r':{arch}:M::'
r'\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02:'
r'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
r'\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:'
r'/usr/bin/qemu-{arch}:',
}[arch].format(arch=arch)))
class BaseDockerMachineSubRecipe(BaseSubRecipe):
pass
|
dockeroo/dockeroo
|
dockeroo/docker_machine/__init__.py
|
Python
|
apache-2.0
| 8,456
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from collections import deque
class Solution:
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
return self.func(root, 1)
def func(self, node, depth):
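        # `depth` is the depth of `node` itself (the root starts at 1); leaves return
        # their own depth, internal nodes return the deeper of their two subtrees.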
if node == None or (node.left == None and node.right == None):
return depth
else:
return max(self.func(node.left, depth + 1), self.func(node.right, depth + 1))
def stringToTreeNode(input):
input = input.strip()
input = input[1:-1]
if not input:
return None
inputValues = [s.strip() for s in input.split(',')]
root = TreeNode(int(inputValues[0]))
nodeQueue = [root]
front = 0
index = 1
while index < len(inputValues):
node = nodeQueue[front]
front = front + 1
item = inputValues[index]
index = index + 1
if item != "null":
leftNumber = int(item)
node.left = TreeNode(leftNumber)
nodeQueue.append(node.left)
if index >= len(inputValues):
break
item = inputValues[index]
index = index + 1
if item != "null":
rightNumber = int(item)
node.right = TreeNode(rightNumber)
nodeQueue.append(node.right)
return root
def main():
import sys
def readlines():
for line in sys.stdin:
yield line.strip('\n')
lines = readlines()
while True:
try:
line = next(lines)
root = stringToTreeNode(line)
ret = Solution().maxDepth(root)
out = str(ret)
print(out)
except StopIteration:
break
if __name__ == '__main__':
main()
|
sonymoon/algorithm
|
src/main/python/leetcode-python/easy/maxDepth.py
|
Python
|
apache-2.0
| 2,753
|
import logging
from flask import render_template, request, redirect, url_for, flash
from flask_login import login_required
from flask_wtf import Form
from wtforms import SelectField
from kconfig import chaptersBookByName, chaptersBook
from kconfig import enablersBookByName
from kernel.Backlog import BacklogFactory, LocalBacklogFactory
from kernel.Analyser import ChapterAnalyser, ChaptersAnalyser
from kernel.Reporter import CoordinationReporter
from kernel import agileCalendar
from kernel.DataBoard import Data
from kernel.NM_Aggregates import ChapterDeck
from kernel.NM_HelpDeskReporter import DeckReporter
from . import chapters
__author__ = "Manuel Escriche <mev@tid.es>"
class SelectForm(Form):
select = SelectField(u'Backlogs')
@chapters.route("/")
@chapters.route("/overview")
@login_required
def overview():
analyser = ChaptersAnalyser.fromFile()
analyser.chaptersBook = chaptersBookByName
return render_template('chapters/overview.html', analyser=analyser, calendar=agileCalendar)
@chapters.route("helpdesk/<chaptername>", methods=['GET', 'POST'])
@login_required
def helpdesk(chaptername):
chapter = chaptersBook[chaptername]
form = SelectForm()
options = [(n, item) for n, item in enumerate(enablersBookByName)]
if request.method == 'POST':
enablername = dict(options)[int(form.select.data)]
return redirect(url_for('enablers.helpdesk', enablername=enablername))
form.select.choices = [(n, '{} - {} - {} ({})'
.format(n+1, enablersBookByName[item].chapter, item, enablersBookByName[item].mode))
for n, item in enumerate(enablersBookByName)]
data = ChapterDeck(chapter, *Data.getChapterHelpDesk(chaptername))
if data.source == 'store':
flash('Data from local storage obtained at {}'.format(data.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'age'
reporter = DeckReporter(chaptername, data)
return render_template('chapters/helpdesk.html',
chapter=chapter,
sortedby=sortedby,
data=data,
reporter=reporter,
form=form,
calendar=agileCalendar)
@chapters.route("backlog/<chaptername>", methods=['GET', 'POST'])
@login_required
def backlog(chaptername):
analyser = ChapterAnalyser.fromFile(chaptername)
form = SelectForm()
options = [(n, item) for n, item in enumerate(analyser.enablers)]
if request.method == 'POST':
enablername = dict(options)[int(form.select.data)]
return redirect(url_for('enablers.backlog', enablername=enablername))
form.select.choices = [(n, '{} ({})'.format(item, enablersBookByName[item].mode))
for n, item in enumerate(analyser.enablers)]
analyser.chaptersBook = chaptersBookByName
try:
backlog_factory = BacklogFactory.getInstance()
my_backlog = backlog_factory.getCoordinationBacklog(chaptername)
except Exception as e:
logging.warning(e)
local_factory = LocalBacklogFactory.getInstance()
my_backlog = local_factory.getCoordinationBacklog(chaptername)
flash('Data from local storage obtained at {}'.format(my_backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'name'
my_backlog.sort(key=my_backlog.sortDict[sortedby])
reporter = CoordinationReporter(chaptername, my_backlog)
return render_template('chapters/dashboard.html',
analyser=analyser, reporter=reporter,
calendar=agileCalendar, form=form)
@chapters.route("raw/<chaptername>")
@login_required
def raw(chaptername):
analyser = ChapterAnalyser.fromFile(chaptername)
analyser.chaptersBook = chaptersBookByName
try:
backlog_factory = BacklogFactory.getInstance()
my_backlog = backlog_factory.getCoordinationBacklog(chaptername)
except Exception as e:
logging.warning(e)
local_factory = LocalBacklogFactory.getInstance()
my_backlog = local_factory.getCoordinationBacklog(chaptername)
flash('Data from local storage obtained at {}'.format(my_backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'name'
my_backlog.sort(key=my_backlog.sortDict[sortedby])
reporter = CoordinationReporter(chaptername, my_backlog)
return render_template('chapters/raw.html', analyser=analyser, reporter=reporter, calendar=agileCalendar)
@chapters.route("review/<chaptername>")
@login_required
def review(chaptername):
analyser = ChapterAnalyser.fromFile(chaptername)
analyser.chaptersBook = chaptersBookByName
try:
backlog_factory = BacklogFactory.getInstance()
my_backlog = backlog_factory.getCoordinationBacklog(chaptername)
except Exception as e:
logging.warning(e)
local_factory = LocalBacklogFactory.getInstance()
my_backlog = local_factory.getCoordinationBacklog(chaptername)
flash('Data from local storage obtained at {}'.format(my_backlog.timestamp))
sortedby = request.args.get('sortedby') if request.args.get('sortedby') else 'name'
my_backlog.sort(key=my_backlog.sortDict[sortedby])
reporter = CoordinationReporter(chaptername, my_backlog)
return render_template('chapters/review.html', analyser=analyser, reporter=reporter, calendar=agileCalendar)
|
flopezag/fiware-backlog
|
app/chapters/views.py
|
Python
|
apache-2.0
| 5,537
|
from shared import create_dummy_data as ccd
from semisupervised.depLabelPropagation import label_propagation
from pyspark.sql import functions as F
from shared.Plot2DGraphs import plot3D
def double_helix(sc, example, label):
spark_double_helix = ccd.create_spark_data(
sc, func=ccd.create_double_helix,
points_pr_helix=example['n'], alpha=example['alpha'],
beta=example['beta'], missing=example['missing']
)
# spark_double_helix.show()
    plot3D(spark_double_helix, label, **example)
spark_double_helix = spark_double_helix.withColumnRenamed(
existing='label', new='original_label'
)
weight_transition = label_propagation(
sc=sc, data_frame=spark_double_helix,
label_col=label, id_col='id',
feature_cols='x y z'.split(), k=2,
max_iters=25, sigma=0.43
)
result = (spark_double_helix
.alias('a')
.join(other=weight_transition.alias('b'),
on=F.col('a.id') == F.col('b.row'), how='inner')
.drop('b.row')
)
# result.show()
plot3D(result, 'label', **example)
|
mssalvador/WorkflowCleaning
|
examples/SemisupervisedDoubleHelix.py
|
Python
|
apache-2.0
| 1,101
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classifier services"""
import os
from core.domain import classifier_domain
from core.domain import classifier_registry
from core.domain import classifier_services
from core.domain import exp_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(classifier_models,) = models.Registry.import_models(
[models.NAMES.classifier])
class ClassifierServicesTests(test_utils.GenericTestBase):
"""Test "classify" using the sample explorations.
Since the end to end tests cover correct classification, and frontend tests
test hard rules, ReaderClassifyTests is only checking that the string
classifier is actually called.
"""
def setUp(self):
super(ClassifierServicesTests, self).setUp()
self._init_classify_inputs('16')
def _init_classify_inputs(self, exploration_id):
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
self.exp_id = exploration_id
self.exp_state = (
exp_services.get_exploration_by_id(exploration_id).states['Home'])
def _is_string_classifier_called(self, answer):
sc = classifier_registry.Registry.get_classifier_by_algorithm_id(
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'])
string_classifier_predict = (
sc.__class__.predict)
predict_counter = test_utils.CallCounter(
string_classifier_predict)
with self.swap(sc.__class__, 'predict', predict_counter):
response = classifier_services.classify(self.exp_state, answer)
answer_group_index = response['answer_group_index']
rule_spec_index = response['rule_spec_index']
answer_groups = self.exp_state.interaction.answer_groups
if answer_group_index == len(answer_groups):
return 'default'
answer_group = answer_groups[answer_group_index]
return (answer_group.get_classifier_rule_index() == rule_spec_index and
predict_counter.times_called == 1)
def test_string_classifier_classification(self):
"""All these responses trigger the string classifier."""
with self.swap(feconf, 'ENABLE_STRING_CLASSIFIER', True):
self.assertTrue(
self._is_string_classifier_called(
'it\'s a permutation of 3 elements'))
self.assertTrue(
self._is_string_classifier_called(
'There are 3 options for the first ball, and 2 for the '
'remaining two. So 3*2=6.'))
self.assertTrue(
self._is_string_classifier_called('abc acb bac bca cbb cba'))
self.assertTrue(
self._is_string_classifier_called('dunno, just guessed'))
def test_retrieval_of_classifiers(self):
"""Test the get_classifier_by_id method."""
with self.assertRaisesRegexp(Exception, (
"Entity for class ClassifierDataModel with id fake_id not found")):
classifier_services.get_classifier_by_id('fake_id')
exp_id = u'1'
classifier_id = u'1'
state = 'Home'
classifier_id = classifier_models.ClassifierDataModel.create(
classifier_id, exp_id, 1, state,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'], [], 1)
classifier = classifier_services.get_classifier_by_id(
classifier_id)
self.assertEqual(classifier.exp_id, exp_id)
self.assertEqual(classifier.state_name, state)
self.assertEqual(classifier.id, classifier_id)
def test_deletion_of_classifiers(self):
"""Test the delete_classifier method."""
with self.assertRaisesRegexp(Exception, (
"Entity for class ClassifierDataModel with id fake_id not found")):
classifier_services.delete_classifier('fake_id')
exp_id = u'1'
classifier_id = u'1'
state = 'Home'
classifier_id = classifier_models.ClassifierDataModel.create(
classifier_id, exp_id, 1, state,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'], [], 1)
classifier_services.delete_classifier(classifier_id)
with self.assertRaisesRegexp(Exception, (
"Entity for class ClassifierDataModel with id %s not found" %(
classifier_id))):
classifier_services.get_classifier_by_id(classifier_id)
def test_creation_of_classifiers(self):
"""Test the create_classifier method."""
exp_id = u'1'
state_name = 'Home'
interaction_id = 'TextInput'
classifier_data = {
'_alpha': 0.1,
'_beta': 0.001,
'_prediction_threshold': 0.5,
'_training_iterations': 25,
'_prediction_iterations': 5,
'_num_labels': 10,
'_num_docs': 12,
'_num_words': 20,
'_label_to_id': {'text': 1},
'_word_to_id': {'hello': 2},
'_w_dp': [],
'_b_dl': [],
'_l_dp': [],
'_c_dl': [],
'_c_lw': [],
'_c_l': []
}
job_id = classifier_services.create_classifier_training_job(
feconf.INTERACTION_CLASSIFIER_MAPPING[interaction_id][
'algorithm_id'], interaction_id, exp_id, 1, state_name,
[], feconf.TRAINING_JOB_STATUS_NEW)
classifier_id = (
classifier_services.create_classifier(job_id, classifier_data))
classifier = classifier_services.get_classifier_by_id(
classifier_id)
self.assertEqual(classifier.exp_id, exp_id)
self.assertEqual(classifier.state_name, state_name)
self.assertEqual(classifier.id, classifier_id)
def test_retrieval_of_classifier_training_jobs(self):
"""Test the get_classifier_training_job_by_id method."""
with self.assertRaisesRegexp(Exception, (
'Entity for class ClassifierTrainingJobModel with id fake_id '
'not found')):
classifier_services.get_classifier_training_job_by_id('fake_id')
exp_id = u'1'
state_name = 'Home'
interaction_id = 'TextInput'
job_id = classifier_models.ClassifierTrainingJobModel.create(
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput']['algorithm_id'],
interaction_id, exp_id, 1, [], state_name,
feconf.TRAINING_JOB_STATUS_NEW)
classifier_training_job = (
classifier_services.get_classifier_training_job_by_id(job_id))
self.assertEqual(classifier_training_job.algorithm_id,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'])
self.assertEqual(classifier_training_job.interaction_id, interaction_id)
self.assertEqual(classifier_training_job.exp_id, exp_id)
self.assertEqual(classifier_training_job.exp_version, 1)
self.assertEqual(classifier_training_job.training_data, [])
self.assertEqual(classifier_training_job.state_name, state_name)
self.assertEqual(classifier_training_job.status,
feconf.TRAINING_JOB_STATUS_NEW)
def test_deletion_of_classifier_training_jobs(self):
"""Test the delete_classifier_training_job method."""
exp_id = u'1'
state_name = 'Home'
interaction_id = 'TextInput'
job_id = classifier_models.ClassifierTrainingJobModel.create(
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput']['algorithm_id'],
interaction_id, exp_id, 1, [], state_name,
feconf.TRAINING_JOB_STATUS_NEW)
self.assertTrue(job_id)
classifier_services.delete_classifier_training_job(job_id)
with self.assertRaisesRegexp(Exception, (
'Entity for class ClassifierTrainingJobModel '
'with id %s not found' %(
job_id))):
classifier_services.get_classifier_training_job_by_id(job_id)
def test_mark_training_job_complete(self):
"""Test the mark_training_job_complete method."""
exp_id = u'1'
state_name = 'Home'
interaction_id = 'TextInput'
job_id = classifier_services.create_classifier_training_job(
feconf.INTERACTION_CLASSIFIER_MAPPING[interaction_id][
'algorithm_id'], interaction_id, exp_id, 1, state_name,
[], feconf.TRAINING_JOB_STATUS_PENDING)
classifier_training_job = (
classifier_services.get_classifier_training_job_by_id(job_id))
self.assertEqual(classifier_training_job.status,
feconf.TRAINING_JOB_STATUS_PENDING)
classifier_services.mark_training_job_complete(job_id)
classifier_training_job = (
classifier_services.get_classifier_training_job_by_id(job_id))
self.assertEqual(classifier_training_job.status,
feconf.TRAINING_JOB_STATUS_COMPLETE)
# Test that invalid status changes cannot be made.
with self.assertRaisesRegexp(Exception, (
'The status change %s to %s is not valid.' % (
feconf.TRAINING_JOB_STATUS_COMPLETE,
feconf.TRAINING_JOB_STATUS_COMPLETE))):
classifier_services.mark_training_job_complete(job_id)
def test_retrieval_of_classifier_from_exploration_attributes(self):
"""Test the get_classifier_from_exploration_attributes method."""
exp_id = u'1'
state_name = u'टेक्स्ट'
classifier_id = 'classifier_id1'
classifier_id = classifier_models.ClassifierDataModel.create(
classifier_id, exp_id, 1, state_name,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'], [], 1)
classifier_models.ClassifierExplorationMappingModel.create(
exp_id, 1, state_name, classifier_id)
classifier = (
classifier_services.get_classifier_from_exploration_attributes(
exp_id, 1, state_name))
self.assertEqual(classifier.exp_id, exp_id)
self.assertEqual(classifier.exp_version_when_created, 1)
self.assertEqual(classifier.state_name, state_name)
self.assertEqual(classifier.id, classifier_id)
def test_creation_of_classifier_exploration_mapping(self):
"""Test the create_classifier_exploration_mapping method."""
exp_id = '1'
state_name = u'टेक्स्ट'
classifier_id = 'classifier_id1'
# Check that mapping can't be created since the classifier doesn't
# exist.
with self.assertRaisesRegexp(Exception, (
'Entity for class ClassifierDataModel with id %s not found' %(
classifier_id))):
classifier_services.create_classifier_exploration_mapping(
exp_id, 1, state_name, classifier_id)
# Create classifier
classifier_id = classifier_models.ClassifierDataModel.create(
classifier_id, exp_id, 1, state_name,
feconf.INTERACTION_CLASSIFIER_MAPPING['TextInput'][
'algorithm_id'], [], 1)
classifier_services.create_classifier_exploration_mapping(
exp_id, 1, state_name, classifier_id)
classifier_exploration_mapping = (
classifier_domain.ClassifierExplorationMapping(
exp_id, 1, state_name, classifier_id))
self.assertEqual(classifier_exploration_mapping.exp_id, exp_id)
self.assertEqual(classifier_exploration_mapping.exp_version, 1)
self.assertEqual(classifier_exploration_mapping.state_name, state_name)
self.assertEqual(classifier_exploration_mapping.classifier_id,
classifier_id)
# Check that exception is raised if the mapping already exists.
with self.assertRaisesRegexp(Exception, (
'The Classifier-Exploration mapping with id %s.%s.%s '
'already exists.' % (exp_id, 1, state_name.encode('utf-8')))):
classifier_services.create_classifier_exploration_mapping(
exp_id, 1, state_name, classifier_id)
|
shaz13/oppia
|
core/domain/classifier_services_test.py
|
Python
|
apache-2.0
| 13,186
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import MagicMock, patch
from airflow import DAG, AirflowException
from airflow.models import Connection
from airflow.providers.apache.livy.hooks.livy import BatchState, LivyHook
from airflow.providers.apache.livy.operators.livy import LivyOperator
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
mock_livy_client = MagicMock()
BATCH_ID = 100
class TestLivyOperator(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
db.merge_conn(Connection(
conn_id='livyunittest', conn_type='livy',
host='localhost:8998', port='8998', schema='http'
))
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state')
def test_poll_for_termination(self, mock_livy):
state_list = 2 * [BatchState.RUNNING] + [BatchState.SUCCESS]
def side_effect(_):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop right after the terminal state
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(
file='sparkapp',
polling_interval=1,
dag=self.dag,
task_id='livy_example'
)
task._livy_hook = task.get_hook()
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_with(BATCH_ID)
self.assertEqual(mock_livy.call_count, 3)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state')
def test_poll_for_termination_fail(self, mock_livy):
state_list = 2 * [BatchState.RUNNING] + [BatchState.ERROR]
def side_effect(_):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop right after the terminal state
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(
file='sparkapp',
polling_interval=1,
dag=self.dag,
task_id='livy_example'
)
task._livy_hook = task.get_hook()
with self.assertRaises(AirflowException):
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_with(BATCH_ID)
self.assertEqual(mock_livy.call_count, 3)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state',
return_value=BatchState.SUCCESS)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch', return_value=BATCH_ID)
def test_execution(self, mock_post, mock_get):
task = LivyOperator(
livy_conn_id='livyunittest',
file='sparkapp',
polling_interval=1,
dag=self.dag,
task_id='livy_example'
)
task.execute(context={})
call_args = {k: v for k, v in mock_post.call_args[1].items() if v}
self.assertEqual(call_args, {'file': 'sparkapp'})
mock_get.assert_called_once_with(BATCH_ID)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.delete_batch')
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch', return_value=BATCH_ID)
def test_deletion(self, mock_post, mock_delete):
task = LivyOperator(
livy_conn_id='livyunittest',
file='sparkapp',
dag=self.dag,
task_id='livy_example'
)
task.execute(context={})
task.kill()
mock_delete.assert_called_once_with(BATCH_ID)
def test_injected_hook(self):
def_hook = LivyHook(livy_conn_id='livyunittest')
task = LivyOperator(
file='sparkapp',
dag=self.dag,
task_id='livy_example'
)
task._livy_hook = def_hook
self.assertEqual(task.get_hook(), def_hook)
if __name__ == '__main__':
unittest.main()
|
mtagle/airflow
|
tests/providers/apache/livy/operators/test_livy.py
|
Python
|
apache-2.0
| 4,778
|
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pybatfish.datamodel import Edge, Interface, IssueType
from pybatfish.util import BfJsonEncoder
def test_as_dict():
assert Interface(hostname="host", interface="iface").dict() == {
"hostname": "host",
"interface": "iface",
}
assert IssueType(major="lazer", minor="coal").dict() == {
"major": "lazer",
"minor": "coal",
}
# Make sure Edge dict is right if either string or Interface is passed in
assert Edge(
node1="r1",
node1interface="iface1",
node2="r2",
node2interface=Interface(hostname="r2", interface="iface2"),
).dict() == {
"node1": "r1",
"node1interface": "iface1",
"node2": "r2",
"node2interface": "iface2",
}
def test_json_serialization():
i = Interface(hostname="host", interface="iface")
# Load into dict from json to ignore key ordering
assert json.loads(BfJsonEncoder().encode(i)) == json.loads(json.dumps(i.dict()))
|
batfish/pybatfish
|
tests/datamodel/test_datamodel_element.py
|
Python
|
apache-2.0
| 1,608
|
#!/usr/bin/env python
DOCUMENTATION = '''
---
module: eapi_vlan
short_description: VLAN Commands for Arista, via eapi
'''
EXAMPLES = '''
- name: configures the vlan name
eapi_vlan: vlanid=1 name=TEST_VLAN_1
'''
from ansible.module_utils.basic import AnsibleModule
import pyeapi, sys, argparse
def valid_id(i):
    ##Check that the VLAN id falls within the allowed range (100-999)
vlan_id = int(i)
if vlan_id > 999 or vlan_id < 100:
        raise argparse.ArgumentTypeError("Only VLANs between 100 and 999 are allowed")
return vlan_id
def main():
parser = argparse.ArgumentParser("ex2.py")
parser.add_argument('--check', action="store_true", dest="check_only", default=False, help='Do not make changes, check only')
parser.add_argument('--remove', action="store_true", dest="remove_vlan", default=False, help='Delete the VLAN')
parser.add_argument('--name', action="store", dest="vlan_name", help='VLAN name')
parser.add_argument('device', action="store", help='device hostname as found in ~/.eapi.conf, see https://eos.arista.com/introducing-the-python-client-for-eapi-pyeapi/')
parser.add_argument('vlan_id', type=valid_id, action="store", help='The VLAN ID to work with')
args = parser.parse_args()
    # argparse always sets these attributes: vlan_id is a required positional
    # argument and vlan_name simply defaults to None, so supply a default
    # name when one was not given on the command line.
    if args.vlan_name is None:
        args.vlan_name = 'VLAN' + str(args.vlan_id)
device = pyeapi.connect_to(args.device)
if not args.remove_vlan or args.check_only:
if check_vlan(args.vlan_id, args.vlan_name, device):
pass #VLAN Check is ok, go ahead and add it
add_vlan(args.vlan_id, args.vlan_name, device)
else:
print "ERR: 123, this should never happen"
else:
remove_vlan(args.vlan_id, device)
def remove_vlan(vlan_id, device):
cmds = ['no vlan ' + str(vlan_id)]
if device.config(cmds):
print "Deleting VLAN" + str(vlan_id)
def add_vlan(vlan_id, vlan_name, device):
cmds = ['vlan ' + str(vlan_id), 'name ' + vlan_name]
    #Wouldn't mind having some error handling here, but I'm not sure what sort of 'return' from the .config method might be interpreted as an error?
if device.config(cmds):
print "Adding the " + vlan_name + " VLAN with ID " + str(vlan_id)
def check_vlan(vlan_id, vlan_name, device):
vlans = device.enable("show vlan")[0]['result']['vlans']
vlan_list = vlans.keys()
#vlans = map(int, vlans) ##pylint says not to do this
vlan_list = [int(i) for i in vlan_list]
##VLAN ID check
if vlan_id in vlan_list:
print >> sys.stderr, "Vlan " + str(vlan_id) + " is already in use, quitting."
sys.exit(1)
else:
print "VLAN " + str(vlan_id) + " is available"
##VLAN Name check
for vlan_id, attribs in vlans.iteritems():
if attribs['name'] == vlan_name:
print >> sys.stderr, "VLAN Name " + vlan_name + " already in use on VLAN " + vlan_id + ", quitting."
sys.exit(2)
return True
if __name__ == '__main__':
main()
|
jrogers512/pynet
|
class7/library/eapi_vlan.py
|
Python
|
apache-2.0
| 3,062
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# ---------------------------
# Author: deangao
# Copyright: 2016 deangao
# Version: v1.0.0
# Created: 2016/3/16
# ---------------------------
import numpy as np
import pprint as pp
import matplotlib.pyplot as plt
"""
matplotlib是Python中一个基础的绘图模块,是其它高级绘图模块的基础。
matplotlib 官方网站上面有很多实例:
http://matplotlib.org/gallery.html
首先我们可以先了解下一个标准图表的基本组成,
一个普通的图表组件通常包括:
x轴、y轴、x轴刻度、y轴刻度、x轴标题、y轴标题
图例、图表标题等。
而各组件的属性有:颜色、宽度、透明度等。
图表的类型有:
散点图、折线图、气泡图、雷达图、柱状图、箱线图、直方图、饼图
热图、密度图等等。
"""
# 下面选取几个进行简单的介绍
# pp.pprint(plt.style.available)
# plt可用的风格或者主题, 比如使用R中的ggplot
plt.style.use('ggplot')
# ======================散点图=======================
x = np.random.rand(100)
y = np.random.rand(100)
"""
plot方法的参数:
plot(x, y) # 默认的为折线图
plot(x, y, 'bo') # 使用蓝色的圆圈标记
plot(y) # x轴为0-N-1, y轴为y
plot(y, 'r+') # y轴为y,x轴为0-N-1, 但是点已+号表示
"""
p1 = plt.subplot(2, 2, 1)
p1.plot(x, y)
plt.xlabel('X')
plt.ylabel('Y')
p2 = plt.subplot(2, 2, 2)
p2.plot(x, y, 'bo')
p3 = plt.subplot(2, 2, 3)
p3.plot(y)
p4 = plt.subplot(2, 2, 4)
p4.plot(y, 'r+')
plt.show()
# ====================== Polar plot ======================
plt.figure(2)
N = 150
r = 2 * np.random.rand(N)
theta = 2 * np.pi * np.random.rand(N)
area = 200 * r**2 * np.random.rand(N)
colors = theta
ax = plt.subplot(111, projection='polar')
c = plt.scatter(theta, r, c=colors, s=area, cmap=plt.cm.hsv)
c.set_alpha(0.75)
plt.show()
# ======================= Filled plot =======================
plt.figure(3)
x = np.linspace(0, 2 * np.pi, 100)
y1 = np.sin(x)
y2 = np.sin(3 * x)
plt.fill(x, y1, 'b', x, y2, 'r', alpha=0.3)
plt.show()
|
iwhgao/python_practise
|
13.matplotlib.py
|
Python
|
apache-2.0
| 2,097
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from .effect_prediction import reference_coding_transcripts_for_variant
from .reference_context import ReferenceContext
from .reference_coding_sequence_key import ReferenceCodingSequenceKey
def reference_contexts_for_variant(
variant,
context_size,
transcript_id_whitelist=None):
"""
variant : varcode.Variant
context_size : int
        Maximum number of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
Returns list of ReferenceContext objects, sorted by maximum length of
coding sequence of any supporting transcripts.
"""
overlapping_transcripts = reference_coding_transcripts_for_variant(
variant=variant,
transcript_id_whitelist=transcript_id_whitelist)
# dictionary mapping SequenceKeyWithReadingFrame keys to list of
# transcript objects
sequence_groups = defaultdict(list)
for transcript in overlapping_transcripts:
reference_coding_sequence_key = \
ReferenceCodingSequenceKey.from_variant_and_transcript(
variant=variant,
transcript=transcript,
context_size=context_size)
if reference_coding_sequence_key is not None:
sequence_groups[reference_coding_sequence_key].append(transcript)
reference_contexts = [
ReferenceContext.from_reference_coding_sequence_key(
key, variant, matching_transcripts)
for (key, matching_transcripts) in sequence_groups.items()
]
reference_contexts.sort(
key=ReferenceContext.sort_key_decreasing_max_length_transcript_cds)
return reference_contexts
def reference_contexts_generator(
variants,
context_size,
transcript_id_whitelist=None):
"""
Extract a set of reference contexts for each variant in the collection.
Parameters
----------
variants : varcode.VariantCollection
context_size : int
        Maximum number of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
    Generates a series of (Variant, [ReferenceContext]) pairs, where the
    list of ReferenceContext objects for each variant is sorted by the
    maximum coding sequence length of any supporting transcript.
"""
for variant in variants:
reference_contexts = reference_contexts_for_variant(
variant=variant,
context_size=context_size,
transcript_id_whitelist=transcript_id_whitelist)
yield variant, reference_contexts
|
hammerlab/isovar
|
isovar/reference_context_helpers.py
|
Python
|
apache-2.0
| 3,404
|
while True:
s = raw_input('Enter something:')
if s == 'quit':
break
if len(s) < 3:
continue
print 'Input is of sufficient length'
|
xingchaoma/github-python-study
|
byteofpython/ch08_modules/continue.py
|
Python
|
apache-2.0
| 162
|
#!/usr/bin/env python
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'numpy>=1.14',
'pandas>=0.22',
'six>=1.11',
'google-api-python-client',
'google-cloud-storage',
'tensorflow>=1.15,<2',
'mlflow>1.0,<2'
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform trainer'
)
|
GoogleCloudPlatform/ml-on-gcp
|
tutorials/tensorflow/mlflow_gcp/setup.py
|
Python
|
apache-2.0
| 1,062
|
from __future__ import absolute_import, print_function
import yaml
from datetime import datetime
from mock import Mock, patch
from uuid import uuid4
from changes.config import db
from changes.listeners.build_revision import revision_created_handler, CommitTrigger
from changes.models.build import Build
from changes.models.project import ProjectOption
from changes.testutils.cases import TestCase
from changes.testutils.fixtures import SAMPLE_DIFF
from changes.vcs.base import CommandError, RevisionResult, Vcs, UnknownRevision
class RevisionCreatedHandlerTestCase(TestCase):
def get_fake_vcs(self, log_results=None):
def _log_results(parent=None, branch=None, offset=0, limit=1):
assert not branch
return iter([
RevisionResult(
id='a' * 40,
message='hello world',
author='Foo <foo@example.com>',
author_date=datetime.utcnow(),
)])
if log_results is None:
log_results = _log_results
# Fake having a VCS and stub the returned commit log
fake_vcs = Mock(spec=Vcs)
fake_vcs.read_file.side_effect = CommandError(cmd="test command", retcode=128)
fake_vcs.exists.return_value = True
fake_vcs.log.side_effect = UnknownRevision(cmd="test command", retcode=128)
fake_vcs.export.side_effect = UnknownRevision(cmd="test command", retcode=128)
fake_vcs.get_changed_files.side_effect = UnknownRevision(cmd="test command", retcode=128)
fake_vcs.get_patch_hash.return_value = 'a' * 40
def fake_update():
# this simulates the effect of calling update() on a repo,
# mainly that `export` and `log` now works.
fake_vcs.log.side_effect = log_results
fake_vcs.export.side_effect = None
fake_vcs.export.return_value = SAMPLE_DIFF
fake_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(fake_vcs, id)
fake_vcs.update.side_effect = fake_update
return fake_vcs
@patch('changes.models.repository.Repository.get_vcs')
def test_simple(self, get_vcs):
repo = self.create_repo()
revision = self.create_revision(repository=repo)
project = self.create_project(repository=repo)
self.create_plan(project)
get_vcs.return_value = self.get_fake_vcs()
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
build_list = list(Build.query.filter(
Build.project == project,
))
assert len(build_list) == 1
@patch('changes.models.repository.Repository.get_vcs')
def test_disabled(self, get_vcs):
repo = self.create_repo()
revision = self.create_revision(repository=repo)
project = self.create_project(repository=repo)
self.create_plan(project)
get_vcs.return_value = self.get_fake_vcs()
db.session.add(ProjectOption(project=project, name='build.commit-trigger', value='0'))
db.session.flush()
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
assert not Build.query.first()
@patch('changes.models.repository.Repository.get_vcs')
@patch('changes.api.build_index.identify_revision')
def test_file_whitelist(self, mock_identify_revision, mock_get_vcs):
repo = self.create_repo()
revision = self.create_revision(repository=repo)
project = self.create_project(repository=repo)
self.create_plan(project)
option = ProjectOption(project=project, name='build.file-whitelist', value='foo.txt')
mock_vcs = self.get_fake_vcs()
mock_vcs.export.side_effect = None
mock_vcs.export.return_value = SAMPLE_DIFF
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
mock_identify_revision.return_value = revision
mock_get_vcs.return_value = mock_vcs
db.session.add(option)
db.session.flush()
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
mock_vcs.export.assert_called_once_with(revision.sha)
assert not Build.query.first()
option.value = 'ci/*'
db.session.add(option)
db.session.flush()
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
mock_identify_revision.assert_called_once_with(repo, revision.sha)
assert Build.query.first()
@patch('changes.models.repository.Repository.get_vcs')
@patch('changes.api.build_index.identify_revision')
def test_file_blacklist(self, mock_identify_revision, mock_get_vcs):
repo = self.create_repo()
revision = self.create_revision(repository=repo)
project = self.create_project(repository=repo)
self.create_plan(project)
mock_vcs = self.get_fake_vcs()
mock_vcs.export.side_effect = None
mock_vcs.export.return_value = SAMPLE_DIFF
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
mock_identify_revision.return_value = revision
mock_vcs.read_file.side_effect = None
mock_vcs.read_file.return_value = yaml.safe_dump({
'build.file-blacklist': ['ci/*'],
})
mock_get_vcs.return_value = mock_vcs
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
mock_vcs.export.assert_called_once_with(revision.sha)
assert not Build.query.first()
mock_vcs.read_file.return_value = yaml.safe_dump({
'build.file-blacklist': ['ci/not-real'],
})
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
mock_identify_revision.assert_called_once_with(repo, revision.sha)
assert Build.query.first()
@patch('changes.models.repository.Repository.get_vcs')
@patch('changes.api.build_index.identify_revision')
def test_invalid_config(self, mock_identify_revision, mock_get_vcs):
repo = self.create_repo()
revision = self.create_revision(repository=repo)
project = self.create_project(repository=repo)
project2 = self.create_project(repository=repo)
self.create_plan(project)
self.create_plan(project2)
mock_vcs = self.get_fake_vcs()
mock_vcs.export.side_effect = None
mock_vcs.export.return_value = SAMPLE_DIFF
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
mock_identify_revision.return_value = revision
mock_vcs.read_file.side_effect = ('{{invalid yaml}}', yaml.safe_dump({
'build.file-blacklist': ['ci/not-real'],
}))
mock_get_vcs.return_value = mock_vcs
revision_created_handler(revision_sha=revision.sha, repository_id=repo.id)
mock_vcs.export.assert_called_once_with(revision.sha)
assert len(list(Build.query)) == 2
def test_get_changed_files_updates_vcs(self):
repo = self.create_repo()
sha = uuid4().hex
revision = self.create_revision(repository=repo, sha=sha)
        # No update needed.
with patch.object(repo, 'get_vcs') as get_vcs:
mock_vcs = self.get_fake_vcs()
mock_vcs.export.side_effect = None
mock_vcs.export.return_value = SAMPLE_DIFF
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
get_vcs.return_value = mock_vcs
ct = CommitTrigger(revision)
ct.get_changed_files()
self.assertEqual(list(mock_vcs.method_calls), [
('exists', (), {}),
('get_changed_files', (sha,), {}),
('export', (sha,), {}),
])
# Successful update
with patch.object(repo, 'get_vcs') as get_vcs:
mock_vcs = self.get_fake_vcs()
# Raise first time, work second time.
mock_vcs.export.side_effect = (UnknownRevision("", 1), SAMPLE_DIFF)
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
get_vcs.return_value = mock_vcs
ct = CommitTrigger(revision)
ct.get_changed_files()
self.assertEqual(list(mock_vcs.method_calls), [
('exists', (), {}),
('get_changed_files', (sha,), {}),
('export', (sha,), {}),
('update', (), {}),
('get_changed_files', (sha,), {}),
('export', (sha,), {}),
])
# Unsuccessful update
with patch.object(repo, 'get_vcs') as get_vcs:
mock_vcs = self.get_fake_vcs()
mock_vcs.exists.return_value = True
# Revision is always unknown.
mock_vcs.export.side_effect = UnknownRevision("", 1)
mock_vcs.get_changed_files.side_effect = lambda id: Vcs.get_changed_files(mock_vcs, id)
mock_vcs.update.side_effect = None
get_vcs.return_value = mock_vcs
ct = CommitTrigger(revision)
with self.assertRaises(UnknownRevision):
ct.get_changed_files()
self.assertEqual(list(mock_vcs.method_calls), [
('exists', (), {}),
('get_changed_files', (sha,), {}),
('export', (sha,), {}),
('update', (), {}),
('get_changed_files', (sha,), {}),
('export', (sha,), {}),
])
|
dropbox/changes
|
tests/changes/listeners/test_build_revision.py
|
Python
|
apache-2.0
| 9,840
|
# -*- coding: utf-8 -*-
"""
Time-boxed development planning functions.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import datetime
import da.day
_DAYS_IN_WEEK = 7
_DAYS_IN_FORTNIGHT = 2 * _DAYS_IN_WEEK
_DAYS_IN_YEAR = 365.242
_DAYS_IN_CENTURY = 100 * _DAYS_IN_YEAR
# -----------------------------------------------------------------------------
def ident(date):
"""
Return a timebox id for the specified date.
"""
thursday = da.day.thursday_week_of_the(date)
two_weeks_ago = thursday - datetime.timedelta(_DAYS_IN_FORTNIGHT)
in_first_fortnight = thursday.month != two_weeks_ago.month
timebox_suffix = 'A' if in_first_fortnight else 'B'
timebox_id = '{year}{month:02}{suffix}'.format(
year = str(thursday.year)[2:],
month = thursday.month,
suffix = timebox_suffix)
return timebox_id
# -----------------------------------------------------------------------------
def timebox_year(date):
"""Return the timebox effective year for the specified date."""
return da.day.thursday_week_of_the(date).year
# -----------------------------------------------------------------------------
def timebox_dates_for_month(date):
"""
There are two timeboxes per month - A and B.
- timebox_a is the first timebox this month.
- timebox_b is the second timebox this month.
"""
timebox_a_start = da.day.monday_week_of_the(
da.day.first_thursday_of_month(
date.year,
date.month))
timebox_a_end = da.day.sunday_week_of_the(
da.day.second_thursday_of_month(
date.year,
date.month))
timebox_b_start = da.day.monday_week_of_the(
da.day.third_thursday_of_month(
date.year,
date.month))
timebox_b_end = da.day.sunday_week_of_the(
da.day.last_thursday_of_month(
date.year,
date.month))
return ((timebox_a_start, timebox_a_end),
(timebox_b_start, timebox_b_end))
# -----------------------------------------------------------------------------
def timebox_start_date(timebox_id, time_now = None):
"""
Return the starting date for the specified timebox id.
"""
if time_now is None:
time_now = datetime.datetime.now(datetime.timezone.utc)
time_now = datetime.date(year = time_now.year,
month = time_now.month,
day = time_now.day)
min_abs_delta_days = datetime.timedelta.max
for delta_century in [-1, 0, 1]:
candidate_date = _candidate_timebox_start_date(
timebox_id, time_now, delta_century)
abs_delta_days = abs(time_now - candidate_date)
if abs_delta_days < min_abs_delta_days:
min_abs_delta_days = abs_delta_days
min_delta_date = candidate_date
return min_delta_date
# -----------------------------------------------------------------------------
def _candidate_timebox_start_date(timebox_id, time_now, delta_century):
"""
Generate a candidate timebox start date for the specified century.
"""
delta_days = delta_century * _DAYS_IN_CENTURY
candidate_century = time_now + datetime.timedelta(days = delta_days)
first_two_digits = '{year}'.format(year = candidate_century.year)[0:2]
last_two_digits = timebox_id[0:2]
string_year = '{first}{last}'.format(first = first_two_digits,
last = last_two_digits)
string_month = timebox_id[2:4]
first_thursday = da.day.first_thursday_of_month(
year = int(string_year),
month = int(string_month))
if timebox_id.endswith('A'):
start_date = da.day.monday_week_of_the(first_thursday)
elif timebox_id.endswith('B'):
third_thursday = first_thursday + datetime.timedelta(
days = _DAYS_IN_FORTNIGHT)
return da.day.monday_week_of_the(third_thursday)
else:
raise RuntimeError('Bad timebox_id {id}'.format(id = timebox_id))
return start_date
|
wtpayne/hiai
|
a3_src/h70_internal/da/timebox.py
|
Python
|
apache-2.0
| 5,647
|
"""Support for WeMo device discovery."""
import logging
import requests
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.discovery import SERVICE_WEMO
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
DOMAIN = "wemo"
# Mapping from Wemo model_name to component.
WEMO_MODEL_DISPATCH = {
"Bridge": "light",
"CoffeeMaker": "switch",
"Dimmer": "light",
"Humidifier": "fan",
"Insight": "switch",
"LightSwitch": "switch",
"Maker": "switch",
"Motion": "binary_sensor",
"Sensor": "binary_sensor",
"Socket": "switch",
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
def coerce_host_port(value):
"""Validate that provided value is either just host or host:port.
Returns (host, None) or (host, port) respectively.
"""
host, _, port = value.partition(":")
if not host:
raise vol.Invalid("host cannot be empty")
if port:
port = cv.port(port)
else:
port = None
return host, port
CONF_STATIC = "static"
CONF_DISCOVERY = "discovery"
DEFAULT_DISCOVERY = True
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_STATIC, default=[]): vol.Schema(
[vol.All(cv.string, coerce_host_port)]
),
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up for WeMo devices."""
hass.data[DOMAIN] = config
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up a wemo config entry."""
import pywemo
config = hass.data[DOMAIN]
# Keep track of WeMo devices
devices = []
# Keep track of WeMo device subscriptions for push updates
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
await hass.async_add_executor_job(SUBSCRIPTION_REGISTRY.start)
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.debug("Shutting down WeMo event subscriptions")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def setup_url_for_device(device):
"""Determine setup.xml url for given device."""
return f"http://{device.host}:{device.port}/setup.xml"
def setup_url_for_address(host, port):
"""Determine setup.xml url for given host and port pair."""
if not port:
port = pywemo.ouimeaux_device.probe_wemo(host)
if not port:
return None
return f"http://{host}:{port}/setup.xml"
def discovery_dispatch(service, discovery_info):
"""Dispatcher for incoming WeMo discovery events."""
# name, model, location, mac
model_name = discovery_info.get("model_name")
serial = discovery_info.get("serial")
# Only register a device once
if serial in KNOWN_DEVICES:
_LOGGER.debug("Ignoring known device %s %s", service, discovery_info)
return
_LOGGER.debug("Discovered unique WeMo device: %s", serial)
KNOWN_DEVICES.append(serial)
component = WEMO_MODEL_DISPATCH.get(model_name, "switch")
discovery.load_platform(hass, component, DOMAIN, discovery_info, config)
discovery.async_listen(hass, SERVICE_WEMO, discovery_dispatch)
def discover_wemo_devices(now):
"""Run discovery for WeMo devices."""
_LOGGER.debug("Beginning WeMo device discovery...")
_LOGGER.debug("Adding statically configured WeMo devices...")
for host, port in config.get(DOMAIN, {}).get(CONF_STATIC, []):
url = setup_url_for_address(host, port)
if not url:
_LOGGER.error(
"Unable to get description url for WeMo at: %s",
f"{host}:{port}" if port else host,
)
continue
try:
device = pywemo.discovery.device_from_description(url, None)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
) as err:
_LOGGER.error("Unable to access WeMo at %s (%s)", url, err)
continue
if not [d[1] for d in devices if d[1].serialnumber == device.serialnumber]:
devices.append((url, device))
if config.get(DOMAIN, {}).get(CONF_DISCOVERY, DEFAULT_DISCOVERY):
_LOGGER.debug("Scanning network for WeMo devices...")
for device in pywemo.discover_devices():
if not [
d[1] for d in devices if d[1].serialnumber == device.serialnumber
]:
devices.append((setup_url_for_device(device), device))
for url, device in devices:
_LOGGER.debug("Adding WeMo device at %s:%i", device.host, device.port)
discovery_info = {
"model_name": device.model_name,
"serial": device.serialnumber,
"mac_address": device.mac,
"ssdp_description": url,
}
discovery_dispatch(SERVICE_WEMO, discovery_info)
_LOGGER.debug("WeMo device discovery has finished")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, discover_wemo_devices)
return True
|
Cinntax/home-assistant
|
homeassistant/components/wemo/__init__.py
|
Python
|
apache-2.0
| 5,830
|
#!/usr/bin/python
#encoding:utf-8
'''
Author: wangxu
Email: wangxu@oneniceapp.com
Job updates
'''
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import logging
import tornado.web
import json
import os
import time
import datetime
import traceback
CURRENTPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(CURRENTPATH, '../../'))
from job_define import Job
from generate_files import generate_files,get_session_id
from azkaban_api import schedule_flow,execute_flow,fetchexec_flow
# Web API handler class
class JobApiHandler(tornado.web.RequestHandler):
    # every request is routed through post()
def get(self):
self.post()
    # 'action' names the operation to perform
def post(self):
self.username = 'azkaban_api'
self.password = 'azkaban_pwd'
self.session_id = get_session_id(self.username,self.password)
action = self.get_argument('action')
method = getattr(self,action)
        # read-only query actions
get_action = set(['get_alljobs'])
if action in get_action:
method()
else:
resp = {'status':200,'message':''}
try:
result = method()
if result!=None:
resp = result
except Exception,e:
logging.info(traceback.format_exc())
resp['status'] = 400
resp['message'] = str(e)
logging.info(str(resp))
self.write(json.dumps(resp))
def upload_project(self):
        # upload and check the result
project_name = self.get_argument('project_name')
result_list = generate_files(self.username,self.session_id,project_name)
logging.info(str(result_list))
if len(result_list) == 0:
raise Exception('unexist project_name')
result = result_list[0]
if result['upload_flag'] == 'false':
raise Exception(str(result_list))
logging.info('[%s] upload jobs' % (self.username))
def schedule_flow(self):
project_name = self.get_argument('project_name')
flow_name = self.get_argument('flow_name')
schedule_time = self.get_argument('schedule_time')
period = self.get_argument('period')
result = schedule_flow(self.session_id,project_name,flow_name,schedule_time,period)
logging.info(str(result))
def get_alljobs(self):
job_list = Job.get_alljobs()
jobs = map(lambda x:{'name':x.name,'project_name':x.project_name}, job_list)
self.write(json.dumps(jobs))
def delete_job(self):
login_user = self.username
name = self.get_argument('name')
try:
job = Job.get_job_fromdb(name)
except:
            raise Exception('job [%s] not found' % name)
job.updater = login_user
flag,mes = job.has_job_permission()
logging.info('check job permission [%s] [%s]' % (flag,mes))
if not flag:
raise Exception(mes)
job.unschedule_flow(self.session_id)
job.delete_dependencies()
job.delete_job()
logging.info('[%s]delete job [%s]' % (login_user,name))
def execute_flow(self):
project_name = self.get_argument('project_name')
flow_name = self.get_argument('flow_name')
param_dict = self.request.arguments
del param_dict['action']
result = execute_flow(self.session_id,project_name,flow_name,param_dict)
return result
def fetchexec_flow(self):
execid = self.get_argument('execid')
result = fetchexec_flow(self.session_id,execid)
return result
def update_job(self):
login_user = self.username
        # required arguments
required_args = ['name','project_name','server_host','server_user','server_script','server_dir']
for arg in required_args:
self.get_argument(arg)
        # build the job
attr_list = Job.get_attr_list()
#dependencies_box = self.get_argument('dependencies_box','')
job = Job()
        # load fields dynamically; all values are treated as strings
for attr in attr_list:
value = str(self.get_argument(attr,'')).strip()
if value!='':
setattr(job,attr,value)
logging.info(attr+':'+value)
        # default settings
job.name = job.name.replace('.','-')
job.updater = login_user
job.update_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if job.creator == '':
job.creator = job.updater
job.create_time = job.update_time
        # update
flag,mes = job.has_job_permission()
logging.info('check job permission [%s] [%s]' % (flag,mes))
if not flag:
raise Exception(mes)
job.update_job()
logging.info('[%s] update job [%s]' % (login_user,job.name))
|
cocofree/azkaban_assistant
|
schedule/webapp/handler/job_api.py
|
Python
|
apache-2.0
| 4,930
|
"""Config flow to configure the Synology DSM integration."""
import logging
from urllib.parse import urlparse
from synology_dsm import SynologyDSM
from synology_dsm.exceptions import (
SynologyDSMException,
SynologyDSMLogin2SAFailedException,
SynologyDSMLogin2SARequiredException,
SynologyDSMLoginInvalidException,
SynologyDSMRequestException,
)
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_DEVICE_TOKEN,
CONF_VOLUMES,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TIMEOUT,
DEFAULT_USE_SSL,
DEFAULT_VERIFY_SSL,
)
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
CONF_OTP_CODE = "otp_code"
def _discovery_schema_with_defaults(discovery_info):
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input):
user_schema = {
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(schema_input):
return {
vol.Required(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
vol.Optional(CONF_PORT, default=schema_input.get(CONF_PORT, "")): str,
vol.Optional(
CONF_SSL, default=schema_input.get(CONF_SSL, DEFAULT_USE_SSL)
): bool,
vol.Optional(
CONF_VERIFY_SSL,
default=schema_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
}
class SynologyDSMFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return SynologyDSMOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the synology_dsm config flow."""
self.saved_user_input = {}
self.discovered_conf = {}
async def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if not user_input:
user_input = {}
if self.discovered_conf:
user_input.update(self.discovered_conf)
step_id = "link"
data_schema = _discovery_schema_with_defaults(user_input)
else:
step_id = "user"
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id=step_id,
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.discovered_conf or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return await self._show_setup_form(user_input, None)
if self.discovered_conf:
user_input.update(self.discovered_conf)
host = user_input[CONF_HOST]
port = user_input.get(CONF_PORT)
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
use_ssl = user_input.get(CONF_SSL, DEFAULT_USE_SSL)
verify_ssl = user_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
otp_code = user_input.get(CONF_OTP_CODE)
if not port:
if use_ssl is True:
port = DEFAULT_PORT_SSL
else:
port = DEFAULT_PORT
api = SynologyDSM(
host, port, username, password, use_ssl, verify_ssl, timeout=30
)
try:
serial = await self.hass.async_add_executor_job(
_login_and_fetch_syno_info, api, otp_code
)
except SynologyDSMLogin2SARequiredException:
return await self.async_step_2sa(user_input)
except SynologyDSMLogin2SAFailedException:
errors[CONF_OTP_CODE] = "otp_failed"
user_input[CONF_OTP_CODE] = None
return await self.async_step_2sa(user_input, errors)
except SynologyDSMLoginInvalidException as ex:
_LOGGER.error(ex)
errors[CONF_USERNAME] = "invalid_auth"
except SynologyDSMRequestException as ex:
_LOGGER.error(ex)
errors[CONF_HOST] = "cannot_connect"
except SynologyDSMException as ex:
_LOGGER.error(ex)
errors["base"] = "unknown"
except InvalidData:
errors["base"] = "missing_data"
if errors:
return await self._show_setup_form(user_input, errors)
# unique_id should be serial for services purpose
await self.async_set_unique_id(serial, raise_on_progress=False)
# Check if already configured
self._abort_if_unique_id_configured()
config_data = {
CONF_HOST: host,
CONF_PORT: port,
CONF_SSL: use_ssl,
CONF_VERIFY_SSL: verify_ssl,
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_MAC: api.network.macs,
}
if otp_code:
config_data[CONF_DEVICE_TOKEN] = api.device_token
if user_input.get(CONF_DISKS):
config_data[CONF_DISKS] = user_input[CONF_DISKS]
if user_input.get(CONF_VOLUMES):
config_data[CONF_VOLUMES] = user_input[CONF_VOLUMES]
return self.async_create_entry(title=host, data=config_data)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered synology_dsm."""
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
friendly_name = (
discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME].split("(", 1)[0].strip()
)
mac = discovery_info[ssdp.ATTR_UPNP_SERIAL].upper()
        # A Synology NAS can broadcast on multiple IP addresses, since it can
        # be connected through multiple ethernet interfaces.
# The serial of the NAS is actually its MAC address.
if self._mac_already_configured(mac):
return self.async_abort(reason="already_configured")
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
self.discovered_conf = {
CONF_NAME: friendly_name,
CONF_HOST: parsed_url.hostname,
}
self.context["title_placeholders"] = self.discovered_conf
return await self.async_step_user()
async def async_step_import(self, user_input=None):
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_link(self, user_input):
"""Link a config entry from discovery."""
return await self.async_step_user(user_input)
async def async_step_2sa(self, user_input, errors=None):
"""Enter 2SA code to anthenticate."""
if not self.saved_user_input:
self.saved_user_input = user_input
if not user_input.get(CONF_OTP_CODE):
return self.async_show_form(
step_id="2sa",
data_schema=vol.Schema({vol.Required(CONF_OTP_CODE): str}),
errors=errors or {},
)
user_input = {**self.saved_user_input, **user_input}
self.saved_user_input = {}
return await self.async_step_user(user_input)
def _mac_already_configured(self, mac):
"""See if we already have configured a NAS with this MAC address."""
existing_macs = [
mac.replace("-", "")
for entry in self._async_current_entries()
for mac in entry.data.get(CONF_MAC, [])
]
return mac in existing_macs
class SynologyDSMOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): cv.positive_int,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(
CONF_TIMEOUT, DEFAULT_TIMEOUT
),
): cv.positive_int,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
def _login_and_fetch_syno_info(api, otp_code):
"""Login to the NAS and fetch basic data."""
# These do i/o
api.login(otp_code)
api.utilisation.update()
api.storage.update()
api.network.update()
if (
not api.information.serial
or api.utilisation.cpu_user_load is None
or not api.storage.volumes_ids
or not api.network.macs
):
raise InvalidData
return api.information.serial
class InvalidData(exceptions.HomeAssistantError):
"""Error to indicate we get invalid data from the nas."""
|
partofthething/home-assistant
|
homeassistant/components/synology_dsm/config_flow.py
|
Python
|
apache-2.0
| 9,882
|
#!/usr/bin/env python
from getpass import getpass
from pprint import pprint as pp
from napalm_base import get_network_driver
host = 'nxos1.twb-tech.com'
username = 'pyclass'
password = getpass()
optional_args = {}
optional_args['nxos_protocol'] = 'https'
optional_args['port'] = 8443
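# Driver-specific options: talk to NX-API over HTTPS on port 8443.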
driver = get_network_driver('nxos')
device = driver(host, username, password, optional_args=optional_args)
print
print "\n\n>>>Test device open"
device.open()
print
print ">>>Load config change (merge) - no commit"
device.load_merge_candidate(filename='nxos_merge.conf')
print device.compare_config()
print
raw_input("Hit any key to continue: ")
print
print ">>>Discard config change (merge)"
device.discard_config()
print device.compare_config()
print
raw_input("Hit any key to continue: ")
print
print ">>>Load config change (merge) - commit"
device.load_merge_candidate(filename='nxos_merge.conf')
print device.compare_config()
device.commit_config()
print
raw_input("Hit any key to continue: ")
|
ktbyers/pynet-ons-nov16
|
napalm_example/test_nxos_cfg.py
|
Python
|
apache-2.0
| 990
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Image.order'
db.add_column(u'catalog_image', 'order',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Image.order'
db.delete_column(u'catalog_image', 'order')
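    # Frozen ORM snapshot that South uses to reconstruct the model state as it
    # existed at the time of this migration.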
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog']
|
Makeystreet/makeystreet
|
woot/apps/catalog/migrations/0074_auto__add_field_image_order.py
|
Python
|
apache-2.0
| 41,311
|
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides APIs for bios related configuration like boot order
"""
import logging
import json
import imcsdk.imccoreutils as imccoreutils
from imcsdk.imcexception import ImcOperationError
from imcsdk.apis.utils import _is_valid_arg
log = logging.getLogger('imc')
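# Private helpers: resolve the DN of the server's bios subtree and look up a
# BiosProfile managed object by name under <bios-dn>/profile-mgmt.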
def _get_bios_dn(handle, server_id=1):
server_dn = imccoreutils.get_server_dn(handle, server_id)
return (server_dn + '/bios')
def _get_bios_profile_mo(handle, name, server_id=1):
bios_dn = _get_bios_dn(handle, server_id)
parent_dn = bios_dn + '/profile-mgmt'
mos = handle.query_children(in_dn=parent_dn)
for mo in mos:
if mo._class_id == 'BiosProfile' and mo.name == name:
return mo
return None
def _get_bios_profile(handle, name, server_id=1):
mo = _get_bios_profile_mo(handle, name=name, server_id=server_id)
if mo is None:
raise ImcOperationError("Get BiosProfile: %s " % name,
"Managed Object not found")
return mo
def bios_profile_backup_running(handle, server_id=1, **kwargs):
"""
    Backs up the running configuration of various bios tokens to create a
'cisco_backup_profile'.
Will overwrite the existing backup profile if it exists.
Args:
handle (ImcHandle)
server_id (int): Id of the server to perform
this operation on C3260 platforms
kwargs : Key-Value paired arguments for future use
Returns:
BiosProfile object corresponding to the backup profile created
Raises:
ImcOperationError if the backup profile is not created
Examples:
bios_profile_backup_running(handle, server_id=1)
"""
from imcsdk.mometa.bios.BiosProfileManagement import BiosProfileManagement
from imcsdk.mometa.bios.BiosProfileManagement import \
BiosProfileManagementConsts
mo = BiosProfileManagement(parent_mo_or_dn=_get_bios_dn(handle, server_id))
mo.admin_action = BiosProfileManagementConsts.ADMIN_ACTION_BACKUP
mo.set_prop_multiple(**kwargs)
handle.set_mo(mo)
return _get_bios_profile(handle, name='cisco_backup_profile',
server_id=server_id)
def bios_profile_upload(handle, remote_server, remote_file, protocol='tftp',
user=None, pwd=None, server_id=1, **kwargs):
"""
    Uploads a user-configured bios profile in json format.
    Cisco IMC supports uploading a maximum of 3 profiles.
Args:
handle (ImcHandle)
remote_server (str): Remote Server IP or Hostname
remote_file (str): Remote file path
        protocol (str): Protocol used to transfer the bios profile file
['tftp', 'ftp', 'http', 'scp', 'sftp']
server_id (int): Id of the server to perform
this operation on C3260 platforms
kwargs: Key-Value paired arguments for future use
Returns:
UploadBiosProfile object
Examples:
bios_profile_upload(handle, remote_server='1.1.1.1',
remote_file='/tmp/bios_profile', protocol='scp',
user='abcd', pwd='pqrs')
"""
from imcsdk.mometa.upload.UploadBiosProfile import UploadBiosProfile
bios_dn = _get_bios_dn(handle, server_id=server_id)
mo = UploadBiosProfile(
parent_mo_or_dn=bios_dn + '/profile-mgmt')
params = {
'remote_server': remote_server,
'remote_file': remote_file,
'protocol': protocol,
'user': user,
'pwd': pwd
}
mo.set_prop_multiple(**params)
mo.set_prop_multiple(**kwargs)
handle.set_mo(mo)
return handle.query_dn(mo.dn)
def bios_profile_get(handle, name, server_id=1):
"""
Gets the bios profile corresponding to the name specified
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
server_id (int): Id of the server to perform
this operation on C3260 platforms
Returns:
BiosProfile object corresponding to the name specified
Raises:
ImcOperationError if the bios profile is not found
Examples:
bios_profile_get(handle, name='simple')
"""
return _get_bios_profile_mo(handle, name=name, server_id=server_id)
def bios_profile_activate(handle, name, backup_on_activate=True,
reboot_on_activate=False, server_id=1, **kwargs):
"""
Activates the bios profile specified by name on the Cisco IMC Server
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
backup_on_activate (bool): Backup running bios configuration
before activating this profile.
Will overwrite the previous backup.
reboot_on_activate (bool): Reboot the host/server for the newer bios
configuration to be applied.
server_id (int): Id of the server to perform
this operation on C3260 platforms.
kwargs: Key-Value paired arguments for future use.
Returns:
BiosProfile object corresponding to the name specified
Raises:
ImcOperationError if the bios profile is not found
Examples:
bios_profile_activate(handle, name='simple',
backup_on_activate=True,
reboot_on_activate=False)
"""
from imcsdk.mometa.bios.BiosProfile import BiosProfileConsts
mo = _get_bios_profile(handle, name=name, server_id=server_id)
params = {
'backup_on_activate': ('no', 'yes')[backup_on_activate],
'reboot_on_activate': ('no', 'yes')[reboot_on_activate],
'enabled': 'yes',
'admin_action': BiosProfileConsts.ADMIN_ACTION_ACTIVATE
}
mo.set_prop_multiple(**params)
mo.set_prop_multiple(**kwargs)
handle.set_mo(mo)
return handle.query_dn(mo.dn)
def bios_profile_delete(handle, name, server_id=1):
"""
Deletes the bios profile specified by the name on the Cisco IMC server
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
server_id (int): Id of the server to perform
this operation on C3260 platforms.
Returns:
None
Raises:
ImcOperationError if the bios profile is not found
Examples:
bios_profile_delete(handle, name='simple', server_id=2)
"""
from imcsdk.mometa.bios.BiosProfile import BiosProfileConsts
mo = _get_bios_profile(handle, name=name, server_id=server_id)
mo.admin_action = BiosProfileConsts.ADMIN_ACTION_DELETE
handle.set_mo(mo)
def is_bios_profile_enabled(handle, name, server_id=1):
"""
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
server_id (int): Id of the server to perform
this operation on C3260 platforms.
Returns:
bool
Raises:
ImcOperationError if the bios profile is not found
Examples:
is_bios_profile_enabled(handle,
name='simple',
server_id=1)
"""
mo = _get_bios_profile(handle, name=name, server_id=server_id)
return mo.enabled.lower() in ['yes', 'true']
def bios_profile_exists(handle, name, server_id=1, **kwargs):
"""
Checks if the bios profile with the specified params exists
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
server_id (int): Id of the server to perform
this operation on C3260 platforms.
kwargs: Key-Value paired arguments relevant to BiosProfile object
Returns:
(True, BiosProfile) if the settings match, else (False, None)
Examples:
match, mo = bios_profile_exists(handle, name='simple',
enabled=True)
"""
mo = _get_bios_profile_mo(handle, name=name, server_id=server_id)
if mo is None:
return False, None
params = {}
if _is_valid_arg('enabled', kwargs):
params['enabled'] = ('No', 'Yes')[kwargs.pop('enabled')]
if not mo.check_prop_match(**params):
return False, None
if not mo.check_prop_match(**kwargs):
return False, None
return True, mo
def bios_profile_generate_json(handle, name, server_id=1, file_name=None):
"""
Generates a json output of the bios profile specified by the name on
the Cisco IMC server.
If a file name is specified, it writes the output to the file.
Args:
handle (ImcHandle)
name (str): Name of the bios profile.
Corresponds to the name field in the json file.
server_id (int): Id of the server to perform
this operation on C3260 platforms.
Returns:
JSON Output of the Bios Tokens
Raises:
ImcOperationError if the bios profile is not found
Examples:
bios_profile_generate_json(handle, name='simple', server_id=2)
"""
output = {}
output['tokens'] = {}
mo = _get_bios_profile(handle, name=name, server_id=server_id)
output['name'] = mo.name
output['description'] = mo.description
tokens = handle.query_children(in_dn=mo.dn)
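    # Each child MO of the profile is a bios token; record its configured value.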
output['tokens'] = {x.name: x.configured_value for x in tokens}
    if file_name:
        with open(file_name, 'w') as f:
            f.write(json.dumps(output))
return output
def bios_tokens_set(handle, tokens={}, server_id=1):
"""
Args:
handle (ImcHandle)
tokens (dictionary) : (key, value) pair of bios tokens with key being the name of the token
server_id (int): Id of the server to perform
this operation on C3260 platforms.
Returns:
None
Examples:
bios_tokens_set(handle,
tokens = {
"BaudRate": "19200",
"IntelVTDATSSupport": "enabled",
"ConsoleRedirection": "com-1",
"FlowControl": "rts-cts"},
server_id=2)
"""
from imcsdk.imccoreutils import load_class
parent_dn = _get_bios_dn(handle, server_id) + "/bios-settings"
mo_table = _get_bios_mo_table(handle, tokens, server_id)
for mo_name, props in mo_table.items():
mo_class = load_class(mo_name)
mo_obj = mo_class(parent_mo_or_dn=parent_dn, **props)
handle.set_mo(mo_obj)
def bios_tokens_exist(handle, tokens={}, server_id=1):
"""
Args:
handle (ImcHandle)
tokens (dictionary) : (key, value) pair of bios tokens with key being the name of the token
server_id (int): Id of the server to perform
this operation on C3260 platforms.
Returns:
True/False based on the match with the server side tokens
Examples:
bios_tokens_exist(handle,
tokens = {
"BaudRate": "19200",
"IntelVTDATSSupport": "enabled",
"ConsoleRedirection": "com-1",
"FlowControl": "rts-cts"},
server_id=2)
"""
parent_dn = _get_bios_dn(handle, server_id) + "/bios-settings"
mo_table = _get_bios_mo_table(handle, tokens, server_id)
for mo_name, props in mo_table.items():
cimc_mos = handle.query_classid(class_id=mo_name)
cimc_mo = None
for mo in cimc_mos:
if mo.dn.startswith(parent_dn):
cimc_mo = mo
break
if cimc_mo is None:
return False
# Skip comparison when the value to be checked with is "platform-default"
modified_props = {x: props[x] for x in props if props[x] != "platform-default"}
if not cimc_mo.check_prop_match(**modified_props):
return False
return True
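# Translates a flat {token: value} mapping into per-managed-object property
# dicts using the platform-specific bios token table, so each MO is set once.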
def _get_bios_mo_table(handle, tokens={}, server_id=1):
from imcsdk.imcbiostables import bios_tokens_table
    mo_table = {}
    bios_tokens_table_platform = bios_tokens_table.get(
        handle.platform, bios_tokens_table['classic'])
    for token, value in tokens.items():
        entry = bios_tokens_table_platform.get(token)
if entry is None:
            log.warning("Token not found: %s Platform: %s",
                        token, handle.platform)
continue
mo_props = mo_table.get(entry["mo_name"], {})
mo_props[entry["prop_name"]] = value
mo_table[entry["mo_name"]] = mo_props
return mo_table
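# Example usage (illustrative sketch, not from the library docs; assumes
# `handle` is an authenticated ImcHandle and the token names are valid for
# the target platform):
#
#     tokens = {"BaudRate": "19200", "ConsoleRedirection": "com-1"}
#     if not bios_tokens_exist(handle, tokens=tokens, server_id=1):
#         bios_tokens_set(handle, tokens=tokens, server_id=1)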
|
ragupta-git/ImcSdk
|
imcsdk/apis/server/bios.py
|
Python
|
apache-2.0
| 13,690
|
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import json
import mock
from oslo_config import cfg
import oslo_messaging
from oslo_utils import uuidutils
import sqlalchemy as sa
from webtest import app as webtest_app
from mistral.api.controllers.v2 import execution
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import api as sql_db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.rpc import base as rpc_base
from mistral.rpc import clients as rpc_clients
from mistral.tests.unit.api import base
from mistral.tests.unit import base as unit_base
from mistral import utils
from mistral.utils import rest_utils
from mistral.workflow import states
# This line is needed for correct initialization of messaging config.
oslo_messaging.get_rpc_transport(cfg.CONF)
WF_EX = models.WorkflowExecution(
id='123e4567-e89b-12d3-a456-426655440000',
workflow_name='some',
workflow_id='123e4567-e89b-12d3-a456-426655441111',
description='execution description.',
spec={'name': 'some'},
state=states.RUNNING,
state_info=None,
input={'foo': 'bar'},
output={},
params={'env': {'k1': 'abc'}},
created_at=datetime.datetime(1970, 1, 1),
updated_at=datetime.datetime(1970, 1, 1)
)
WF_EX_JSON = {
'id': '123e4567-e89b-12d3-a456-426655440000',
'input': '{"foo": "bar"}',
'output': '{}',
'params': '{"env": {"k1": "abc"}}',
'state': 'RUNNING',
'state_info': None,
'created_at': '1970-01-01 00:00:00',
'updated_at': '1970-01-01 00:00:00',
'workflow_name': 'some',
'workflow_id': '123e4567-e89b-12d3-a456-426655441111'
}
SUB_WF_EX = models.WorkflowExecution(
id=uuidutils.generate_uuid(),
workflow_name='some',
workflow_id='123e4567-e89b-12d3-a456-426655441111',
description='foobar',
spec={'name': 'some'},
state=states.RUNNING,
state_info=None,
input={'foo': 'bar'},
output={},
params={'env': {'k1': 'abc'}},
created_at=datetime.datetime(1970, 1, 1),
updated_at=datetime.datetime(1970, 1, 1),
task_execution_id=uuidutils.generate_uuid()
)
SUB_WF_EX_JSON = {
'id': SUB_WF_EX.id,
'workflow_name': 'some',
'workflow_id': '123e4567-e89b-12d3-a456-426655441111',
'input': '{"foo": "bar"}',
'output': '{}',
'params': '{"env": {"k1": "abc"}}',
'state': 'RUNNING',
'state_info': None,
'created_at': '1970-01-01 00:00:00',
'updated_at': '1970-01-01 00:00:00',
'task_execution_id': SUB_WF_EX.task_execution_id
}
MOCK_SUB_WF_EXECUTIONS = mock.MagicMock(return_value=[SUB_WF_EX])
SUB_WF_EX_JSON_WITH_DESC = copy.deepcopy(SUB_WF_EX_JSON)
SUB_WF_EX_JSON_WITH_DESC['description'] = SUB_WF_EX.description
UPDATED_WF_EX = copy.deepcopy(WF_EX)
UPDATED_WF_EX['state'] = states.PAUSED
UPDATED_WF_EX_JSON = copy.deepcopy(WF_EX_JSON)
UPDATED_WF_EX_JSON['state'] = states.PAUSED
UPDATED_WF_EX_ENV = copy.deepcopy(UPDATED_WF_EX)
UPDATED_WF_EX_ENV['params'] = {'env': {'k1': 'def'}}
UPDATED_WF_EX_ENV_DESC = copy.deepcopy(UPDATED_WF_EX)
UPDATED_WF_EX_ENV_DESC['description'] = 'foobar'
UPDATED_WF_EX_ENV_DESC['params'] = {'env': {'k1': 'def'}}
WF_EX_JSON_WITH_DESC = copy.deepcopy(WF_EX_JSON)
WF_EX_JSON_WITH_DESC['description'] = WF_EX.description
WF_EX_WITH_PROJECT_ID = WF_EX.get_clone()
WF_EX_WITH_PROJECT_ID.project_id = '<default-project>'
SOURCE_WF_EX = copy.deepcopy(WF_EX)
SOURCE_WF_EX['source_execution_id'] = WF_EX.id
SOURCE_WF_EX['id'] = uuidutils.generate_uuid()
SOURCE_WF_EX_JSON_WITH_DESC = copy.deepcopy(WF_EX_JSON_WITH_DESC)
SOURCE_WF_EX_JSON_WITH_DESC['id'] = SOURCE_WF_EX.id
SOURCE_WF_EX_JSON_WITH_DESC['source_execution_id'] = \
SOURCE_WF_EX.source_execution_id
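# MagicMock stand-ins built from the fixtures above; the tests patch them over
# the DB and RPC layers so the REST controller runs without a real backend.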
MOCK_WF_EX = mock.MagicMock(return_value=WF_EX)
MOCK_SUB_WF_EX = mock.MagicMock(return_value=SUB_WF_EX)
MOCK_SOURCE_WF_EX = mock.MagicMock(return_value=SOURCE_WF_EX)
MOCK_WF_EXECUTIONS = mock.MagicMock(return_value=[WF_EX])
MOCK_UPDATED_WF_EX = mock.MagicMock(return_value=UPDATED_WF_EX)
MOCK_DELETE = mock.MagicMock(return_value=None)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_ACTION_EXC = mock.MagicMock(side_effect=exc.ActionException())
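# Illustrative sketch (hypothetical helper, never called by the tests below):
# the constants above come in two flavours: a MagicMock built with
# return_value emulates a successful DB/RPC call, while one built with
# side_effect emulates a call that raises. Minimal example using only names
# already defined in this module:
def _example_mock_flavours():
    ok_db_call = mock.MagicMock(return_value=WF_EX)
    failing_db_call = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
    return ok_db_call, failing_db_call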
@mock.patch.object(rpc_base, '_IMPL_CLIENT', mock.Mock())
class TestExecutionsController(base.APITest):
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
def test_get(self):
resp = self.app.get('/v2/executions/123')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json)
@mock.patch.object(db_api, 'get_workflow_execution')
def test_get_operational_error(self, mocked_get):
mocked_get.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
WF_EX # Successful run
]
resp = self.app.get('/v2/executions/123')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_SUB_WF_EX)
def test_get_sub_wf_ex(self):
resp = self.app.get('/v2/executions/123')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(SUB_WF_EX_JSON_WITH_DESC, resp.json)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_NOT_FOUND)
def test_get_not_found(self):
resp = self.app.get('/v2/executions/123', expect_errors=True)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, 'get_workflow_execution',
return_value=WF_EX_WITH_PROJECT_ID)
def test_get_within_project_id(self, mock_get):
resp = self.app.get('/v2/executions/123', expect_errors=True)
self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
@mock.patch.object(
rpc_clients.EngineClient,
'pause_workflow',
MOCK_UPDATED_WF_EX
)
def test_put_state_paused(self):
update_exec = {
'id': WF_EX['id'],
'state': states.PAUSED
}
resp = self.app.put_json('/v2/executions/123', update_exec)
expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC)
expected_exec['state'] = states.PAUSED
self.assertEqual(200, resp.status_int)
self.assertDictEqual(expected_exec, resp.json)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
@mock.patch.object(rpc_clients.EngineClient, 'stop_workflow')
def test_put_state_error(self, mock_stop_wf):
update_exec = {
'id': WF_EX['id'],
'state': states.ERROR,
'state_info': 'Force'
}
wf_ex = copy.deepcopy(WF_EX)
wf_ex['state'] = states.ERROR
wf_ex['state_info'] = 'Force'
mock_stop_wf.return_value = wf_ex
resp = self.app.put_json('/v2/executions/123', update_exec)
expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC)
expected_exec['state'] = states.ERROR
expected_exec['state_info'] = 'Force'
self.assertEqual(200, resp.status_int)
self.assertDictEqual(expected_exec, resp.json)
mock_stop_wf.assert_called_once_with('123', 'ERROR', 'Force')
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
@mock.patch.object(rpc_clients.EngineClient, 'stop_workflow')
def test_put_state_cancelled(self, mock_stop_wf):
update_exec = {
'id': WF_EX['id'],
'state': states.CANCELLED,
'state_info': 'Cancelled by user.'
}
wf_ex = copy.deepcopy(WF_EX)
wf_ex['state'] = states.CANCELLED
wf_ex['state_info'] = 'Cancelled by user.'
mock_stop_wf.return_value = wf_ex
resp = self.app.put_json('/v2/executions/123', update_exec)
expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC)
expected_exec['state'] = states.CANCELLED
expected_exec['state_info'] = 'Cancelled by user.'
self.assertEqual(200, resp.status_int)
self.assertDictEqual(expected_exec, resp.json)
mock_stop_wf.assert_called_once_with(
'123',
'CANCELLED',
'Cancelled by user.'
)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
@mock.patch.object(rpc_clients.EngineClient, 'resume_workflow')
def test_put_state_resume(self, mock_resume_wf):
update_exec = {
'id': WF_EX['id'],
'state': states.RUNNING
}
wf_ex = copy.deepcopy(WF_EX)
wf_ex['state'] = states.RUNNING
wf_ex['state_info'] = None
mock_resume_wf.return_value = wf_ex
resp = self.app.put_json('/v2/executions/123', update_exec)
expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC)
expected_exec['state'] = states.RUNNING
expected_exec['state_info'] = None
self.assertEqual(200, resp.status_int)
self.assertDictEqual(expected_exec, resp.json)
mock_resume_wf.assert_called_once_with('123', env=None)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
def test_put_invalid_state(self):
invalid_states = [states.IDLE, states.WAITING, states.RUNNING_DELAYED]
for state in invalid_states:
update_exec = {
'id': WF_EX['id'],
'state': state
}
resp = self.app.put_json(
'/v2/executions/123',
update_exec,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn(
'Cannot change state to %s.' % state,
resp.json['faultstring']
)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
@mock.patch.object(rpc_clients.EngineClient, 'stop_workflow')
def test_put_state_info_unset(self, mock_stop_wf):
update_exec = {
'id': WF_EX['id'],
'state': states.ERROR,
}
wf_ex = copy.deepcopy(WF_EX)
wf_ex['state'] = states.ERROR
del wf_ex.state_info
mock_stop_wf.return_value = wf_ex
resp = self.app.put_json('/v2/executions/123', update_exec)
expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC)
expected_exec['state'] = states.ERROR
expected_exec['state_info'] = None
self.assertEqual(200, resp.status_int)
self.assertDictEqual(expected_exec, resp.json)
mock_stop_wf.assert_called_once_with('123', 'ERROR', None)
@mock.patch('mistral.db.v2.api.get_workflow_execution')
@mock.patch(
'mistral.db.v2.api.update_workflow_execution',
return_value=WF_EX
)
def test_put_description(self, mock_update, mock_ensure):
update_params = {'description': 'execution description.'}
resp = self.app.put_json('/v2/executions/123', update_params)
self.assertEqual(200, resp.status_int)
mock_ensure.assert_called_once_with('123')
mock_update.assert_called_once_with('123', update_params)
@mock.patch.object(
sql_db_api,
'get_workflow_execution',
mock.MagicMock(return_value=copy.deepcopy(UPDATED_WF_EX))
)
@mock.patch(
'mistral.services.workflows.update_workflow_execution_env',
return_value=copy.deepcopy(UPDATED_WF_EX_ENV)
)
def test_put_env(self, mock_update_env):
update_exec = {'params': '{"env": {"k1": "def"}}'}
resp = self.app.put_json('/v2/executions/123', update_exec)
self.assertEqual(200, resp.status_int)
self.assertEqual(update_exec['params'], resp.json['params'])
mock_update_env.assert_called_once_with(UPDATED_WF_EX, {'k1': 'def'})
@mock.patch.object(db_api, 'update_workflow_execution', MOCK_NOT_FOUND)
def test_put_not_found(self):
resp = self.app.put_json(
'/v2/executions/123',
dict(state=states.PAUSED),
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
def test_put_empty(self):
resp = self.app.put_json('/v2/executions/123', {}, expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertIn(
'state, description, or env is not provided for update',
resp.json['faultstring']
)
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
def test_put_state_and_description(self):
resp = self.app.put_json(
'/v2/executions/123',
{'description': 'foobar', 'state': states.ERROR},
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn(
'description must be updated separately from state',
resp.json['faultstring']
)
@mock.patch.object(
sql_db_api,
'get_workflow_execution',
mock.MagicMock(return_value=copy.deepcopy(UPDATED_WF_EX))
)
@mock.patch(
'mistral.db.v2.api.update_workflow_execution',
return_value=WF_EX
)
@mock.patch(
'mistral.services.workflows.update_workflow_execution_env',
return_value=copy.deepcopy(UPDATED_WF_EX_ENV_DESC)
)
def test_put_env_and_description(self, mock_update_env, mock_update):
update_exec = {
'description': 'foobar',
'params': '{"env": {"k1": "def"}}'
}
resp = self.app.put_json('/v2/executions/123', update_exec)
self.assertEqual(200, resp.status_int)
self.assertEqual(update_exec['description'], resp.json['description'])
self.assertEqual(update_exec['params'], resp.json['params'])
mock_update.assert_called_once_with('123', {'description': 'foobar'})
mock_update_env.assert_called_once_with(UPDATED_WF_EX, {'k1': 'def'})
@mock.patch.object(
db_api,
'get_workflow_execution',
mock.MagicMock(return_value=None)
)
def test_put_env_wrong_state(self):
update_exec = {
'id': WF_EX['id'],
'state': states.SUCCESS,
'params': '{"env": {"k1": "def"}}'
}
resp = self.app.put_json(
'/v2/executions/123',
update_exec,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
expected_fault = (
'env can only be updated when workflow execution '
'is not running or on resume from pause'
)
self.assertIn(expected_fault, resp.json['faultstring'])
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
@mock.patch.object(db_api, 'load_workflow_execution')
def test_post_auto_id(self, load_wf_ex_func, start_wf_func):
# NOTE: In fact, we use "white box" testing here to understand
# if the REST controller calls other APIs as expected. This is
# the only way of testing available with the current testing
# infrastructure.
start_wf_func.return_value = WF_EX.to_dict()
json_body = WF_EX_JSON_WITH_DESC.copy()
# We don't want to pass execution ID in this case.
del json_body['id']
expected_json = WF_EX_JSON_WITH_DESC
resp = self.app.post_json('/v2/executions', json_body)
self.assertEqual(201, resp.status_int)
self.assertDictEqual(expected_json, resp.json)
load_wf_ex_func.assert_not_called()
kwargs = json.loads(expected_json['params'])
kwargs['description'] = expected_json['description']
start_wf_func.assert_called_once_with(
expected_json['workflow_id'],
'',
None,
json.loads(expected_json['input']),
**kwargs
)
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
@mock.patch.object(db_api, 'load_workflow_execution')
def test_post_with_exec_id_exec_doesnt_exist(self, load_wf_ex_func,
start_wf_func):
# NOTE: In fact, we use "white box" testing here to understand
# if the REST controller calls other APIs as expected. This is
# the only way of testing available with the current testing
# infrastructure.
# Imitate that the execution doesn't exist in DB.
load_wf_ex_func.return_value = None
start_wf_func.return_value = WF_EX.to_dict()
# We want to pass execution ID in this case so we don't delete 'id'
# from the dict.
json_body = WF_EX_JSON_WITH_DESC.copy()
expected_json = WF_EX_JSON_WITH_DESC
resp = self.app.post_json('/v2/executions', json_body)
self.assertEqual(201, resp.status_int)
self.assertDictEqual(expected_json, resp.json)
load_wf_ex_func.assert_called_once_with(expected_json['id'])
kwargs = json.loads(expected_json['params'])
kwargs['description'] = expected_json['description']
start_wf_func.assert_called_once_with(
expected_json['workflow_id'],
'',
expected_json['id'],
json.loads(expected_json['input']),
**kwargs
)
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
@mock.patch.object(db_api, 'load_workflow_execution')
def test_post_with_exec_id_exec_exists(self, load_wf_ex_func,
start_wf_func):
# NOTE: In fact, we use "white box" testing here to understand
# if the REST controller calls other APIs as expected. This is
# the only way of testing available with the current testing
# infrastructure.
# Imitate that the execution exists in DB.
load_wf_ex_func.return_value = WF_EX
# We want to pass execution ID in this case so we don't delete 'id'
# from the dict.
json_body = WF_EX_JSON_WITH_DESC.copy()
expected_json = WF_EX_JSON_WITH_DESC
resp = self.app.post_json('/v2/executions', json_body)
self.assertEqual(201, resp.status_int)
self.assertDictEqual(expected_json, resp.json)
load_wf_ex_func.assert_called_once_with(expected_json['id'])
# Note that "start_workflow" method on engine API should not be called
# in this case because we passed execution ID to the endpoint and the
# corresponding object exists.
start_wf_func.assert_not_called()
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
def test_post_with_source_execution_id(self, wf_exec_mock):
wf_exec_mock.return_value = SOURCE_WF_EX.to_dict()
resp = self.app.post_json('/v2/executions/',
SOURCE_WF_EX_JSON_WITH_DESC)
source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC)
del source_wf_ex_json['source_execution_id']
self.assertEqual(201, resp.status_int)
self.assertDictEqual(source_wf_ex_json, resp.json)
exec_dict = source_wf_ex_json
wf_exec_mock.assert_called_once_with(
exec_dict['workflow_id'],
'',
exec_dict['id'],
json.loads(exec_dict['input']),
description=exec_dict['description'],
**json.loads(exec_dict['params'])
)
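    # A hedged note on the patching pattern above: stacked @mock.patch.object
    # decorators inject their mocks into the test method bottom-up, and a
    # decorator given an explicit replacement object (such as MOCK_WF_EX)
    # injects no argument at all, which is why a test wrapped in two patch
    # decorators can accept a single mock parameter.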
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
def test_post_with_src_exec_id_without_exec_id(self, wf_exec_mock):
source_wf_ex = copy.copy(SOURCE_WF_EX)
source_wf_ex.id = ""
source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC)
source_wf_ex_json['id'] = ''
wf_exec_mock.return_value = source_wf_ex.to_dict()
resp = self.app.post_json('/v2/executions/', source_wf_ex_json)
del source_wf_ex_json['source_execution_id']
self.assertEqual(201, resp.status_int)
self.assertDictEqual(source_wf_ex_json, resp.json)
exec_dict = source_wf_ex_json
wf_exec_mock.assert_called_once_with(
exec_dict['workflow_id'],
'',
'',
json.loads(exec_dict['input']),
description=exec_dict['description'],
**json.loads(exec_dict['params'])
)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_EMPTY)
@mock.patch.object(rpc_clients.EngineClient, 'start_workflow')
def test_post_without_source_execution_id(self, wf_exec_mock):
wf_exec_mock.return_value = SOURCE_WF_EX.to_dict()
source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC)
source_wf_ex_json['source_execution_id'] = ""
        # Here we deliberately pass an empty value for the source execution id
        # to make sure that the endpoint still takes the correct actions.
resp = self.app.post_json('/v2/executions/', source_wf_ex_json)
self.assertEqual(201, resp.status_int)
del source_wf_ex_json['source_execution_id']
        # The source execution key has to be removed from the expected body
        # because the id is only used to perform a lookup.
self.assertDictEqual(source_wf_ex_json, resp.json)
exec_dict = source_wf_ex_json
wf_exec_mock.assert_called_once_with(
exec_dict['workflow_id'],
'',
exec_dict['id'],
json.loads(exec_dict['input']),
description=exec_dict['description'],
**json.loads(exec_dict['params'])
)
@mock.patch.object(
rpc_clients.EngineClient,
'start_workflow',
MOCK_ACTION_EXC
)
def test_post_throws_exception(self):
context = self.assertRaises(
webtest_app.AppError,
self.app.post_json,
'/v2/executions',
WF_EX_JSON
)
self.assertIn('Bad response: 400', context.args[0])
def test_post_without_workflow_id_and_name(self):
context = self.assertRaises(
webtest_app.AppError,
self.app.post_json,
'/v2/executions',
{'description': 'some description here.'}
)
self.assertIn('Bad response: 400', context.args[0])
@mock.patch.object(db_api, 'delete_workflow_execution', MOCK_DELETE)
def test_delete(self):
resp = self.app.delete('/v2/executions/123')
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, 'delete_workflow_execution', MOCK_NOT_FOUND)
def test_delete_not_found(self):
resp = self.app.delete('/v2/executions/123', expect_errors=True)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS)
def test_get_all(self):
resp = self.app.get('/v2/executions')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['executions']))
self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0])
@mock.patch.object(db_api, 'get_workflow_executions')
def test_get_all_operational_error(self, mocked_get_all):
mocked_get_all.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
[WF_EX] # Successful run
]
resp = self.app.get('/v2/executions')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['executions']))
self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0])
@mock.patch.object(db_api, 'get_workflow_executions', MOCK_EMPTY)
def test_get_all_empty(self):
resp = self.app.get('/v2/executions')
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(resp.json['executions']))
@mock.patch.object(db_api, "get_workflow_executions", MOCK_WF_EXECUTIONS)
def test_get_all_pagination(self):
resp = self.app.get(
'/v2/executions?limit=1&sort_keys=id,workflow_name'
'&sort_dirs=asc,desc')
self.assertEqual(200, resp.status_int)
self.assertIn('next', resp.json)
self.assertEqual(1, len(resp.json['executions']))
self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0])
param_dict = utils.get_dict_from_string(
resp.json['next'].split('?')[1],
delimiter='&'
)
expected_dict = {
'marker': '123e4567-e89b-12d3-a456-426655440000',
'limit': 1,
'sort_keys': 'id,workflow_name',
'sort_dirs': 'asc,desc'
}
self.assertDictEqual(expected_dict, param_dict)
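        # In other words (descriptive note), the 'next' link returned by the
        # API carries the pagination state as query parameters: the marker is
        # the id of the last returned execution, while limit, sort_keys and
        # sort_dirs echo the original request.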
def test_get_all_pagination_limit_negative(self):
resp = self.app.get(
'/v2/executions?limit=-1&sort_keys=id&sort_dirs=asc',
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn("Limit must be positive", resp.body.decode())
def test_get_all_pagination_limit_not_integer(self):
resp = self.app.get(
'/v2/executions?limit=1.1&sort_keys=id&sort_dirs=asc',
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn("unable to convert to int", resp.body.decode())
def test_get_all_pagination_invalid_sort_dirs_length(self):
resp = self.app.get(
'/v2/executions?limit=1&sort_keys=id&sort_dirs=asc,asc',
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn(
"Length of sort_keys must be equal or greater than sort_dirs",
resp.body.decode()
)
def test_get_all_pagination_unknown_direction(self):
resp = self.app.get(
'/v2/actions?limit=1&sort_keys=id&sort_dirs=nonexist',
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn("Unknown sort direction", resp.body.decode())
@mock.patch.object(
db_api,
'get_workflow_executions',
MOCK_SUB_WF_EXECUTIONS
)
def test_get_task_workflow_executions(self):
resp = self.app.get(
'/v2/tasks/%s/workflow_executions' % SUB_WF_EX.task_execution_id
)
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['executions']))
self.assertDictEqual(
SUB_WF_EX_JSON_WITH_DESC,
resp.json['executions'][0]
)
@mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS)
@mock.patch.object(rest_utils, 'get_all')
def test_get_all_executions_with_output(self, mock_get_all):
resp = self.app.get('/v2/executions?include_output=true')
self.assertEqual(200, resp.status_int)
args, kwargs = mock_get_all.call_args
resource_function = kwargs['resource_function']
self.assertEqual(
execution._get_workflow_execution_resource,
resource_function
)
@mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS)
@mock.patch.object(rest_utils, 'get_all')
def test_get_all_executions_without_output(self, mock_get_all):
resp = self.app.get('/v2/executions')
self.assertEqual(200, resp.status_int)
args, kwargs = mock_get_all.call_args
resource_function = kwargs['resource_function']
self.assertIsNone(resource_function)
@mock.patch('mistral.db.v2.api.get_workflow_executions')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_projects_admin(self, mock_context, mock_get_execs):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
resp = self.app.get('/v2/executions?all_projects=true')
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_execs.call_args[1].get('insecure', False))
def test_get_all_projects_normal_user(self):
resp = self.app.get(
'/v2/executions?all_projects=true',
expect_errors=True
)
self.assertEqual(403, resp.status_int)
@mock.patch('mistral.db.v2.api.get_workflow_executions')
@mock.patch('mistral.context.MistralContext.from_environ')
def test_get_all_filter_by_project_id(self, mock_context, mock_get_execs):
admin_ctx = unit_base.get_context(admin=True)
mock_context.return_value = admin_ctx
fake_project_id = uuidutils.generate_uuid()
resp = self.app.get('/v2/executions?project_id=%s' % fake_project_id)
self.assertEqual(200, resp.status_int)
self.assertTrue(mock_get_execs.call_args[1].get('insecure', False))
self.assertTrue(
mock_get_execs.call_args[1].get('project_id', fake_project_id)
)
| StackStorm/mistral | mistral/tests/unit/api/v2/test_executions.py | Python | apache-2.0 | 30,260 |
# -*- coding: utf-8 -*-
import os
from nose.tools import with_setup, eq_ as eq, ok_ as ok
from common import vim, cleanup
cid = vim.channel_id
def host_setup():
cleanup()
# Spawn the python host
vim.command(
'let pyhost_id = ' +
'rpcstart("python", ["-c", "import neovim; neovim.start_host()"])')
ok(vim.eval('g:pyhost_id'))
    # Use rpcrequest to wait for the host setup (rpcstart returns a channel
    # id right away, but the channel only registers handlers for the python_*
    # methods after a short while).
ok(vim.eval('rpcrequest(g:pyhost_id, "python_eval", "10")') == 10)
# Verify the feature
ok(vim.eval('has("python")'))
# Import the vim module
vim.command('python import vim')
# Ensure the python host was updated accordingly
ok(vim.eval('pyeval("vim.channel_id") == g:pyhost_id'))
def host_teardown():
ok(vim.eval('rpcstop(g:pyhost_id)'))
# After the channel is closed, the feature should not be available
ok(not vim.eval('has("python")'))
@with_setup(setup=host_setup, teardown=host_teardown)
def test_python_command():
vim.command('python vim.command("let set_by_python = [100,0]")')
eq(vim.vars['set_by_python'], [100, 0])
@with_setup(setup=host_setup, teardown=host_teardown)
def test_python_nested_commands():
nested = """python vim.command('python vim.command("python vim.command(\\'let set_by_nested_python = 555\\')")')"""
vim.command(nested)
eq(vim.vars['set_by_nested_python'], 555)
@with_setup(setup=host_setup, teardown=host_teardown)
def test_pyfile():
fname = 'pyfile.py'
text = 'vim.command("let set_by_pyfile = 123")'
    with open(fname, 'w') as f:
        f.write(text)
vim.command('pyfile pyfile.py')
eq(vim.vars['set_by_pyfile'], 123)
os.unlink(fname)
@with_setup(setup=host_setup, teardown=host_teardown)
def test_pydo():
# :pydo 42 returns None for all lines,
# the buffer should not be changed
vim.command('normal :pydo 42\n')
eq(vim.current.buffer.options['mod'], False)
# insert some text
vim.command('normal iabc\ndef\nghi')
eq(vim.current.buffer[:], ['abc', 'def', 'ghi'])
# go to top and select and replace the first two lines
vim.command('normal ggvj:pydo return str(linenr)\n')
eq(vim.current.buffer[:], ['1', '2', 'ghi'])
@with_setup(setup=host_setup, teardown=host_teardown)
def test_pyeval():
vim.command('let python_expr = pyeval("[1, 2, 3]")')
eq(vim.vars['python_expr'], [1, 2, 3])
| fwalch/python-client | test/test_script_host.py | Python | apache-2.0 | 2,487 |
import bottle
from bottle import route, run, request, abort, debug, template , static_file
import MySQLdb as mdb
@route('/packets', method='GET')
def packets_list():
print "list all received packets and their protocols"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
cursor.execute("SELECT * FROM packets")
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('packets', rows=result)
@route('/home', method='GET')
@route('/', method='GET')
def display_home():
print "home page"
return template('home')
@route('/packets_filter', method='POST')
def packets_list_filtered():
print "list all received packets and their protocols-filtered"
filter_name = request.forms.get('filter_name')
filter_param = request.forms.get('filter_param')
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM packets where %s = '%s'"%(filter_name, filter_param)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/packets')
return template('packets', rows=result)
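# Hypothetical hardening sketch (illustrative only, not wired into any route):
# the filter handlers above and below interpolate user input directly into
# SQL. MySQLdb can bind values safely through %s placeholders; column and
# table names cannot be bound, so they are checked against whitelists here.
# The column names listed are assumptions, not taken from the real schema.
ALLOWED_FILTER_TABLES = ('packets', 'attacks')
ALLOWED_FILTER_COLUMNS = ('sourceip', 'destip', 'protocol')
def query_table_filtered(table, column, value):
    if table not in ALLOWED_FILTER_TABLES or column not in ALLOWED_FILTER_COLUMNS:
        return ()
    dbcon = mdb.connect("localhost", "testuser", "test123", "attackdb")
    cursor = dbcon.cursor()
    # The value is passed separately so the driver escapes it.
    cursor.execute("SELECT * FROM " + table + " WHERE " + column + " = %s",
                   (value,))
    return cursor.fetchall()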
@route('/packets-ip/:ipaddr', method='GET')
def packets_list_by_ip(ipaddr):
    print "list all received packets and their protocols, filtered for the given ip address"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM packets where sourceip = '%s' or destip = '%s'"%(ipaddr, ipaddr)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('packets', rows=result)
@route('/attacks_filter', method='POST')
def attacks_list_filtered():
print "list all attacks-filtered"
filter_name = request.forms.get('filter_name')
filter_param = request.forms.get('filter_param')
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM attacks where %s = '%s'"%(filter_name, filter_param)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/attacks')
return template('packets', rows=result)
@route('/attacks', method='GET')
def attacks_list():
print "list all attacks caught"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
cursor.execute("SELECT * FROM attacks")
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('attacks', rows=result)
@route('/action', method='GET')
def action_list():
print "list all received packets and their protocols"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
cursor.execute("SELECT * FROM packets")
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('action', rows=result)
@route('/rules', method='GET')
def rules_list():
print "list all attacks rules"
fname = '/home/rashmi/RYU295/ryu/lib/ids/rules.txt'
with open(fname) as f:
rules = f.readlines()
return template('rules', rows=rules)
@route('/editRules', method='GET')
def edit_rules():
print "Edit attacks rules"
fname = '/home/rashmi/RYU295/ryu/lib/ids/rules.txt'
with open(fname) as f:
rules = f.read()
print rules
return template('editRules', rows=rules)
@route('/rules', method='POST')
def change_rules():
print "change attacks rules"
post_rules = request.forms.get('rule_data')
print "new rules : ", post_rules
fname = '/home/rashmi/RYU295/ryu/lib/ids/rules.txt'
    with open(fname, 'w') as f:
        f.write(post_rules)
with open(fname) as f:
rules = f.readlines()
return template('rules', rows=rules)
@route('/<filename:re:.*\.css>')
@route('/packets-ip/<filename:re:.*\.css>')
def stylesheets(filename):
return static_file(filename, root='static/css')
@route('/<filename:re:.*\.png>')
@route('/packets-ip/<filename:re:.*\.png>')
def images(filename):
return static_file(filename, root='static/img')
@route('/<filename:re:.*\.js>')
def javascriptFiles(filename):
return static_file(filename, root='static/js')
debug(True)
run(reloader=True)
| Rashminadig/SDN | ryu/Gui/home.py | Python | apache-2.0 | 4,669 |
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report generation class."""
import csv
import datetime
import json
import operator
import os.path
import platform
# external dependencies (from third_party)
import jinja2
from . import addr_util
from . import nameserver
from . import nameserver_list
from . import util
from ..common import charts
from ..common import url_map
# Only bother showing a percentage if we have this many tests.
MIN_RELEVANT_COUNT = 50
class ReportGenerator(object):
"""Generate reports - ASCII, HTML, etc."""
def __init__(self, config, nameservers, results, index=None, geodata=None,
status_callback=None):
"""Constructor.
Args:
config: A dictionary of configuration information.
nameservers: A list of nameserver objects to include in the report.
results: A dictionary of results from Benchmark.Run()
index: A dictionary of results for index hosts.
geodata: A dictionary of geographic information.
status_callback: where to send msg() calls.
"""
self.nameservers = nameservers
self.results = results
self.index = index
self.config = config
self.geodata = geodata
self.status_callback = status_callback
self.cached_averages = {}
self.cached_summary = None
def msg(self, msg, **kwargs):
if self.status_callback:
self.status_callback(msg, **kwargs)
def ComputeAverages(self):
"""Process all runs for all hosts, yielding an average for each host."""
if len(self.results) in self.cached_averages:
return self.cached_averages[len(self.results)]
records = []
for ns in self.results:
if ns.is_disabled or ns.is_hidden:
continue
failure_count = 0
nx_count = 0
run_averages = []
for test_run in self.results[ns]:
# x: record, req_type, duration, response
total_count = len(test_run)
failure_count += len([x for x in test_run if not x[3]])
nx_count += len([x for x in test_run if x[3] and not x[3].answer])
duration = sum([x[2] for x in test_run])
run_averages.append(duration / len(test_run))
# This appears to be a safe use of averaging averages
overall_average = util.CalculateListAverage(run_averages)
(fastest, slowest) = self.FastestAndSlowestDurationForNameServer(ns)
records.append((ns, overall_average, run_averages, fastest, slowest,
failure_count, nx_count, total_count))
self.cached_averages[len(self.results)] = records
return self.cached_averages[len(self.results)]
def FastestAndSlowestDurationForNameServer(self, ns):
"""For a given nameserver, find the fastest/slowest non-error durations."""
fastest_duration = 2**32
slowest_duration = -1
durations = []
for test_run_results in self.results[ns]:
for (unused_host, unused_type, duration, response, unused_error) in test_run_results:
durations.append(duration)
if response and response.answer:
if duration < fastest_duration:
fastest_duration = duration
if duration > slowest_duration:
slowest_duration = duration
# If we have no error-free durations, settle for anything.
if fastest_duration == 2**32:
fastest_duration = min(durations)
if slowest_duration == -1:
slowest_duration = max(durations)
return (fastest_duration, slowest_duration)
def FastestNameServerResult(self):
"""Process all runs for all hosts, yielding an average for each host."""
# TODO(tstromberg): This should not count queries which failed.
fastest = [(ns, self.FastestAndSlowestDurationForNameServer(ns)[0]) for ns in self.results]
return sorted(fastest, key=operator.itemgetter(1))
def BestOverallNameServer(self):
"""Return the best nameserver we found."""
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
hosts = [x[0] for x in sorted_averages]
for host in hosts:
if not host.is_failure_prone and not host.is_disabled:
return host
# return something if none of them are good.
return hosts[0]
def NearestNameServers(self, count=2):
"""Return the nameservers with the least latency."""
min_responses = sorted(self.FastestNameServerResult(),
key=operator.itemgetter(1))
return [x[0] for x in min_responses if not x.is_disabled][0:count]
def _LowestLatencyAsciiChart(self):
"""Return a simple set of tuples to generate an ASCII chart from."""
fastest = self.FastestNameServerResult()
slowest_result = fastest[-1][1]
chart = []
for (ns, duration) in fastest:
textbar = util.DrawTextBar(duration, slowest_result)
chart.append((ns.name, textbar, duration))
return chart
def _MeanRequestAsciiChart(self):
"""Creates an ASCII Chart of Mean Response Time."""
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
max_result = sorted_averages[-1][1]
chart = []
for result in sorted_averages:
(ns, overall_mean) = result[0:2]
textbar = util.DrawTextBar(overall_mean, max_result)
chart.append((ns.name, textbar, overall_mean))
return chart
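  # Each chart row returned by the two helpers above is a (name, textbar,
  # value) tuple; for example a server averaging 72.1 ms against a 100 ms
  # maximum might come back roughly as ('UltraDNS', '###########', 72.1),
  # which the ascii template prints as a horizontal bar chart.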
def CreateReport(self, format='ascii', output_fp=None, csv_path=None,
sharing_url=None, sharing_state=None):
"""Create a Report in a given format.
Args:
format: string (ascii, html, etc.) which defines what template to load.
output_fp: A File object to send the output to (optional)
csv_path: A string pathname to the CSV output to link to (optional)
sharing_url: A string URL where the results have been shared to. (optional)
sharing_state: A string showing what the shared result state is (optional)
Returns:
A rendered template (string)
"""
# First generate all of the charts necessary.
if format == 'ascii':
lowest_latency = self._LowestLatencyAsciiChart()
mean_duration = self._MeanRequestAsciiChart()
else:
lowest_latency = None
mean_duration = None
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
runs_data = [(x[0].name, x[2]) for x in sorted_averages]
mean_duration_url = charts.PerRunDurationBarGraph(runs_data)
min_duration_url = charts.MinimumDurationBarGraph(self.FastestNameServerResult())
distribution_url_200 = charts.DistributionLineGraph(self.DigestedResults(),
scale=200)
distribution_url = charts.DistributionLineGraph(self.DigestedResults(),
scale=self.config.timeout * 1000)
# Now generate all of the required textual information.
ns_summary = self._GenerateNameServerSummary()
best_ns = self.BestOverallNameServer()
recommended = [ns_summary[0]]
for row in sorted(ns_summary, key=operator.itemgetter('duration_min')):
if row['ip'] != ns_summary[0]['ip']:
recommended.append(row)
if len(recommended) == 3:
break
compare_title = 'Undecided'
compare_subtitle = 'Not enough servers to compare.'
compare_reference = None
for ns_record in ns_summary:
if ns_record.get('is_reference'):
if ns_record == ns_summary[0]:
compare_reference = ns_record
compare_title = 'N/A'
compare_subtitle = ''
elif len(ns_record['durations'][0]) >= MIN_RELEVANT_COUNT:
compare_reference = ns_record
compare_title = '%0.1f%%' % ns_summary[0]['diff']
compare_subtitle = 'Faster'
else:
compare_subtitle = 'Too few tests (needs %s)' % (MIN_RELEVANT_COUNT)
break
# Fragile, makes assumption about the CSV being in the same path as the HTML file
if csv_path:
csv_link = os.path.basename(csv_path)
else:
csv_link = None
template_name = '%s.tmpl' % format
template_path = util.FindDataFile(os.path.join('templates', template_name))
filtered_config = self.FilteredConfig()
template_dir = os.path.dirname(template_path)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = env.get_template(template_name)
rendered = template.render(
best_ns=best_ns,
timestamp=datetime.datetime.now(),
lowest_latency=lowest_latency,
version=self.config.version,
compare_subtitle=compare_subtitle,
compare_title=compare_title,
compare_reference=compare_reference,
sharing_url=sharing_url,
sharing_state=sharing_state,
config=filtered_config,
mean_duration=mean_duration,
ns_summary=ns_summary,
mean_duration_url=mean_duration_url,
min_duration_url=min_duration_url,
distribution_url=distribution_url,
distribution_url_200=distribution_url_200,
recommended=recommended,
csv_link=csv_link
)
if output_fp:
output_fp.write(rendered)
output_fp.close()
else:
return rendered
def FilteredConfig(self):
"""Generate a watered down config listing for our report."""
keys = [x for x in dir(self.config) if not x.startswith('_') and x not in ('config', 'site_url')]
config_items = []
for key in keys:
value = getattr(self.config, key)
# > values are ConfigParser internals. None values are just noise.
if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
config_items.append((key, value))
return sorted(config_items)
def DigestedResults(self):
"""Return a tuple of nameserver and all associated durations."""
duration_data = []
for ns in self.results:
durations = []
for test_run_results in self.results[ns]:
durations += [x[2] for x in test_run_results]
duration_data.append((ns, durations))
return duration_data
def _GenerateNameServerSummary(self):
if self.cached_summary:
return self.cached_summary
nsdata = {}
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
placed_at = -1
fastest = {}
fastest_normal = {}
reference = {}
# Fill in basic information for all nameservers, even those without scores.
fake_position = 1000
for ns in sorted(self.nameservers.visible_servers, key=operator.attrgetter('check_average')):
if ns.is_hidden:
continue
fake_position += 1
nsdata[ns] = {
'ip': ns.ip,
'name': ns.name,
'hostname': ns.hostname,
'version': ns.version,
'node_ids': list(ns.node_ids),
'sys_position': ns.system_position,
'is_failure_prone': ns.is_failure_prone,
'duration_min': float(ns.fastest_check_duration),
'is_reference': False,
'is_disabled': ns.is_disabled,
'check_average': ns.check_average,
'error_count': ns.error_count,
'timeout_count': ns.timeout_count,
'notes': url_map.CreateNoteUrlTuples(ns.notes),
'position': fake_position
}
# Fill the scores in.
for (ns, overall_average, run_averages, fastest, slowest, unused_failures, nx_count, unused_total) in sorted_averages:
placed_at += 1
durations = []
for _ in self.results[ns]:
durations.append([x[2] for x in self.results[ns][0]])
nsdata[ns].update({
'position': placed_at,
'overall_average': overall_average,
'cdn_result_min': ns.cdn_ping_min,
'cdn_result_avg': ns.cdn_ping_avg,
'cdn_result_max': ns.cdn_ping_max,
'averages': run_averages,
'duration_min': float(fastest),
'duration_max': slowest,
'nx_count': nx_count,
'durations': durations,
'index': self._GenerateIndexSummary(ns),
})
# Determine which nameserver to refer to for improvement scoring
if not ns.is_disabled:
if ns.system_position == 0:
reference = ns
elif not fastest_normal and not ns.HasTag('preferred'):
fastest_normal = ns
# If no reference was found, use the fastest non-global nameserver record.
if not reference:
if fastest_normal:
reference = fastest_normal
else:
# The second ns.
if len(sorted_averages) > 1:
reference = sorted_averages[1][0]
# Update the improvement scores for each nameserver.
if reference:
for ns in nsdata:
if nsdata[ns]['ip'] != nsdata[reference]['ip']:
if 'overall_average' in nsdata[ns]:
nsdata[ns]['diff'] = ((nsdata[reference]['overall_average'] /
nsdata[ns]['overall_average']) - 1) * 100
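            # Worked example (illustrative): with a reference averaging 60 ms
            # and this server averaging 45 ms, diff = ((60 / 45) - 1) * 100,
            # i.e. roughly 33.3, meaning 33.3% faster than the reference.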
else:
nsdata[ns]['is_reference'] = True
self.cached_summary = sorted(list(nsdata.values()), key=operator.itemgetter('position'))
return self.cached_summary
def _GenerateIndexSummary(self, ns):
# Get the meat out of the index data.
index = []
if ns in self.index:
for host, req_type, duration, response, unused_x in self.index[ns]:
answer_count, ttl = self._ResponseToCountTtlText(response)[0:2]
index.append((host, req_type, duration, answer_count, ttl,
nameserver.ResponseToAscii(response)))
return index
def _GetPlatform(self):
my_platform = platform.system()
if my_platform == 'Darwin':
if os.path.exists('/usr/sbin/sw_vers') or os.path.exists('/usr/sbin/system_profiler'):
my_platform = 'Mac OS X'
if my_platform == 'Linux':
distro = platform.dist()[0]
if distro:
my_platform = 'Linux (%s)' % distro
return my_platform
def _CreateSharingData(self):
config = dict(self.FilteredConfig())
config['platform'] = self._GetPlatform()
# Purge sensitive information (be aggressive!)
purged_rows = []
for row in self._GenerateNameServerSummary():
# This will be our censored record.
p = dict(row)
p['notes'] = []
for note in row['notes']:
p['notes'].append({'text': addr_util.MaskStringWithIPs(note['text']), 'url': note['url']})
p['ip'], p['hostname'], p['name'] = addr_util.MaskPrivateHost(row['ip'], row['hostname'], row['name'])
if (addr_util.IsPrivateIP(row['ip']) or addr_util.IsLoopbackIP(row['ip'])
or addr_util.IsPrivateHostname(row['hostname'])):
p['node_ids'] = []
p['version'] = None
purged_rows.append(p)
return {'config': config, 'nameservers': purged_rows, 'geodata': self.geodata}
def CreateJsonData(self):
sharing_data = self._CreateSharingData()
return json.dumps(sharing_data)
def _ResponseToCountTtlText(self, response):
"""For a given DNS response, parse the most important details out.
Args:
response: DNS response
Returns:
tuple of (answer_count, ttl, answer_text)
"""
answer_text = ''
answer_count = -1
ttl = -1
if response:
if response.answer:
answer_count = len(response.answer)
ttl = response.answer[0].ttl
answer_text = nameserver.ResponseToAscii(response)
return (answer_count, ttl, answer_text)
def SaveResultsToCsv(self, filename):
"""Write out a CSV file with detailed results on each request.
Args:
      filename: full path where results should be saved (string)
    Sample output columns:
      IP, Name, Test_Num, Record, Record_Type, Duration, TTL, Answer_Count,
      Response, Error_Msg
"""
self.msg('Opening %s for write' % filename, debug=True)
csv_file = open(filename, 'w')
output = csv.writer(csv_file)
    output.writerow(['IP', 'Name', 'Test_Num', 'Record',
                     'Record_Type', 'Duration', 'TTL', 'Answer_Count',
                     'Response', 'Error_Msg'])
for ns in self.results:
self.msg('Saving detailed data for %s' % ns, debug=True)
for (test_run, test_results) in enumerate(self.results[ns]):
for (record, req_type, duration, response, error_msg) in test_results:
(answer_count, ttl, answer_text) = self._ResponseToCountTtlText(response)
output.writerow([ns.ip, ns.name, test_run, record, req_type, duration,
ttl, answer_count, answer_text, error_msg])
csv_file.close()
self.msg('%s saved.' % filename, debug=True)
| rogers0/namebench | namebench/client/reporter.py | Python | apache-2.0 | 16,903 |
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Runs the local presubmit checks for the Pigweed repository."""
import argparse
import json
import logging
import os
from pathlib import Path
import re
import subprocess
import sys
from typing import Sequence, IO, Tuple, Optional, Callable, List
try:
import pw_presubmit
except ImportError:
# Append the pw_presubmit package path to the module search path to allow
# running this module without installing the pw_presubmit package.
sys.path.append(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
import pw_presubmit
import pw_package.pigweed_packages
from pw_presubmit import (
build,
cli,
cpp_checks,
format_code,
git_repo,
call,
filter_paths,
inclusive_language,
plural,
PresubmitContext,
PresubmitFailure,
Programs,
python_checks,
)
from pw_presubmit.install_hook import install_hook
_LOG = logging.getLogger(__name__)
pw_package.pigweed_packages.initialize()
# Trigger builds if files with these extensions change.
_BUILD_EXTENSIONS = ('.py', '.rst', '.gn', '.gni',
*format_code.C_FORMAT.extensions)
def _at_all_optimization_levels(target):
for level in ('debug', 'size_optimized', 'speed_optimized'):
yield f'{target}_{level}'
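    # For example, _at_all_optimization_levels('host_clang') yields
    # 'host_clang_debug', 'host_clang_size_optimized' and
    # 'host_clang_speed_optimized'; the gn_* checks below use this to fan out
    # over every optimization level of a target.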
#
# Build presubmit checks
#
def gn_clang_build(ctx: PresubmitContext):
build.gn_gen(ctx.root,
ctx.output_dir,
pw_RUN_INTEGRATION_TESTS=(sys.platform != 'win32'))
build.ninja(ctx.output_dir, *_at_all_optimization_levels('host_clang'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_gcc_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('host_gcc'))
_HOST_COMPILER = 'gcc' if sys.platform == 'win32' else 'clang'
def gn_host_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir,
*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_quick_build_check(ctx: PresubmitContext):
"""Checks the state of the GN build by running gn gen and gn check."""
build.gn_gen(ctx.root, ctx.output_dir)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_full_build_check(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('stm32f429i'),
*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
'python.tests', 'python.lint', 'docs', 'fuzzers',
'pw_env_setup:build_pigweed_python_source_tree')
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_full_qemu_check(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(
ctx.output_dir,
*_at_all_optimization_levels('qemu_gcc'),
# TODO(pwbug/321) Re-enable clang.
# *_at_all_optimization_levels('qemu_clang'),
)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_arm_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('stm32f429i'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def stm32f429i(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir, pw_use_test_server=True)
with build.test_server('stm32f429i_disc1_test_server', ctx.output_dir):
build.ninja(ctx.output_dir, *_at_all_optimization_levels('stm32f429i'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_boringssl_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'boringssl')
build.gn_gen(ctx.root,
ctx.output_dir,
dir_pw_third_party_boringssl='"{}"'.format(ctx.package_root /
'boringssl'))
build.ninja(
ctx.output_dir,
*_at_all_optimization_levels('stm32f429i'),
*_at_all_optimization_levels('host_clang'),
)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_nanopb_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'nanopb')
build.gn_gen(ctx.root,
ctx.output_dir,
dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root /
'nanopb'))
build.ninja(
ctx.output_dir,
*_at_all_optimization_levels('stm32f429i'),
*_at_all_optimization_levels('host_clang'),
)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_crypto_mbedtls_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'mbedtls')
build.gn_gen(
ctx.root,
ctx.output_dir,
dir_pw_third_party_mbedtls='"{}"'.format(ctx.package_root / 'mbedtls'),
pw_crypto_SHA256_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:sha256_mbedtls'),
pw_crypto_ECDSA_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:ecdsa_mbedtls'))
build.ninja(ctx.output_dir)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_crypto_boringssl_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'boringssl')
build.gn_gen(
ctx.root,
ctx.output_dir,
dir_pw_third_party_boringssl='"{}"'.format(ctx.package_root /
'boringssl'),
pw_crypto_SHA256_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:sha256_boringssl'),
pw_crypto_ECDSA_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:ecdsa_boringssl'),
)
build.ninja(ctx.output_dir)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_crypto_micro_ecc_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'micro-ecc')
build.gn_gen(
ctx.root,
ctx.output_dir,
dir_pw_third_party_micro_ecc='"{}"'.format(ctx.package_root /
'micro-ecc'),
pw_crypto_ECDSA_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:ecdsa_uecc'),
)
build.ninja(ctx.output_dir)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_teensy_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'teensy')
build.gn_gen(ctx.root,
ctx.output_dir,
pw_arduino_build_CORE_PATH='"{}"'.format(str(
ctx.package_root)),
pw_arduino_build_CORE_NAME='teensy',
pw_arduino_build_PACKAGE_NAME='teensy/avr',
pw_arduino_build_BOARD='teensy40')
build.ninja(ctx.output_dir, *_at_all_optimization_levels('arduino'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_software_update_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'nanopb')
build.install_package(ctx.package_root, 'protobuf')
build.install_package(ctx.package_root, 'mbedtls')
build.install_package(ctx.package_root, 'micro-ecc')
build.gn_gen(
ctx.root,
ctx.output_dir,
dir_pw_third_party_protobuf='"{}"'.format(ctx.package_root /
'protobuf'),
dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root / 'nanopb'),
dir_pw_third_party_micro_ecc='"{}"'.format(ctx.package_root /
'micro-ecc'),
pw_crypto_ECDSA_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:ecdsa_uecc'),
dir_pw_third_party_mbedtls='"{}"'.format(ctx.package_root / 'mbedtls'),
pw_crypto_SHA256_BACKEND='"{}"'.format(ctx.root /
'pw_crypto:sha256_mbedtls'))
build.ninja(
ctx.output_dir,
*_at_all_optimization_levels('host_clang'),
)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_pw_system_demo_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'freertos')
build.install_package(ctx.package_root, 'nanopb')
build.install_package(ctx.package_root, 'stm32cube_f4')
build.gn_gen(
ctx.root,
ctx.output_dir,
dir_pw_third_party_freertos='"{}"'.format(ctx.package_root /
'freertos'),
dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root / 'nanopb'),
dir_pw_third_party_stm32cube_f4='"{}"'.format(ctx.package_root /
'stm32cube_f4'),
)
build.ninja(ctx.output_dir, 'pw_system_demo')
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_qemu_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('qemu_gcc'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_qemu_clang_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('qemu_clang'))
def gn_docs_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, 'docs')
def gn_host_tools(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, 'host_tools')
@filter_paths(endswith=format_code.C_FORMAT.extensions)
def oss_fuzz_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir, pw_toolchain_OSS_FUZZ_ENABLED=True)
build.ninja(ctx.output_dir, "fuzzers")
def _run_cmake(ctx: PresubmitContext, toolchain='host_clang') -> None:
build.install_package(ctx.package_root, 'nanopb')
env = None
if 'clang' in toolchain:
env = build.env_with_clang_vars()
toolchain_path = ctx.root / 'pw_toolchain' / toolchain / 'toolchain.cmake'
build.cmake(ctx.root,
ctx.output_dir,
f'-DCMAKE_TOOLCHAIN_FILE={toolchain_path}',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=1',
f'-Ddir_pw_third_party_nanopb={ctx.package_root / "nanopb"}',
'-Dpw_third_party_nanopb_ADD_SUBDIRECTORY=ON',
env=env)
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.cmake',
'CMakeLists.txt'))
def cmake_clang(ctx: PresubmitContext):
_run_cmake(ctx, toolchain='host_clang')
build.ninja(ctx.output_dir, 'pw_apps', 'pw_run_tests.modules')
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.cmake',
'CMakeLists.txt'))
def cmake_gcc(ctx: PresubmitContext):
_run_cmake(ctx, toolchain='host_gcc')
build.ninja(ctx.output_dir, 'pw_apps', 'pw_run_tests.modules')
# TODO(pwbug/180): Slowly add modules here that work with bazel until all
# modules are added. Then replace with //...
_MODULES_THAT_BUILD_WITH_BAZEL = [
'//pw_allocator/...',
'//pw_analog/...',
'//pw_assert/...',
'//pw_assert_basic/...',
'//pw_assert_log/...',
'//pw_base64/...',
'//pw_bloat/...',
'//pw_build/...',
'//pw_checksum/...',
'//pw_chrono_embos/...',
'//pw_chrono_freertos/...',
'//pw_chrono_stl/...',
'//pw_chrono_threadx/...',
'//pw_cli/...',
'//pw_containers/...',
'//pw_cpu_exception/...',
'//pw_docgen/...',
'//pw_doctor/...',
'//pw_env_setup/...',
'//pw_fuzzer/...',
'//pw_hex_dump/...',
'//pw_i2c/...',
'//pw_interrupt/...',
'//pw_interrupt_cortex_m/...',
'//pw_libc/...',
'//pw_log/...',
'//pw_log_basic/...',
'//pw_malloc/...',
'//pw_malloc_freelist/...',
'//pw_multisink/...',
'//pw_polyfill/...',
'//pw_preprocessor/...',
'//pw_protobuf/...',
'//pw_protobuf_compiler/...',
'//pw_random/...',
'//pw_result/...',
'//pw_rpc/...',
'//pw_span/...',
'//pw_status/...',
'//pw_stream/...',
'//pw_string/...',
'//pw_sync_baremetal/...',
'//pw_sync_embos/...',
'//pw_sync_freertos/...',
'//pw_sync_stl/...',
'//pw_sync_threadx/...',
'//pw_sys_io/...',
'//pw_sys_io_baremetal_lm3s6965evb/...',
'//pw_sys_io_baremetal_stm32f429/...',
'//pw_sys_io_stdio/...',
'//pw_thread_stl/...',
'//pw_tool/...',
'//pw_toolchain/...',
'//pw_transfer/...',
'//pw_unit_test/...',
'//pw_varint/...',
'//pw_web_ui/...',
]
# TODO(pwbug/180): Slowly add modules here that work with bazel until all
# modules are added. Then replace with //...
_MODULES_THAT_TEST_WITH_BAZEL = [
'//pw_allocator/...',
'//pw_analog/...',
'//pw_assert/...',
'//pw_base64/...',
'//pw_checksum/...',
'//pw_cli/...',
'//pw_containers/...',
'//pw_hex_dump/...',
'//pw_i2c/...',
'//pw_libc/...',
'//pw_log/...',
'//pw_multisink/...',
'//pw_polyfill/...',
'//pw_preprocessor/...',
'//pw_protobuf/...',
'//pw_protobuf_compiler/...',
'//pw_random/...',
'//pw_result/...',
'//pw_rpc/...',
'//pw_span/...',
'//pw_status/...',
'//pw_stream/...',
'//pw_string/...',
'//pw_thread_stl/...',
'//pw_unit_test/...',
'//pw_varint/...',
'//:buildifier_test',
]
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.bazel', '.bzl',
'BUILD'))
def bazel_test(ctx: PresubmitContext) -> None:
"""Runs bazel test on each bazel compatible module"""
build.bazel(ctx, 'test', *_MODULES_THAT_TEST_WITH_BAZEL,
'--test_output=errors')
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.bazel', '.bzl',
'BUILD'))
def bazel_build(ctx: PresubmitContext) -> None:
"""Runs Bazel build on each Bazel compatible module."""
build.bazel(ctx, 'build', *_MODULES_THAT_BUILD_WITH_BAZEL)
#
# General presubmit checks
#
def _clang_system_include_paths(lang: str) -> List[str]:
"""Generate default system header paths.
Returns the list of system include paths used by the host
clang installation.
"""
# Dump system include paths with preprocessor verbose.
command = [
'clang++', '-Xpreprocessor', '-v', '-x', f'{lang}', f'{os.devnull}',
'-fsyntax-only'
]
process = subprocess.run(command,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Parse the command output to retrieve system include paths.
# The paths are listed one per line.
output = process.stdout.decode(errors='backslashreplace')
include_paths: List[str] = []
for line in output.splitlines():
path = line.strip()
if os.path.exists(path):
include_paths.append(f'-isystem{path}')
return include_paths
def edit_compile_commands(in_path: Path, out_path: Path,
func: Callable[[str, str, str], str]) -> None:
"""Edit the selected compile command file.
Calls the input callback on all triplets (file, directory, command) in
the input compile commands database. The return value replaces the old
compile command in the output database.
"""
with open(in_path) as in_file:
compile_commands = json.load(in_file)
for item in compile_commands:
item['command'] = func(item['file'], item['directory'],
item['command'])
with open(out_path, 'w') as out_file:
json.dump(compile_commands, out_file, indent=2)
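# Hypothetical usage sketch for edit_compile_commands (nothing in this script
# calls it this way; the flag and file names are illustrative assumptions):
# the callback receives (file, directory, command) and returns the command
# string to store back into the database.
def _append_flag_example(file_name: str, directory: str, command: str) -> str:
    return command + ' -Wno-unused-parameter'
# edit_compile_commands(Path('compile_commands.json'),
#                       Path('compile_commands.edited.json'),
#                       _append_flag_example)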
# The first line must be a regex because of the '20\d\d' date.
COPYRIGHT_FIRST_LINE = r'Copyright 20\d\d The Pigweed Authors'
COPYRIGHT_COMMENTS = r'(#|//| \*|REM|::)'
COPYRIGHT_BLOCK_COMMENTS = (
# HTML comments
(r'<!--', r'-->'),
# Jinja comments
(r'{#', r'#}'),
)
COPYRIGHT_FIRST_LINE_EXCEPTIONS = (
'#!',
'/*',
'@echo off',
'# -*-',
':',
)
COPYRIGHT_LINES = tuple("""\
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
""".splitlines())
_EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
# Configuration
r'^(?:.+/)?\..+$',
r'\bPW_PLUGINS$',
r'\bconstraint.list$',
# Metadata
r'^docker/tag$',
r'\bAUTHORS$',
r'\bLICENSE$',
r'\bOWNERS$',
r'\bPIGWEED_MODULES$',
r'\brequirements.txt$',
r'\bgo.(mod|sum)$',
r'\bpackage.json$',
r'\byarn.lock$',
# Data files
r'\.elf$',
r'\.gif$',
r'\.jpg$',
r'\.json$',
r'\.png$',
r'\.svg$',
# Documentation
r'\.md$',
r'\.rst$',
# Generated protobuf files
r'\.pb\.h$',
r'\.pb\.c$',
r'\_pb2.pyi?$',
# Diff/Patch files
r'\.diff$',
r'\.patch$',
)
def match_block_comment_start(line: str) -> Optional[str]:
"""Matches the start of a block comment and returns the end."""
for block_comment in COPYRIGHT_BLOCK_COMMENTS:
if re.match(block_comment[0], line):
# Return the end of the block comment
return block_comment[1]
return None
def copyright_read_first_line(
file: IO) -> Tuple[Optional[str], Optional[str], Optional[str]]:
"""Reads the file until it reads a valid first copyright line.
Returns (comment, block_comment, line). comment and block_comment are
mutually exclusive and refer to the comment character sequence and whether
they form a block comment or a line comment. line is the first line of
the copyright, and is used for error reporting.
"""
line = file.readline()
first_line_matcher = re.compile(COPYRIGHT_COMMENTS + ' ' +
COPYRIGHT_FIRST_LINE)
while line:
end_block_comment = match_block_comment_start(line)
if end_block_comment:
next_line = file.readline()
copyright_line = re.match(COPYRIGHT_FIRST_LINE, next_line)
if not copyright_line:
return (None, None, line)
return (None, end_block_comment, line)
first_line = first_line_matcher.match(line)
if first_line:
return (first_line.group(1), None, line)
if (line.strip()
and not line.startswith(COPYRIGHT_FIRST_LINE_EXCEPTIONS)):
return (None, None, line)
line = file.readline()
return (None, None, None)
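# For example (illustrative): a header beginning with '# Copyright 2021 The
# Pigweed Authors' yields ('#', None, line); an HTML file whose '<!--' block
# comment wraps the copyright yields (None, '-->', line); and a file with no
# recognizable copyright line yields (None, None, offending_line_or_None).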
@filter_paths(exclude=_EXCLUDE_FROM_COPYRIGHT_NOTICE)
def copyright_notice(ctx: PresubmitContext):
"""Checks that the Pigweed copyright notice is present."""
errors = []
for path in ctx.paths:
if path.stat().st_size == 0:
continue # Skip empty files
if path.is_dir():
continue # Skip submodules which are included in ctx.paths.
with path.open() as file:
(comment, end_block_comment,
line) = copyright_read_first_line(file)
if not line:
_LOG.warning('%s: invalid first line', path)
errors.append(path)
continue
if not (comment or end_block_comment):
_LOG.warning('%s: invalid first line %r', path, line)
errors.append(path)
continue
if end_block_comment:
expected_lines = COPYRIGHT_LINES + (end_block_comment, )
else:
expected_lines = COPYRIGHT_LINES
for expected, actual in zip(expected_lines, file):
if end_block_comment:
expected_line = expected + '\n'
elif comment:
expected_line = (comment + ' ' + expected).rstrip() + '\n'
if expected_line != actual:
_LOG.warning(' bad line: %r', actual)
_LOG.warning(' expected: %r', expected_line)
errors.append(path)
break
if errors:
_LOG.warning('%s with a missing or incorrect copyright notice:\n%s',
plural(errors, 'file'), '\n'.join(str(e) for e in errors))
raise PresubmitFailure
_BAZEL_SOURCES_IN_BUILD = tuple(format_code.C_FORMAT.extensions)
_GN_SOURCES_IN_BUILD = ('setup.cfg', '.toml', '.rst', '.py',
*_BAZEL_SOURCES_IN_BUILD)
@filter_paths(endswith=(*_GN_SOURCES_IN_BUILD, 'BUILD', '.bzl', '.gn', '.gni'),
exclude=['zephyr.*/', 'android.*/'])
def source_is_in_build_files(ctx: PresubmitContext):
"""Checks that source files are in the GN and Bazel builds."""
missing = build.check_builds_for_files(
_BAZEL_SOURCES_IN_BUILD,
_GN_SOURCES_IN_BUILD,
ctx.paths,
bazel_dirs=[ctx.root],
gn_build_files=git_repo.list_files(pathspecs=['BUILD.gn', '*BUILD.gn'],
repo_path=ctx.root))
if missing:
_LOG.warning(
'All source files must appear in BUILD and BUILD.gn files')
raise PresubmitFailure
_run_cmake(ctx)
cmake_missing = build.check_compile_commands_for_files(
ctx.output_dir / 'compile_commands.json',
(f for f in ctx.paths if f.suffix in ('.c', '.cc')))
if cmake_missing:
_LOG.warning('The CMake build is missing %d files', len(cmake_missing))
_LOG.warning('Files missing from CMake:\n%s',
'\n'.join(str(f) for f in cmake_missing))
# TODO(hepler): Many files are missing from the CMake build. Make this
# check an error when the missing files are fixed.
# raise PresubmitFailure
def build_env_setup(ctx: PresubmitContext):
if 'PW_CARGO_SETUP' not in os.environ:
_LOG.warning(
'Skipping build_env_setup since PW_CARGO_SETUP is not set')
return
tmpl = ctx.root.joinpath('pw_env_setup', 'py', 'pyoxidizer.bzl.tmpl')
out = ctx.output_dir.joinpath('pyoxidizer.bzl')
with open(tmpl, 'r') as ins:
cfg = ins.read().replace('${PW_ROOT}', str(ctx.root))
with open(out, 'w') as outs:
outs.write(cfg)
call('pyoxidizer', 'build', cwd=ctx.output_dir)
def commit_message_format(_: PresubmitContext):
"""Checks that the top commit's message is correctly formatted."""
lines = git_repo.commit_message().splitlines()
# Show limits and current commit message in log.
_LOG.debug('%-25s%+25s%+22s', 'Line limits', '72|', '72|')
for line in lines:
_LOG.debug(line)
    if not lines:
        _LOG.error('The commit message is too short!')
        raise PresubmitFailure
    # Ignore Gerrit-generated reverts. (Checked after the empty-message guard
    # above so that lines[0] is always safe to access.)
    if ('Revert' in lines[0]
            and 'This reverts commit ' in git_repo.commit_message()
            and 'Reason for revert: ' in git_repo.commit_message()):
        _LOG.warning('Ignoring apparent Gerrit-generated revert')
        return
errors = 0
if len(lines[0]) > 72:
_LOG.warning("The commit message's first line must be no longer than "
'72 characters.')
_LOG.warning('The first line is %d characters:\n %s', len(lines[0]),
lines[0])
errors += 1
if lines[0].endswith('.'):
_LOG.warning(
"The commit message's first line must not end with a period:\n %s",
lines[0])
errors += 1
if len(lines) > 1 and lines[1]:
_LOG.warning("The commit message's second line must be blank.")
_LOG.warning('The second line has %d characters:\n %s', len(lines[1]),
lines[1])
errors += 1
# Check that the lines are 72 characters or less, but skip any lines that
# might possibly have a URL, path, or metadata in them. Also skip any lines
# with non-ASCII characters.
for i, line in enumerate(lines[2:], 3):
if any(c in line for c in ':/>') or not line.isascii():
continue
if len(line) > 72:
_LOG.warning(
'Commit message lines must be no longer than 72 characters.')
_LOG.warning('Line %d has %d characters:\n %s', i, len(line),
line)
errors += 1
if errors:
_LOG.error('Found %s in the commit message', plural(errors, 'error'))
raise PresubmitFailure
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.py'))
def static_analysis(ctx: PresubmitContext):
"""Runs all available static analysis tools."""
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, 'python.lint', 'static_analysis')
def renode_check(ctx: PresubmitContext):
"""Placeholder for future check."""
_LOG.info('%s %s', ctx.root, ctx.output_dir)
#
# Presubmit check programs
#
OTHER_CHECKS = (
cpp_checks.all_sanitizers(),
# Build that attempts to duplicate the build OSS-Fuzz does. Currently
# failing.
oss_fuzz_build,
# TODO(pwbug/346): Enable all Bazel tests when they're fixed.
bazel_test,
cmake_clang,
cmake_gcc,
gn_boringssl_build,
build.gn_gen_check,
gn_nanopb_build,
gn_crypto_mbedtls_build,
gn_crypto_boringssl_build,
gn_crypto_micro_ecc_build,
gn_software_update_build,
gn_full_build_check,
gn_full_qemu_check,
gn_clang_build,
gn_gcc_build,
gn_pw_system_demo_build,
renode_check,
stm32f429i,
)
_LINTFORMAT = (
commit_message_format,
copyright_notice,
format_code.presubmit_checks(),
inclusive_language.inclusive_language.with_filter(
exclude=(r'\byarn.lock$', )),
cpp_checks.pragma_once,
build.bazel_lint,
source_is_in_build_files,
)
LINTFORMAT = (
_LINTFORMAT,
static_analysis,
pw_presubmit.python_checks.check_python_versions,
pw_presubmit.python_checks.gn_python_lint,
)
QUICK = (
_LINTFORMAT,
gn_quick_build_check,
    # TODO(pwbug/141): Re-enable CMake and Bazel for Mac after we have fixed
    # the clang issues. The problem is that all clang++ invocations need the
# two extra flags: "-nostdc++" and "${clang_prefix}/../lib/libc++.a".
cmake_clang if sys.platform != 'darwin' else (),
)
FULL = (
_LINTFORMAT,
gn_host_build,
gn_arm_build,
gn_docs_build,
gn_host_tools,
bazel_test if sys.platform == 'linux' else (),
bazel_build if sys.platform == 'linux' else (),
# On Mac OS, system 'gcc' is a symlink to 'clang' by default, so skip GCC
# host builds on Mac for now. Skip it on Windows too, since gn_host_build
# already uses 'gcc' on Windows.
gn_gcc_build if sys.platform not in ('darwin', 'win32') else (),
# Windows doesn't support QEMU yet.
gn_qemu_build if sys.platform != 'win32' else (),
gn_qemu_clang_build if sys.platform != 'win32' else (),
source_is_in_build_files,
python_checks.gn_python_check,
python_checks.gn_python_test_coverage,
build_env_setup,
# Skip gn_teensy_build if running on Windows. The Teensycore installer is
# an exe that requires an admin role.
gn_teensy_build if sys.platform in ['linux', 'darwin'] else (),
)
PROGRAMS = Programs(
full=FULL,
lintformat=LINTFORMAT,
other_checks=OTHER_CHECKS,
quick=QUICK,
)
def parse_args() -> argparse.Namespace:
"""Creates an argument parser and parses arguments."""
parser = argparse.ArgumentParser(description=__doc__)
cli.add_arguments(parser, PROGRAMS, 'quick')
parser.add_argument(
'--install',
action='store_true',
help='Install the presubmit as a Git pre-push hook and exit.')
return parser.parse_args()
def run(install: bool, **presubmit_args) -> int:
"""Entry point for presubmit."""
if install:
# TODO(pwbug/209, pwbug/386) inclusive-language: disable
install_hook(__file__, 'pre-push',
['--base', 'origin/master..HEAD', '--program', 'quick'],
Path.cwd())
# TODO(pwbug/209, pwbug/386) inclusive-language: enable
return 0
return cli.run(**presubmit_args)
def main() -> int:
"""Run the presubmit for the Pigweed repository."""
return run(**vars(parse_args()))
if __name__ == '__main__':
try:
# If pw_cli is available, use it to initialize logs.
from pw_cli import log
log.install(logging.INFO)
except ImportError:
# If pw_cli isn't available, display log messages like a simple print.
logging.basicConfig(format='%(message)s', level=logging.INFO)
sys.exit(main())
|
google/pigweed
|
pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
|
Python
|
apache-2.0
| 29,379
|
#!/usr/bin/env python
'''
This script parses a JSON file output by Karma which contains mentions
and converts these to features and outputs a single FC chunk file.
Its architecture is that of a mini pipeline: define global functions
prefixed with ``trans_`` and run them using the ``--transforms`` command
line option (which may be repeated).
'''
from __future__ import absolute_import, division, print_function
import argparse
from collections import defaultdict
import json
import os
import io
from dossier.fc import \
FeatureCollection, FeatureCollectionCborChunk, StringCounter
HTML_TABLE = u'<table><tr><th>Attr</th><th>Values</th></tr>{rows}</table>'
HTML_TR = u'<tr><td>{attr}</td><td>{vals}</td></tr>'
def group_mentions_as_features(jsonfile):
'''
    Groups features from the JSON file produced by Karma (the records use
    JSON-LD-style keys such as '@id' and 'schema:mentions' rather than a flat
    JSON structure).
'''
grouped = defaultdict(dict)
json_array = json.load(jsonfile, "utf-8")
for item in json_array:
obj_id = item['@id']
if 'schema:mentions' in item:
mentions = item['schema:mentions']
if isinstance(mentions, list) and len(mentions) > 1:
for mention in item['schema:mentions']:
feature = mention['memex:feature']
feature_splits = feature.rsplit('/', 2)
featname = feature_splits[1]
featval = feature_splits[2]
grouped[obj_id].setdefault(featname, []).append(featval)
else:
# This means mentions is a single object
mention = mentions
feature = mention['memex:feature']
feature_splits = feature.rsplit('/', 2)
featname = feature_splits[1]
featval = feature_splits[2]
grouped[obj_id].setdefault(featname, []).append(featval)
return grouped
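# Illustrative example (made-up values): a mention whose 'memex:feature' URI
# ends in '.../haircolor/blonde' is grouped as
# grouped[ad_id]['haircolor'] == ['blonde'], with repeated values appended in
# order of appearance.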
def trans_display(fc, adid, attrvals):
'''
:type fc: FeatureCollection
:type adid: str
:type attrvals: featname |--> [featval]
'''
rows = []
for attr, vals in attrvals.iteritems():
rows.append(HTML_TR.format(attr=attr, vals=', '.join(vals)))
fc['display'] = HTML_TABLE.format(rows='\n'.join(rows))
def trans_features(fc, adid, attrvals):
'''
create features from json file
also create Bag of Soft Biometric traits (bosb)
:type fc: FeatureCollection
:type adid: str
:type attrvals: featname |--> [featval]
'''
## this list can be adjusted as needed
appearance_set = set(['weight', 'height', 'ethnicity', 'age', 'hair',
'hairlength', 'hairtype', 'tattoos', 'build', 'cup',
'grooming', 'implants', 'piercings' ])
## create bosb
fc['bosb'] = StringCounter()
for attr, vals in attrvals.iteritems():
fc[attr] = StringCounter()
for val in vals:
## create a feature for each attribute
fc[attr][val] += 1
## populate bosb
if attr in appearance_set:
for val in vals:
feature = attr + '-' + val
fc['bosb'][feature] += 1
if __name__ == '__main__':
p = argparse.ArgumentParser(
description='Convert Karma JSON file to FeatureCollections.')
p.add_argument('karma_json', metavar='JSON_FILE')
p.add_argument('fc_chunk', metavar='FC_CHUNK_FILE')
    p.add_argument('--transforms', action='append', default=[])
p.add_argument('--overwrite', action='store_true')
args = p.parse_args()
if args.overwrite:
try:
os.unlink(args.fc_chunk)
except OSError:
pass
with open(args.karma_json) as fjson:
grouped = group_mentions_as_features(fjson)
chunk = FeatureCollectionCborChunk(path=args.fc_chunk, mode='wb')
for adid, attrvals in grouped.iteritems():
fc = FeatureCollection()
fc['adid'] = adid
fc['attrvals'] = json.dumps(attrvals).decode('utf-8')
fc['NAME'] = StringCounter({adid: 1})
for trans in args.transforms:
globals()['trans_%s' % trans](fc, adid, attrvals)
chunk.add(fc)
chunk.flush()
|
usc-isi-i2/dig-features
|
extract-mentions-features.py
|
Python
|
apache-2.0
| 4,132
|
#!/usr/bin/env python
# encoding: utf-8
"""
valid_parentheses.py
Created by Shengwei on 2014-07-24.
"""
# https://oj.leetcode.com/problems/valid-parentheses/
# tags: easy, array, parentheses, stack
"""
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
class Solution:
# @return a boolean
def isValid(self, s):
mappings = {')': '(', ']': '[', '}': '{'}
stack = []
for par in s:
if par in mappings.values():
stack.append(par)
elif stack and stack[-1] == mappings[par]:
stack.pop()
else:
return False
# note: remember to check if stack is empty
        return not stack
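if __name__ == '__main__':
    # Quick illustrative checks (not part of the original file).
    solution = Solution()
    assert solution.isValid('()[]{}')
    assert not solution.isValid('([)]')
    assert not solution.isValid('(')
    print('valid_parentheses: all example cases passed')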
|
CodingVault/LeetCodeInPython
|
valid_parentheses.py
|
Python
|
apache-2.0
| 909
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between mock device and real service.
# F08: Mutual Subscribe: Root path. Null Version. Mutate data in initiator. Publisher in initiator aborts
# M12: Stress Mutual Subscribe: Root path. Null Version. Mutate data in initiator. Publisher in initiator aborts
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
from weave_wdm_next_test_service_base import weave_wdm_next_test_service_base
class test_weave_wdm_next_service_mutual_subscribe_12(weave_wdm_next_test_service_base):
def test_weave_wdm_next_service_mutual_subscribe_12(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['final_client_status'] = 3
wdm_next_args['enable_client_flip'] = 1
wdm_next_args['client_clear_state_between_iterations'] = True
wdm_next_args['test_client_iterations'] = 10
wdm_next_args['client_log_check'] = [('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] TerminateSubscription ', wdm_next_args['test_client_iterations']),
('Client->kEvent_OnNotificationProcessed', wdm_next_args['test_client_iterations']),
('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'] )]
wdm_next_args['test_tag'] = self.__class__.__name__
        wdm_next_args['test_case_name'] = ['Wdm-NestService-F08: Mutual Subscribe: Root path. Null Version. Mutate data in initiator. Publisher in initiator aborts',
                                           'Wdm-NestService-M12: Stress Mutual Subscribe: Root path. Null Version. Mutate data in initiator. Publisher in initiator aborts']
print('test file: ' + self.__class__.__name__)
print("weave-wdm-next test F08 and M12")
super(test_weave_wdm_next_service_mutual_subscribe_12, self).weave_wdm_next_test_service_base(wdm_next_args)
if __name__ == "__main__":
unittest.main()
|
openweave/openweave-core
|
src/test-apps/happy/tests/service/wdmNext/test_weave_wdm_next_service_mutual_subscribe_12.py
|
Python
|
apache-2.0
| 2,976
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts picture data to TFRecords of TF-Example protos.
This module reads the pictures and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take several minutes to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import random
import tensorflow as tf
import json
from datasets import dataset_utils
# The height and width of each image.
_IMAGE_SIZE_WIDTH = 48
_IMAGE_SIZE_HEIGHT = 48
def _add_to_tfrecord(filename, tfrecord_writer, labels_to_class_names, offset=0):
  """Loads picture data from the filename and writes it to a TFRecord.
  Args:
    filename: The filename of one picture.
    tfrecord_writer: The TFRecord writer to use for writing.
    labels_to_class_names: Dict mapping class directory names to integer labels.
    offset: An offset into the absolute number of images previously written.
Returns:
The new offset.
"""
  image = tf.gfile.FastGFile(filename, 'rb').read()  # Read raw encoded image bytes.
label = labels_to_class_names[filename.split('/')[-2]]
with tf.Graph().as_default():
with tf.Session('') as sess:
example = dataset_utils.image_to_tfexample(
image, b'jpg', _IMAGE_SIZE_HEIGHT, _IMAGE_SIZE_WIDTH, label)
tfrecord_writer.write(example.SerializeToString())
return offset + 1
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The dataset directory where the dataset is stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)
def run(dataset_dir,pic_path):
"""Runs the conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
class_names = os.listdir(pic_path)
labels_to_class_names = dict(zip(class_names,range(len(class_names))))
picnames=[]
for label in class_names:
alabel_path=os.path.join(pic_path,label)
names=os.listdir(alabel_path)
picnames.extend([os.path.join(alabel_path,name) for name in names])
random.shuffle(picnames)
train_picnames = picnames[:int(0.7*len(picnames))]
test_picnames = picnames[int(0.7*len(picnames)):]
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
offset = 0
for name in train_picnames:
offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
offset = 0
for name in test_picnames:
offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)
# Finally, write the labels file:
labels_to_class_names = dict(zip(labels_to_class_names.values(),labels_to_class_names.keys()))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
with open(os.path.join(dataset_dir,'info.json'),'w') as f:
info=json.dumps({'num_class':len(class_names),'num_sample_train':len(train_picnames),'num_sample_test':len(test_picnames)})
f.write(info)
print('\nFinished converting the dataset in the {}!'.format(pic_path))
  print('\nThe tfrecord files, info.json and labels file are located in the {}'.format(dataset_dir))
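# Illustrative invocation of run() above (hypothetical paths, not part of the
# original file):
#   run('/data/fer_tfrecords', '/data/fer_pictures')
# where /data/fer_pictures holds one sub-directory of images per class label.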
|
ucloud/uai-sdk
|
examples/tensorflow/train/slim/train/code/datasets/download_and_convert_fer.py
|
Python
|
apache-2.0
| 4,480
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Code to talk to the sleeve using a serial protocol."""
import numpy as np
import serial
import serial.tools.list_ports
# Tactor layout on the sleeve:
# (4)-(1)
# / \ (2)
# (3) (8) (6) (5) Control box
# \ / (9)
# (7)-(10)
class SleeveUSB(object):
"""Allows us to talk to the tactile sleeve using a serial protocol."""
# Constants needed for the hardware.
TACTILE_FRAMES_IN_ONE_PACKET = 1
BYTES_IN_HEADER = 4
PWM_VALUES_IN_TACTILE_FRAME = 8
TACTILE_CHANNELS = 12
SAMPLE_RATE = 2000
def __init__(self, debug=False):
self._debug = debug
self._modem = None
  def find_usb_modem(self) -> None:
    """Searches USB serial ports and stores the first 'usbmodem' device found."""
ports = list(serial.tools.list_ports.comports())
for p in ports:
if 'usbmodem' in str(p):
dev, _ = str(p).split(' ', 1)
self._modem = dev
return
raise ValueError('Could not find modem in this list of ports: %s' % ports)
def open_serial_port(self) -> serial.Serial:
"""Open serial port.
    The port name may need to be changed on a different computer.
    On macOS the port will be /dev/tty.usbmodemXXXXXX.
Returns:
A serial port object
"""
if not self._modem:
self.find_usb_modem()
print('Connecting to port: ', self._modem)
return serial.Serial(self._modem)
def wait_for_reception(self, ser: serial.Serial):
while True:
read_raw = ser.readline()
read_out = read_raw.decode('ascii')
read_out = read_out.replace('\n', '').replace('\r', '')
if self._debug:
print('received <' + read_out + '>')
if 'buffer_copied' in read_out:
return True
return False
def send_waves_to_tactors(self, waveforms: np.ndarray):
"""Sends wave (np array) to tactors. Data must be limited to +/-1."""
assert waveforms.shape[1] == SleeveUSB.TACTILE_CHANNELS, (
'Waveforms does not have %d channels: %s' %
(self.TACTILE_CHANNELS, waveforms.shape))
# So we stop all the tactors at the end
waveforms[-SleeveUSB.PWM_VALUES_IN_TACTILE_FRAME:, :] = 0.0
with self.open_serial_port() as ser:
for frame_start in range(0, waveforms.shape[0],
SleeveUSB.PWM_VALUES_IN_TACTILE_FRAME):
num_frames = min(SleeveUSB.PWM_VALUES_IN_TACTILE_FRAME,
waveforms.shape[0] - frame_start)
# A byte array to send to the sleeve.
tactile_frame_array = bytearray(SleeveUSB.TACTILE_CHANNELS*num_frames +
SleeveUSB.BYTES_IN_HEADER)
# Set the bytes for the header. Defined by serial protocol in
# serial_puck_sleeve.h
tactile_frame_array[0] = 200 # Start packet
tactile_frame_array[1] = 201 # Start packet
tactile_frame_array[2] = 17 # Playback code
tactile_frame_array[3] = 128 # number of samples
for i in range(num_frames):
for j in range(SleeveUSB.TACTILE_CHANNELS):
w = waveforms[frame_start + i, j] * 128 + 128
w = int(min(max(w, 0), 255))
tactile_frame_array[SleeveUSB.BYTES_IN_HEADER +
i * SleeveUSB.TACTILE_CHANNELS + j] = w
try:
ser.write(tactile_frame_array)
except serial.SerialException:
print('error sending')
ser.flush()
if False and frame_start:
ser.reset_input_buffer()
ser.reset_output_buffer()
if not self.wait_for_reception(ser):
print('Got a reception error. Aborting.')
break
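# Illustrative usage sketch (not part of the original module; a sleeve must be
# attached for open_serial_port() to succeed): send one second of a 250 Hz
# sine wave at half amplitude on every tactile channel.
if __name__ == '__main__':
  t = np.arange(SleeveUSB.SAMPLE_RATE) / SleeveUSB.SAMPLE_RATE
  wave = 0.5 * np.sin(2 * np.pi * 250 * t)
  waveforms = np.tile(wave[:, np.newaxis], (1, SleeveUSB.TACTILE_CHANNELS))
  SleeveUSB(debug=True).send_waves_to_tactors(waveforms)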
|
google/audio-to-tactile
|
extras/python/psycho/sleeve_usb.py
|
Python
|
apache-2.0
| 4,262
|
from collections import defaultdict
import itertools
from horizon import exceptions
from horizon.utils.memoized import memoized
from openstack_dashboard.api import nova, cinder, network
from openstack_dashboard.api.base import is_service_enabled, QuotaSet
class QuotaUsage(dict):
""" Tracks quota limit, used, and available for a given set of quotas."""
def __init__(self):
self.usages = defaultdict(dict)
def __getitem__(self, key):
return self.usages[key]
def __setitem__(self, key, value):
        raise NotImplementedError("Directly setting QuotaUsage values is not "
                                  "supported. Please use the add_quota and "
                                  "tally methods.")
def __repr__(self):
return repr(dict(self.usages))
def add_quota(self, quota):
""" Adds an internal tracking reference for the given quota. """
if quota.limit is None or quota.limit == -1:
# Handle "unlimited" quotas.
self.usages[quota.name]['quota'] = float("inf")
self.usages[quota.name]['available'] = float("inf")
else:
self.usages[quota.name]['quota'] = int(quota.limit)
def tally(self, name, value):
""" Adds to the "used" metric for the given quota. """
value = value or 0 # Protection against None.
# Start at 0 if this is the first value.
if 'used' not in self.usages[name]:
self.usages[name]['used'] = 0
# Increment our usage and update the "available" metric.
self.usages[name]['used'] += int(value) # Fail if can't coerce to int.
self.update_available(name)
def update_available(self, name):
""" Updates the "available" metric for the given quota. """
available = self.usages[name]['quota'] - self.usages[name]['used']
if available < 0:
available = 0
self.usages[name]['available'] = available
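    # Illustrative example (with a hypothetical quota object q): after
    # add_quota(q) where q.name == 'instances' and q.limit == 10, followed by
    # tally('instances', 3), usages['instances'] is
    # {'quota': 10, 'used': 3, 'available': 7}.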
def _get_quota_data(request, method_name, disabled_quotas=[]):
quotasets = []
tenant_id = request.user.tenant_id
quotasets.append(getattr(nova, method_name)(request, tenant_id))
qs = QuotaSet()
if 'volumes' not in disabled_quotas:
quotasets.append(getattr(cinder, method_name)(request, tenant_id))
for quota in itertools.chain(*quotasets):
if quota.name not in disabled_quotas:
qs[quota.name] = quota.limit
return qs
def get_default_quota_data(request, disabled_quotas=[]):
return _get_quota_data(request, "default_quota_get", disabled_quotas)
def get_tenant_quota_data(request, disabled_quotas=[]):
return _get_quota_data(request, "tenant_quota_get", disabled_quotas)
@memoized
def tenant_quota_usages(request):
# Get our quotas and construct our usage object.
disabled_quotas = []
if not is_service_enabled(request, 'volume'):
disabled_quotas.extend(['volumes', 'gigabytes'])
usages = QuotaUsage()
for quota in get_tenant_quota_data(request, disabled_quotas):
usages.add_quota(quota)
# Get our usages.
floating_ips = network.tenant_floating_ip_list(request)
flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
instances = nova.server_list(request)
# Fetch deleted flavors if necessary.
missing_flavors = [instance.flavor['id'] for instance in instances
if instance.flavor['id'] not in flavors]
for missing in missing_flavors:
if missing not in flavors:
try:
flavors[missing] = nova.flavor_get(request, missing)
except:
flavors[missing] = {}
exceptions.handle(request, ignore=True)
usages.tally('instances', len(instances))
usages.tally('floating_ips', len(floating_ips))
if 'volumes' not in disabled_quotas:
volumes = cinder.volume_list(request)
usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
usages.tally('volumes', len(volumes))
# Sum our usage based on the flavors of the instances.
for flavor in [flavors[instance.flavor['id']] for instance in instances]:
usages.tally('cores', getattr(flavor, 'vcpus', None))
usages.tally('ram', getattr(flavor, 'ram', None))
# Initialise the tally if no instances have been launched yet
if len(instances) == 0:
usages.tally('cores', 0)
usages.tally('ram', 0)
return usages
|
99cloud/keystone_register
|
openstack_dashboard/usage/quotas.py
|
Python
|
apache-2.0
| 4,420
|
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
def remove():
ctx.logger.info('Removing Cloudify CLI...')
utils.yum_remove('cloudify')
ctx.logger.info('Cloudify CLI successfully removed ')
remove()
|
cloudify-cosmo/cloudify-manager-blueprints
|
components/cli/scripts/delete.py
|
Python
|
apache-2.0
| 372
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that
uses the feed to populate dynamic data.
Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""
__author__ = ('api.msaniscalchi@gmail.com (Mark Saniscalchi)',
'yufeng.dev@gmail.com (Yufeng Guo)')
# Import appropriate classes from the client library.
from googleads import adwords
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'
ADGROUPS = [
'INSERT_ADGROUP_ID_HERE',
'INSERT_ADGROUP_ID_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'
def main(client, adgroups):
# Initialize appropriate services.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406')
customer_feed_service = client.GetService(
'CustomerFeedService', version='v201406')
feed_item_service = client.GetService('FeedItemService', version='v201406')
feed_mapping_service = client.GetService(
'FeedMappingService', version='v201406')
feed_service = client.GetService('FeedService', version='v201406')
# First, create a customizer feed. One feed per account can be used for all
# ads.
customizer_feed = {
'name': FEEDNAME,
'attributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['id'],
'nameId': feed['attributes'][0]['id'],
'priceId': feed['attributes'][1]['id'],
'dateId': feed['attributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:'
         '\tName attribute ID %s and price attribute ID %s and date attribute '
'ID %s') % (feed['name'], feed['id'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
else:
raise Exception('No feeds were added')
# Creating feed mapping to map the fields with customizer IDs.
feed_mapping = {
'placeholderType': PLACEHOLDER_AD_CUSTOMIZER,
'feedId': feed_data['feedId'],
'attributeFieldMappings': [
{
'feedAttributeId': feed_data['nameId'],
'fieldId': PLACEHOLDER_FIELD_STRING
},
{
'feedAttributeId': feed_data['priceId'],
'fieldId': PLACEHOLDER_FIELD_PRICE
},
{
'feedAttributeId': feed_data['dateId'],
'fieldId': PLACEHOLDER_FIELD_DATE
}
]
}
feed_mapping_operation = {
'operator': 'ADD',
'operand': feed_mapping
}
response = feed_mapping_service.mutate([feed_mapping_operation])
if response and 'value' in response:
feed_mapping = response['value'][0]
print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
' with ID %s.') % (feed_mapping['feedMappingId'],
feed_mapping['placeholderType'],
feed_mapping['feedId'])
else:
raise Exception('No feed mappings were added.')
# Now adding feed items -- the values we'd like to place.
items_data = [
{
'name': 'Mars',
'price': '$1234.56',
'date': '20140601 000000',
'adGroupId': adgroups[0]
},
{
'name': 'Venus',
'price': '$1450.00',
'date': '20140615 120000',
'adGroupId': adgroups[1]
}
]
feed_items = [{'feedId': feed_data['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': item['adGroupId']
},
'attributeValues': [
{
'feedAttributeId': feed_data['nameId'],
'stringValue': item['name']
},
{
'feedAttributeId': feed_data['priceId'],
'stringValue': item['price']
},
{
'feedAttributeId': feed_data['dateId'],
'stringValue': item['date']
}
]} for item in items_data]
feed_item_operations = [{
'operator': 'ADD',
'operand': feed_item
} for feed_item in feed_items]
response = feed_item_service.mutate(feed_item_operations)
if response and 'value' in response:
for feed_item in response['value']:
print 'Feed item with ID %s was added.' % feed_item['feedItemId']
else:
raise Exception('No feed items were added.')
# Finally, creating a customer (account-level) feed with a matching function
# that determines when to use this feed. For this case we use the "IDENTITY"
# matching function that is always 'true' just to associate this feed with
# the customer. The targeting is done within the feed items using the
# :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes.
matching_function = {
'operator': 'IDENTITY',
'lhsOperand': [
{
'xsi_type': 'ConstantOperand',
'type': 'BOOLEAN',
'booleanValue': 'true'
}
]
}
customer_feed = {
'feedId': feed_data['feedId'],
'matchingFunction': matching_function,
'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER]
}
customer_feed_operation = {
'operator': 'ADD',
'operand': customer_feed
}
response = customer_feed_service.mutate([customer_feed_operation])
if response and 'value' in response:
feed = response['value'][0]
print 'Customer feed with ID %s was added.' % feed['feedId']
else:
raise Exception('No customer feeds were added.')
# All set! We can now create ads with customizations.
text_ad = {
'xsi_type': 'TextAd',
'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME,
'description1': 'Only {=%s.Price}' % FEEDNAME,
'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME,
'url': 'http://www.example.com',
'displayUrl': 'www.example.com'
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': text_ad
}
} for adgroup in adgroups]
print operations
response = ad_group_ad_service.mutate(operations)
print '===ad group ad service==='
print response
if response and 'value' in response:
for ad in response['value']:
print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise Exception('No ads were added.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
|
dietrichc/streamline-ppc-reports
|
examples/adwords/v201406/advanced_operations/add_ad_customizer.py
|
Python
|
apache-2.0
| 8,087
|
import openface
import numpy as np
import os
import cv2
import pickle
from pymongo import MongoClient
import time
id_name = ["Alec", "Emil", "Greg", "Phong", "Thinh"]
def push_to_db(person_id):
client = MongoClient('mongodb://localhost:27017')
db = client['ta_sas']
# Get the userId for the classifyId
db_id = db['UserClassifyId']
record = db_id.find_one({"classifyId":person_id})
user_name = record['userName']
# Content to push
location = "Tokyo Academics"
type = "in"
created_at = time.time()
post = {"userName": user_name,
"location": location,
"type": type,
"createdAt": created_at}
db_to_push = db['CheckInQueue']
post_id = db_to_push.insert_one(post).inserted_id
print(post_id)
def classify(aligned_face, net, clf, le):
rep = net.forward(aligned_face)
predictions = clf.predict_proba(rep.reshape((1, len(rep)))).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
print("Predict {} with {:.2f} confidence.".format(person, confidence))
return person, confidence
if __name__ == "__main__":
CONFIDENCE_THRESHOLD = 0.5
show_video = False
# path to the face alignment model
dLib_predictor = "../../resource/shape_predictor_68_face_landmarks.dat"
# construct the face alignment model
align = openface.AlignDlib(dLib_predictor)
# path to deep neural network for feature representation
network_model = "../../resource/nn4.small2.v1.t7"
# construct the network for feature represenation
net = openface.TorchNeuralNet(network_model, 96)
classifier_model = "../../resource/svm.pkl"
    with open(classifier_model, 'rb') as f:  # pickle files must be read in binary mode
        (le, clf) = pickle.load(f)
print("Successfully loaded SVM model")
video = cv2.VideoCapture(0)
    if not video.isOpened():  # VideoCapture never returns None; check isOpened() instead
        exit()
while True:
# grab image
ret, cameraFrame = video.read()
if not ret:
exit()
try:
bbs = align.getAllFaceBoundingBoxes(cameraFrame)
print("Found {} face".format(len(bbs)))
for bb2 in bbs:
alignedFace = align.align(96, cameraFrame, bb2,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
id, confidence = classify(alignedFace, net, clf, le)
if float(confidence) >= CONFIDENCE_THRESHOLD:
push_to_db(id)
print("Pushed to DB")
person_name = id_name[id]
print(person_name)
if show_video:
rectColor = (0,255, 0)
textColor = (255, 0, 0)
face_top_left = (bb2.left(), bb2.top())
face_bottom_right = (bb2.right(), bb2.bottom())
cv2.rectangle(cameraFrame, face_top_left, face_bottom_right, rectColor)
cv2.putText(cameraFrame, person_name, face_top_left,
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=textColor, thickness=2)
if show_video:
cv2.imshow('FaceRecognizer', cameraFrame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except:
if show_video:
cv2.imshow('FaceRecognize', cameraFrame)
continue
|
xphongvn/smart-attendance-system-ta
|
src/backend/live_classify_local_camera_connect_mongodb.py
|
Python
|
apache-2.0
| 3,578
|
async def application(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 200,
'headers': [
(b'content-length', b'0'),
(b'server-port', str(scope['server'][1]).encode()),
],
}
)
|
nginx/unit
|
test/python/server_port/asgi.py
|
Python
|
apache-2.0
| 344
|
# Gradient descent for training a linear unit.
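# A minimal sketch of what the loop below implements: for each sample x with
# target d, the linear unit outputs y = w . x and each weight is updated with
# the delta rule w[k] += alfa * (d - y) * x[k]; training stops once the summed
# squared error E = 0.5 * sum((d - y)**2) drops below 0.1 or stops changing.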
import numpy as np
import matplotlib.pyplot as plt
import csv
def main():
x = [[0.,0.],[0.,1.],[1.,0.],[1.,1.]]
d = [0.,1.,1.,1.]
w = [np.random.uniform(0.,1.),np.random.uniform(0.,1.)]
alfa = 0.1
w_old = []
G_E = []
while True:
E = 0.
print('start'+str(w))
w_old = w[:]
for i in range(len(x)):
y = np.array(x[i]).dot(w)
error = d[i]-y
print('[d[i],y,error]=['+str(d[i])+','+str(y)+','+str(error)+']')
for k in range(len(w)):
w[k] = w[k]+alfa*error*x[i][k]
E += np.power(error,2)
E = 0.5*E
G_E.append(E)
print('LSM='+str(E))
print('end'+str(w))
if E < 0.1 or (len(G_E) > 2 and G_E[len(G_E)-2] - G_E[len(G_E)-1] == 0.):
print('not changing')
break
plt.plot(range(len(G_E)),G_E,'r')
plt.show()
if __name__ == '__main__':
main()
|
xSakix/etf_expert
|
py_code/neural/nn_untreasholded_grad_descent_stochastic.py
|
Python
|
apache-2.0
| 853
|
__source__ = 'https://leetcode.com/problems/longest-line-of-consecutive-one-in-matrix/'
# Time: O(m*n*4)
# Space: O(m*n*4)
#
# Description: 562. Longest Line of Consecutive One in Matrix
#
# Given a 01 matrix M, find the longest line of consecutive one in the matrix.
# The line could be horizontal, vertical, diagonal or anti-diagonal.
#
# Example:
# Input:
# [[0,1,1,0],
# [0,1,1,0],
# [0,0,0,1]]
# Output: 3
# Hint: The number of elements in the given matrix will not exceed 10,000.
#
# Hide Company Tags Google
# Hide Tags Array
#
import unittest
class Solution(object):
pass # your function here
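    # Illustrative Python sketch of the DP approach described in the notes
    # below (not the original author's solution): dp[i][j][k] is the length of
    # the run of ones ending at (i, j) in direction k (horizontal, vertical,
    # diagonal, anti-diagonal).
    def longestLine(self, M):
        if not M or not M[0]:
            return 0
        m, n = len(M), len(M[0])
        dp = [[[0] * 4 for _ in range(n)] for _ in range(m)]
        best = 0
        for i in range(m):
            for j in range(n):
                if M[i][j] != 1:
                    continue
                dp[i][j] = [1, 1, 1, 1]
                if j > 0:
                    dp[i][j][0] += dp[i][j - 1][0]      # horizontal
                if i > 0:
                    dp[i][j][1] += dp[i - 1][j][1]      # vertical
                if i > 0 and j > 0:
                    dp[i][j][2] += dp[i - 1][j - 1][2]  # diagonal
                if i > 0 and j < n - 1:
                    dp[i][j][3] += dp[i - 1][j + 1][3]  # anti-diagonal
                best = max(best, max(dp[i][j]))
        return best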
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/longest-line-of-consecutive-one-in-matrix/solution/
1.
Note that each cell of the DP table only depends on the current row or previous row
so you can easily optimize the above algorithm to use only O(m) space.
# DP
# 25ms 22.15%
class Solution {
public int longestLine(int[][] M) {
int m = M.length, max = 0;
if (m == 0) return max;
int n = M[0].length;
int[][][] dp = new int[m][n][4];
for (int i = 0;i < m; i++) {
for (int j = 0;j < n; j++) {
if (M[i][j] == 0) continue;
for (int k = 0; k < 4; k++) dp[i][j][k] = 1;
if (j > 0) dp[i][j][0] += dp[i][j-1][0]; // horizontal line
if (j > 0 && i > 0) dp[i][j][1] += dp[i-1][j-1][1]; // anti-diagonal line
if (i > 0) dp[i][j][2] += dp[i-1][j][2]; // vertical line
if (j < n-1 && i > 0) dp[i][j][3] += dp[i-1][j+1][3]; // diagonal line
max = Math.max(max, Math.max(dp[i][j][0], dp[i][j][1]));
max = Math.max(max, Math.max(dp[i][j][2], dp[i][j][3]));
}
}
return max;
}
}
2.
# 207ms 4.75%
class Solution {
int[][] DIRS = new int[][]{{1,0},{0,1},{1,1}, {1,-1}};
public int longestLine(int[][] M) {
if (M == null || M.length == 0) return 0;
int count = 0;
for (int i = 0 ; i < M.length; i++) {
for (int j = 0; j < M[0].length; j++) {
if (M[i][j] == 1) {
count = Math.max(getOneLineMax(M, i, j), count);
}
}
}
return count;
}
public int getOneLineMax(int[][] M, int i, int j) {
int res = 1;
for (int[] d : DIRS) {
int x = i + d[0];
int y = j + d[1];
int count = 1;
while (x >= 0 && x < M.length && y >= 0 && y < M[0].length && M[x][y] == 1) {
count++;
x += d[0];
y += d[1];
}
res = Math.max(res, count);
}
return res;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/LongestLineOfConsecutiveOneInMatrix.py
|
Python
|
apache-2.0
| 2,908
|
from appfd.models import Basemap, Feature, Order, Place, Test
from drf_queryfields import QueryFieldsMixin
from rest_framework.serializers import HiddenField, IntegerField, NullBooleanField, CharField, ChoiceField, URLField
from rest_framework.serializers import HyperlinkedModelSerializer, ModelSerializer, Serializer, SerializerMethodField
####
from rest_framework.utils.serializer_helpers import (
BindingDict, BoundField, JSONBoundField, NestedBoundField, ReturnDict,
ReturnList
)
class MapRequestSerializer(Serializer):
basemap = CharField(max_length=200, allow_blank=True, allow_null=True, required=False)
case_insensitive = NullBooleanField(required=False)
end_user_timezone = CharField(max_length=200, allow_null=True, required=False)
map_format = ChoiceField(["all","geojson", "gif", "jpg", "png", "xy"], required=False)
text = CharField(max_length=1e10, trim_whitespace=True, allow_null=True, required=False)
url = URLField(allow_null=True, required=False)
# Serializers define the API representation.
class BasemapSerializer(QueryFieldsMixin, ModelSerializer):
class Meta:
model = Basemap
fields = ["id", "name"]
class FeatureSerializer(QueryFieldsMixin, HyperlinkedModelSerializer):
class Meta:
model = Feature
fields = ["name", "order"]
class OrderSerializer(ModelSerializer):
class Meta:
model = Order
fields = ["complete", "duration", "end", "start", "token"]
class QueryableOrderSerializer(QueryFieldsMixin, OrderSerializer):
class Meta:
model = Order
fields = ["complete", "duration", "end", "start", "token"]
class PlaceSerializer(QueryFieldsMixin, ModelSerializer):
"""
feature_type = SerializerMethodField()
def get_feature_type(self, place):
lookup = {
"FRM": "Farm",
"PCLI": "Independent Political Entity",
"PPL": "Populated Place",
"PPLA": "Admin 1",
"PPLA2": "Admin 2",
"PPLA3": "Admin 3",
"PPLA4": "Admin 4",
"PPLL": "Populated Locality",
"ST": "Street"
}
return lookup.get(place.feature_code, place.feature_code)
"""
class Meta:
model = Place
fields = ["id", "attribution", "country_code", "name", "point"]
class VerbosePlaceSerializer(PlaceSerializer):
class Meta:
model = Place
fields = [
"id", "name",
"attribution", "enwiki_title", "geonames_id", "osm_id",
"pcode", "fips",
"admin1_code", "admin2_code", "admin3_code", "admin4_code", "admin_level",
"east", "north", "south", "west",
"name", "name_ascii", "name_display", "name_en", "name_normalized", "other_names",
"geonames_feature_class", "geonames_feature_code", "place_type",
"latitude", "longitude", "area_sqkm",
"importance", "osmname_class", "osmname_type", "osm_type", "place_rank",
"dem", "elevation",
"city", "county", "country", "country_code", "state", "street",
"population", "popularity", "timezone"
]
class TestSerializer(QueryFieldsMixin, ModelSerializer):
class Meta:
model = Test
fields = ["accuracy", "created"]
|
FirstDraftGIS/firstdraft
|
projfd/apifd/serializers.py
|
Python
|
apache-2.0
| 3,304
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around pngquant command.
This runs the pngquant command as a subprocess, forwarding all the unparsed options
after '--', while also handling two special error codes 98 or 99 by simply copying the
input file to the output file.
Pngquant exits with 98 when the conversion results in a larger file than the original;
and with 99 when the conversion results in quality below the requested minimum.
Usage:
$ python -m nanoemoji.pngquant -i INPUT -o OUTPUT -- [PNGQUANTFLAGS]
"""
from absl import app
from absl import flags
import shlex
import shutil
import subprocess
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None, "Input filename", short_name="i")
flags.DEFINE_string("output_file", None, "Output filename", short_name="o")
def main(argv):
pngquant = shutil.which("pngquant")
if pngquant is None:
raise RuntimeError(
"'pngquant' command-line tool not found on $PATH. "
"Try `pip install pngquant-cli` or visit https://github.com/kornelski/pngquant."
)
pngquant_args = argv[1:]
infile = FLAGS.input_file
outfile = FLAGS.output_file
p = subprocess.run([pngquant, *pngquant_args, "-o", outfile, infile])
err = p.returncode
if err in (98, 99):
print(f"Reuse {infile}")
shutil.copyfile(infile, outfile)
err = 0
return err
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
app.run(main)
|
googlefonts/nanoemoji
|
src/nanoemoji/pngquant.py
|
Python
|
apache-2.0
| 2,064
|
"""
Support for IP Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.generic/
"""
import asyncio
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_AUTHENTICATION,
HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION, CONF_VERIFY_SSL)
from homeassistant.exceptions import TemplateError
from homeassistant.components.camera import (
PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_validation as cv
from homeassistant.util.async_ import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
CONF_CONTENT_TYPE = 'content_type'
CONF_LIMIT_REFETCH_TO_URL_CHANGE = 'limit_refetch_to_url_change'
CONF_STILL_IMAGE_URL = 'still_image_url'
CONF_FRAMERATE = 'framerate'
DEFAULT_NAME = 'Generic Camera'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_STILL_IMAGE_URL): cv.template,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION):
vol.In([HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]),
vol.Optional(CONF_LIMIT_REFETCH_TO_URL_CHANGE, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_CONTENT_TYPE, default=DEFAULT_CONTENT_TYPE): cv.string,
vol.Optional(CONF_FRAMERATE, default=2): cv.positive_int,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
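# An illustrative configuration.yaml entry for this platform (host and
# credentials are hypothetical):
#
#   camera:
#     - platform: generic
#       name: Front Door
#       still_image_url: http://192.168.1.10/snapshot.jpg
#       username: admin
#       password: secret
#       verify_ssl: false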
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up a generic IP Camera."""
async_add_entities([GenericCamera(hass, config)])
class GenericCamera(Camera):
"""A generic implementation of an IP camera."""
def __init__(self, hass, device_info):
"""Initialize a generic camera."""
super().__init__()
self.hass = hass
self._authentication = device_info.get(CONF_AUTHENTICATION)
self._name = device_info.get(CONF_NAME)
self._still_image_url = device_info[CONF_STILL_IMAGE_URL]
self._still_image_url.hass = hass
self._limit_refetch = device_info[CONF_LIMIT_REFETCH_TO_URL_CHANGE]
self._frame_interval = 1 / device_info[CONF_FRAMERATE]
self.content_type = device_info[CONF_CONTENT_TYPE]
self.verify_ssl = device_info[CONF_VERIFY_SSL]
username = device_info.get(CONF_USERNAME)
password = device_info.get(CONF_PASSWORD)
if username and password:
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
self._auth = HTTPDigestAuth(username, password)
else:
self._auth = aiohttp.BasicAuth(username, password=password)
else:
self._auth = None
self._last_url = None
self._last_image = None
@property
def frame_interval(self):
"""Return the interval between frames of the mjpeg stream."""
return self._frame_interval
def camera_image(self):
"""Return bytes of camera image."""
return run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop).result()
@asyncio.coroutine
def async_camera_image(self):
"""Return a still image response from the camera."""
try:
url = self._still_image_url.async_render()
except TemplateError as err:
_LOGGER.error(
"Error parsing template %s: %s", self._still_image_url, err)
return self._last_image
if url == self._last_url and self._limit_refetch:
return self._last_image
        # aiohttp doesn't support DigestAuth yet
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
def fetch():
"""Read image from a URL."""
try:
response = requests.get(url, timeout=10, auth=self._auth,
verify=self.verify_ssl)
return response.content
except requests.exceptions.RequestException as error:
_LOGGER.error("Error getting camera image: %s", error)
return self._last_image
self._last_image = yield from self.hass.async_add_job(
fetch)
# async
else:
try:
websession = async_get_clientsession(
self.hass, verify_ssl=self.verify_ssl)
with async_timeout.timeout(10, loop=self.hass.loop):
response = yield from websession.get(
url, auth=self._auth)
self._last_image = yield from response.read()
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting camera image")
return self._last_image
except aiohttp.ClientError as err:
_LOGGER.error("Error getting new camera image: %s", err)
return self._last_image
self._last_url = url
return self._last_image
@property
def name(self):
"""Return the name of this device."""
return self._name
|
persandstrom/home-assistant
|
homeassistant/components/camera/generic.py
|
Python
|
apache-2.0
| 5,462
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate coco class mapping to voc 20 class.
The resulting mapping dict will be hard-coded in coco.py
python dataset/cls_mapping_coco_voc.py
"""
voc_class_names = [
"motorbike", "dog", "person", "horse", "sofa", "bicycle", "cow", "boat",
"train", "car", "bird", "cat", "chair", "pottedplant", "sheep", "aeroplane",
"bottle", "bus", "diningtable", "tvmonitor"
]
coco_class_names = [
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
"truck", "boat", "traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
"baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
"hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
"keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
"refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush"
]
identity_name_mapping = {
# voc to coco name mapping of same class
"aeroplane": "airplane",
"motorbike": "motorcycle",
"sofa": "couch",
"pottedplant": "potted plant",
"tvmonitor": "tv",
"diningtable": "dining table",
}
COCO_id_to_category_id = {
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
21: 20,
22: 21,
23: 22,
24: 23,
25: 24,
27: 25,
28: 26,
31: 27,
32: 28,
33: 29,
34: 30,
35: 31,
36: 32,
37: 33,
38: 34,
39: 35,
40: 36,
41: 37,
42: 38,
43: 39,
44: 40,
46: 41,
47: 42,
48: 43,
49: 44,
50: 45,
51: 46,
52: 47,
53: 48,
54: 49,
55: 50,
56: 51,
57: 52,
58: 53,
59: 54,
60: 55,
61: 56,
62: 57,
63: 58,
64: 59,
65: 60,
67: 61,
70: 62,
72: 63,
73: 64,
74: 65,
75: 66,
76: 67,
77: 68,
78: 69,
79: 70,
80: 71,
81: 72,
82: 73,
84: 74,
85: 75,
86: 76,
87: 77,
88: 78,
89: 79,
90: 80
} # noqa
category_id_to_COCO_id = {v: k for k, v in COCO_id_to_category_id.items()}
mapping = {}
for i, name in enumerate(voc_class_names):
index = coco_class_names.index(identity_name_mapping.get(name, name)) + 1
coco_index = category_id_to_COCO_id.get(index, index)
mapping[coco_index] = i + 1
print(
mapping
) # {64: 12, 1: 1, 67: 17, 3: 9, 4: 7, 5: 3, 6: 4, 7: 15, 72: 13, 9: 8, 44: 16, 2: 19, 16: 6, 17: 14, 18: 18, 19: 10, 20: 20, 21: 5, 62: 2, 63: 11}
|
google-research/ssl_detection
|
detection/dataset/cls_mapping_coco_voc.py
|
Python
|
apache-2.0
| 3,471
|
"""
Pipeline subroutines
"""
|
SKA-ScienceDataProcessor/FastImaging-Python
|
src/fastimgproto/pipeline/__init__.py
|
Python
|
apache-2.0
| 29
|
# coding=utf-8
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.python.subsystems.pex_build_util import identify_missing_init_files
from pants.engine.fs import EMPTY_DIRECTORY_DIGEST, Digest, Snapshot
from pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult
from pants.engine.rules import rule
from pants.engine.selectors import Get
from pants.util.objects import datatype
# TODO(#7710): Once this gets fixed, rename this to InitInjectedDigest.
class InjectedInitDigest(datatype([('directory_digest', Digest)])): pass
@rule(InjectedInitDigest, [Snapshot])
def inject_init(snapshot):
"""Ensure that every package has an __init__.py file in it."""
missing_init_files = tuple(sorted(identify_missing_init_files(snapshot.files)))
if not missing_init_files:
new_init_files_digest = EMPTY_DIRECTORY_DIGEST
else:
# TODO(7718): add a builtin rule for FilesContent->Snapshot, so that we can avoid using touch
# and the absolute path and have the engine build the files for us.
touch_init_request = ExecuteProcessRequest(
argv=("/usr/bin/touch",) + missing_init_files,
output_files=missing_init_files,
description="Inject missing __init__.py files: {}".format(", ".join(missing_init_files)),
input_files=snapshot.directory_digest,
)
touch_init_result = yield Get(ExecuteProcessResult, ExecuteProcessRequest, touch_init_request)
new_init_files_digest = touch_init_result.output_directory_digest
# TODO(#7710): Once this gets fixed, merge the original source digest and the new init digest
# into one unified digest.
yield InjectedInitDigest(directory_digest=new_init_files_digest)
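# For example (illustrative): if the snapshot contains only
# 'src/python/foo/bar.py' and 'src/python/foo/__init__.py' is absent,
# identify_missing_init_files is expected to report that __init__.py, which the
# rule above then materializes with /usr/bin/touch and returns as a digest.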
def rules():
return [
inject_init,
]
|
twitter/pants
|
src/python/pants/backend/python/rules/inject_init.py
|
Python
|
apache-2.0
| 1,906
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pprint
import time
import traceback
from datetime import datetime

import urllib3
import umsgpack
from ikalog.constants import fes_rank_titles
from ikalog.version import IKALOG_VERSION
from ikalog.utils import *
# Needed in GUI mode
try:
import wx
except:
pass
# @package ikalog.outputs.statink
# IkaLog Output Plugin for Stat.ink
class StatInk(object):
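    """Output plugin that composes a battle result payload and posts it to stat.ink."""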
def apply_ui(self):
self.enabled = self.checkEnable.GetValue()
self.show_response_enabled = self.checkShowResponseEnable.GetValue()
self.track_objective_enabled = self.checkTrackObjectiveEnable.GetValue()
self.track_splatzone_enabled = self.checkTrackSplatzoneEnable.GetValue()
self.api_key = self.editApiKey.GetValue()
def refresh_ui(self):
self.checkEnable.SetValue(self.enabled)
self.checkShowResponseEnable.SetValue(self.show_response_enabled)
self.checkTrackObjectiveEnable.SetValue(self.track_objective_enabled)
self.checkTrackSplatzoneEnable.SetValue(self.track_splatzone_enabled)
        if self.api_key is not None:
self.editApiKey.SetValue(self.api_key)
else:
self.editApiKey.SetValue('')
def on_config_reset(self, context=None):
self.enabled = False
self.show_response_enabled = False
self.track_objective_enabled = False
self.track_splatzone_enabled = False
self.api_key = None
def on_config_load_from_context(self, context):
self.on_config_reset(context)
try:
conf = context['config']['stat.ink']
except:
conf = {}
self.enabled = conf.get('Enable', False)
self.show_response_enabled = conf.get('ShowResponse', False)
self.track_objective_enabled = conf.get('TrackObjective', False)
self.track_splatzone_enabled = conf.get('TrackSplatzone', False)
self.api_key = conf.get('APIKEY', '')
self.refresh_ui()
return True
def on_config_save_to_context(self, context):
context['config']['stat.ink'] = {
'Enable': self.enabled,
'ShowResponse': self.show_response_enabled,
'TrackObjective': self.track_objective_enabled,
'TrackSplatzone': self.track_splatzone_enabled,
'APIKEY': self.api_key,
}
def on_config_apply(self, context):
self.apply_ui()
def on_option_tab_create(self, notebook):
self.panel = wx.Panel(notebook, wx.ID_ANY)
self.page = notebook.InsertPage(0, self.panel, 'stat.ink')
self.layout = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.layout)
self.checkEnable = wx.CheckBox(
self.panel, wx.ID_ANY, 'stat.ink へのスコアを送信する')
self.checkTrackObjectiveEnable = wx.CheckBox(
self.panel, wx.ID_ANY, 'ガチヤグラ/ガチホコの位置を検出する (Experimental)')
self.checkTrackSplatzoneEnable = wx.CheckBox(
self.panel, wx.ID_ANY, 'ガチエリアのカウントを検出する (Experimental)')
self.checkShowResponseEnable = wx.CheckBox(
self.panel, wx.ID_ANY, 'stat.ink からの応答をコンソールに出力する')
self.editApiKey = wx.TextCtrl(self.panel, wx.ID_ANY, u'hoge')
self.layout.Add(self.checkEnable)
self.layout.Add(self.checkShowResponseEnable)
self.layout.Add(self.checkTrackObjectiveEnable)
self.layout.Add(self.checkTrackSplatzoneEnable)
self.layout.Add(wx.StaticText(
self.panel, wx.ID_ANY, u'APIキー'))
self.layout.Add(self.editApiKey, flag=wx.EXPAND)
self.panel.SetSizer(self.layout)
def encode_stage_name(self, context):
try:
stage = IkaUtils.map2text(context['game']['map'])
return {
'アロワナモール': 'arowana',
'Bバスパーク': 'bbass',
'デカライン高架下': 'dekaline',
'ハコフグ倉庫': 'hakofugu',
'ヒラメが丘団地': 'hirame',
'ホッケふ頭': 'hokke',
'キンメダイ美術館': 'kinmedai',
'マヒマヒリゾート&スパ': 'mahimahi',
'マサバ海峡大橋': 'masaba',
'モンガラキャンプ場': 'mongara',
'モズク農園': 'mozuku',
'ネギトロ炭鉱': 'negitoro',
'シオノメ油田': 'shionome',
'タチウオパーキング': 'tachiuo'
}[stage]
except:
IkaUtils.dprint(
                '%s: Failed to convert stage name %s to stat.ink value' % (self, stage))
return None
def encode_rule_name(self, context):
try:
rule = IkaUtils.rule2text(context['game']['rule'])
return {
'ナワバリバトル': 'nawabari',
'ガチエリア': 'area',
'ガチヤグラ': 'yagura',
'ガチホコバトル': 'hoko',
}[rule]
except:
IkaUtils.dprint(
                '%s: Failed to convert rule name %s to stat.ink value' % (self, rule))
return None
def encode_weapon_name(self, weapon):
try:
return {
'ガロン52': '52gal',
'ガロンデコ52': '52gal_deco',
'ガロン96': '96gal',
'ガロンデコ96': '96gal_deco',
'ボールドマーカー': 'bold',
'ボールドマーカーネオ': 'bold_neo',
'デュアルスイーパー': 'dualsweeper',
'デュアルスイーパーカスタム': 'dualsweeper_custom',
'H3リールガン': 'h3reelgun',
'H3リールガンD': 'h3reelgun_d',
'ハイドラント': 'hydra',
'ヒーローシューターレプリカ': 'heroshooter_replica',
'ホットブラスター': 'hotblaster',
'ホットブラスターカスタム': 'hotblaster_custom',
'ジェットスイーパー': 'jetsweeper',
'ジェットスイーパーカスタム': 'jetsweeper_custom',
'L3リールガン': 'l3reelgun',
'L3リールガンD': 'l3reelgun_d',
'ロングブラスター': 'longblaster',
'ロングブラスターカスタム': 'longblaster_custom',
'もみじシューター': 'momiji',
'ノヴァブラスター': 'nova',
'ノヴァブラスターネオ': 'nova_neo',
'N-ZAP85': 'nzap85',
'N-ZAP89': 'nzap89',
'オクタシューターレプリカ': 'octoshooter_replica',
'プライムシューター': 'prime',
'プライムシューターコラボ': 'prime_collabo',
'プロモデラーMG': 'promodeler_mg',
'プロモデラーRG': 'promodeler_rg',
'ラピッドブラスター': 'rapid',
'ラピッドブラスターデコ': 'rapid_deco',
'Rブラスターエリート': 'rapid_elite',
'シャープマーカー': 'sharp',
'シャープマーカーネオ': 'sharp_neo',
'スプラシューター': 'sshooter',
'スプラシューターコラボ': 'sshooter_collabo',
'わかばシューター': 'wakaba',
'カーボンローラー': 'carbon',
'カーボンローラーデコ': 'carbon_deco',
'ダイナモローラー': 'dynamo',
'ダイナモローラーテスラ': 'dynamo_tesla',
'ヒーローローラーレプリカ': 'heroroller_replica',
'ホクサイ': 'hokusai',
'パブロ': 'pablo',
'パブロ・ヒュー': 'pablo_hue',
'スプラローラー': 'splatroller',
'スプラローラーコラボ': 'splatroller_collabo',
'14式竹筒銃・甲': 'bamboo14mk1',
'14式竹筒銃・乙': 'bamboo14mk2',
'ヒーローチャージャーレプリカ': 'herocharger_replica',
'リッター3K': 'liter3k',
'リッター3Kカスタム': 'liter3k_custom',
'3Kスコープ': 'liter3k_scope',
'3Kスコープカスタム': 'liter3k_scope_custom',
'スプラチャージャー': 'splatcharger',
'スプラチャージャーワカメ': 'splatcharger_wakame',
'スプラスコープ': 'splatscope',
'スプラスコープワカメ': 'splatscope_wakame',
'スクイックリンα': 'squiclean_a',
'スクイックリンβ': 'squiclean_b',
'バケットスロッシャー': 'bucketslosher',
'バケットスロッシャーデコ': 'bucketslosher_deco',
'ヒッセン': 'hissen',
'スクリュースロッシャー': 'screwslosher',
'バレルスピナー': 'barrelspinner',
'バレルスピナーデコ': 'barrelspinner_deco',
'スプラスピナー': 'splatspinner',
'スプラスピナーコラボ': 'splatspinner_collabo',
}[weapon]
except:
IkaUtils.dprint(
                '%s: Failed to convert weapon name %s to stat.ink value' % (self, weapon))
return None
def encode_image(self, img):
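        """Write the image to a temporary PNG file and return its raw bytes (None on failure)."""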
if IkaUtils.isWindows():
temp_file = os.path.join(
os.environ['TMP'], '_image_for_statink.png')
else:
temp_file = '_image_for_statink.png'
IkaUtils.dprint('%s: Using temporary file %s' % (self, temp_file))
try:
if os.path.exists(temp_file):
os.remove(temp_file)
except:
IkaUtils.dprint(
'%s: Failed to remove existing temporary file %s' % (self, temp_file))
IkaUtils.dprint(traceback.format_exc())
try:
            # TODO: stat.ink accepts only 16:9 screenshots
IkaUtils.writeScreenshot(temp_file, img)
f = open(temp_file, 'rb')
s = f.read()
try:
f.close()
os.remove(temp_file)
except:
pass
except:
IkaUtils.dprint('%s: Failed to attach image_result' % self)
return None
IkaUtils.dprint('%s: Encoded screenshot (%dx%d %d bytes)' %
(self, img.shape[1], img.shape[0], len(s)))
return s
def _set_values(self, fields, dest, src):
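        """Copy the listed fields from src to dest, coercing each value to the
        declared type ('int', 'str' or 'str_lower'); missing or None values are skipped."""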
for field in fields:
f_type = field[0]
f_statink = field[1]
f_ikalog = field[2]
if (f_ikalog in src) and (src[f_ikalog] is not None):
if f_type == 'int':
try:
dest[f_statink] = int(src[f_ikalog])
except: # ValueError
IkaUtils.dprint('%s: field %s failed: src[%s] == %s' % (
self, f_statink, f_ikalog, src[f_ikalog]))
pass
elif f_type == 'str':
dest[f_statink] = str(src[f_ikalog])
elif f_type == 'str_lower':
dest[f_statink] = str(src[f_ikalog]).lower()
def _add_event(self, context, event_data=None):
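        """Append event_data to the event list, stamping it with the elapsed time
        in seconds ('at') since the battle started; drop it if timing is unknown."""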
assert event_data is not None
        if ('at' not in event_data) and (self.time_start_at_msec is not None):
offset_msec = context['engine']['msec'] - self.time_start_at_msec
event_data['at'] = int(offset_msec / 100) / 10
else:
IkaUtils.dprint('%s: Event %s not logged due to no timing information.' % (
self, event_data['type']))
return
self.events.append(event_data)
def _add_ranked_battle_event(self, context, event_sub_type=None):
assert event_sub_type is not None
self._add_event(context, {
'type': 'ranked_battle_event',
'value': event_sub_type,
})
def composite_payload(self, context):
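        """Compose the stat.ink battle payload from the IkaLog context and collected data."""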
payload = {}
# Lobby
lobby_type = context['lobby'].get('type', None)
if lobby_type == 'public':
payload['lobby'] = 'standard'
elif lobby_type == 'private':
payload['lobby'] = 'private'
elif context['game']['is_fes'] or (lobby_type == 'festa'):
payload['lobby'] = 'fest'
elif lobby_type == 'tag':
num_members = context['lobby'].get('team_members', None)
if num_members in [2, 3, 4]:
payload['lobby'] = 'squad_%d' % num_members
else:
IkaUtils.dprint('%s: invalid lobby key squad_%s' %
(self, num_members))
else:
IkaUtils.dprint('%s: No lobby information.' % self)
# GameStart
stage = self.encode_stage_name(context)
if stage:
payload['map'] = stage
rule = self.encode_rule_name(context)
if rule:
payload['rule'] = rule
if self.time_start_at and self.time_end_at:
payload['start_at'] = int(self.time_start_at)
payload['end_at'] = int(self.time_end_at)
# In-game logs
if len(context['game']['death_reasons'].keys()) > 0:
payload['death_reasons'] = context['game']['death_reasons'].copy()
if len(self.events) > 0:
payload['events'] = list(self.events)
# ResultJudge
if payload.get('rule', None) in ['nawabari']:
scores = context['game'].get('nawabari_scores_pct', None)
print('nawabari scores = %s' % scores)
if scores is not None:
payload['my_team_final_percent'] = scores[0]
payload['his_team_final_percent'] = scores[1]
if payload.get('rule', None) in ['area', 'yagura', 'hoko']:
scores = context['game'].get('ranked_scores', None)
print('ranked scores = %s' % scores)
if scores is not None:
payload['my_team_count'] = scores[0]
payload['his_team_count'] = scores[1]
scores = context['game'].get('earned_scores', None)
if 0: # scores is not None:
payload['my_team_final_point'] = scores[0]
payload['his_team_final_point'] = scores[1]
# ResultDetail
me = IkaUtils.getMyEntryFromContext(context)
payload['result'] = IkaUtils.getWinLoseText(
context['game']['won'],
win_text='win',
lose_text='lose',
unknown_text=None
)
if 'weapon' in me:
weapon = self.encode_weapon_name(me['weapon'])
if weapon:
payload['weapon'] = weapon
if context['game']['is_fes']:
payload['gender'] = me['gender_en']
payload['fest_title'] = str(me['prefix_en']).lower()
self._set_values(
[ # 'type', 'stat.ink Field', 'IkaLog Field'
['int', 'rank_in_team', 'rank_in_team'],
['int', 'kill', 'kills'],
['int', 'death', 'deaths'],
['int', 'level', 'rank'],
['int', 'my_point', 'score'],
], payload, me)
players = []
for e in context['game']['players']:
player = {}
player['team'] = 'my' if (e['team'] == me['team']) else 'his'
player['is_me'] = 'yes' if e['me'] else 'no'
self._set_values(
[ # 'type', 'stat.ink Field', 'IkaLog Field'
['int', 'rank_in_team', 'rank_in_team'],
['int', 'kill', 'kills'],
['int', 'death', 'deaths'],
['int', 'level', 'rank'],
['int', 'point', 'score'],
], player, e)
if 'weapon' in e:
weapon = self.encode_weapon_name(e['weapon'])
if weapon:
player['weapon'] = weapon
if payload.get('rule', 'nawabari') != 'nawabari':
if 'udemae_pre' in e:
player['rank'] = str(e['udemae_pre']).lower()
players.append(player)
payload['players'] = players
# ResultUdemae
if payload.get('rule', 'nawabari') != 'nawabari':
self._set_values(
[ # 'type', 'stat.ink Field', 'IkaLog Field'
['str_lower', 'rank', 'result_udemae_str_pre'],
['int', 'rank_exp', 'result_udemae_exp_pre'],
['str_lower', 'rank_after', 'result_udemae_str'],
['int', 'rank_exp_after', 'result_udemae_exp'],
], payload, context['game'])
knockout = context['game'].get('knockout', None)
if (payload.get('rule', 'nawabari') != 'nawabari') and (knockout is not None):
payload['knock_out'] = {True: 'yes', False: 'no'}[knockout]
# ResultGears
if 'result_gears' in context['scenes']:
self._set_values(
[ # 'type', 'stat.ink Field', 'IkaLog Field'
['int', 'cash_after', 'cash'],
], payload, context['scenes']['result_gears'])
# ResultFesta
if payload.get('lobby', None) == 'fest':
self._set_values(
[ # 'type', 'stat.ink Field', 'IkaLog Field'
['int', 'fest_exp', 'result_festa_exp_pre'],
['int', 'fest_exp_after', 'result_festa_exp'],
], payload, context['game'])
if payload.get('fest_title', None) is not None:
current_title = payload['fest_title']
if context['game'].get('result_festa_title_changed', False):
try:
index = fes_rank_titles.index(current_title)
current_title = fes_rank_titles[index + 1]
except IndexError:
IkaUtils.dprint(
'%s: IndexError at fes_rank_titles' % self)
payload['fest_title_after'] = current_title.lower()
# Team colors
if ('my_team_color' in context['game']):
payload['my_team_color'] = {
'hue': context['game']['my_team_color']['hsv'][0] * 2,
'rgb': context['game']['my_team_color']['rgb'],
}
payload['his_team_color'] = {
'hue': context['game']['counter_team_color']['hsv'][0] * 2,
'rgb': context['game']['counter_team_color']['rgb'],
}
# Screenshots
if self.img_result_detail is not None:
payload['image_result'] = self.encode_image(self.img_result_detail)
else:
IkaUtils.dprint('%s: img_result_detail is empty.' % self)
if self.img_judge is not None:
payload['image_judge'] = self.encode_image(self.img_judge)
else:
IkaUtils.dprint('%s: img_judge is empty.' % self)
# Agent Information
payload['agent'] = 'IkaLog'
payload['agent_version'] = IKALOG_VERSION
for field in payload.keys():
if payload[field] is None:
IkaUtils.dprint('%s: [FIXME] payload has blank entry %s:%s' % (
self, field, payload[field]))
return payload
def write_response_to_file(self, r_header, r_body, basename=None):
if basename is None:
t = datetime.now().strftime("%Y%m%d_%H%M")
basename = os.path.join('/tmp', 'statink_%s' % t)
try:
f = open(basename + '.r_header', 'w')
f.write(r_header)
f.close()
except:
IkaUtils.dprint('%s: Failed to write file' % self)
IkaUtils.dprint(traceback.format_exc())
try:
f = open(basename + '.r_body', 'w')
f.write(r_body)
f.close()
except:
IkaUtils.dprint('%s: Failed to write file' % self)
IkaUtils.dprint(traceback.format_exc())
def write_payload_to_file(self, payload, basename=None):
if basename is None:
t = datetime.now().strftime("%Y%m%d_%H%M")
basename = os.path.join('/tmp', 'statink_%s' % t)
try:
f = open(basename + '.msgpack', 'w')
f.write(''.join(map(chr, umsgpack.packb(payload))))
f.close()
except:
IkaUtils.dprint('%s: Failed to write msgpack file' % self)
IkaUtils.dprint(traceback.format_exc())
def post_payload(self, payload, api_key=None):
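        """POST the payload to the stat.ink v1 battle API as msgpack (no-op in dry-run mode)."""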
if self.dry_run:
IkaUtils.dprint(
'%s: Dry-run mode, skipping POST to stat.ink.' % self)
return
url_statink_v1_battle = 'https://stat.ink/api/v1/battle'
if api_key is None:
api_key = self.api_key
if api_key is None:
            raise ValueError('No API key specified')
http_headers = {
'Content-Type': 'application/x-msgpack',
}
        # The payload will be modified, so make a copy first.
        # Note this is a shallow copy: only the top-level dict is duplicated.
payload = payload.copy()
payload['apikey'] = api_key
mp_payload_bytes = umsgpack.packb(payload)
mp_payload = ''.join(map(chr, mp_payload_bytes))
pool = urllib3.PoolManager()
req = pool.urlopen('POST', url_statink_v1_battle,
headers=http_headers,
body=mp_payload,
)
if self.show_response_enabled:
print(req.data.decode('utf-8'))
def print_payload(self, payload):
payload = payload.copy()
if 'image_result' in payload:
payload['image_result'] = '(PNG Data)'
if 'image_judge' in payload:
payload['image_judge'] = '(PNG Data)'
if 'events' in payload:
payload['events'] = '(Events)'
pprint.pprint(payload)
def on_game_go_sign(self, context):
self.time_start_at = int(time.time())
self.time_end_at = None
self.events = []
self.time_last_score_msec = None
self.time_last_objective_msec = None
        # Check whether context['engine']['msec'] exists so that unit tests
        # without an engine clock still work.
if 'msec' in context['engine']:
self.time_start_at_msec = context['engine']['msec']
def on_game_start(self, context):
        # Timing is normally taken from the go sign, but also hook on_game_start
        # as a fallback in case the go sign was not recognized.
self.on_game_go_sign(context)
def on_game_finish(self, context):
self.time_end_at = int(time.time())
if ('msec' in context['engine']) and (self.time_start_at_msec is not None):
duration_msec = context['engine']['msec'] - self.time_start_at_msec
if duration_msec >= 0.0:
self.time_start_at = int(
self.time_end_at - int(duration_msec / 1000))
        # The result screens should come next, so discard any data gathered so far.
self.img_result_detail = None
self.img_judge = None
IkaUtils.dprint('%s: Discarded screenshots' % self)
##
# on_game_individual_result Hook
# @param self The Object Pointer
# @param context IkaLog context
#
def on_game_individual_result(self, context):
self.img_result_detail = context['engine']['frame']
IkaUtils.dprint('%s: Gathered img_result (%s)' %
(self, self.img_result_detail.shape))
def on_result_judge(self, context):
self.img_judge = context['game'].get('image_judge', None)
IkaUtils.dprint('%s: Gathered img_judge(%s)' %
(self, self.img_judge.shape))
def on_game_session_end(self, context):
IkaUtils.dprint('%s (enabled = %s)' % (self, self.enabled))
if (not self.enabled) and (not self.dry_run):
return False
payload = self.composite_payload(context)
self.print_payload(payload)
if self.debug_writePayloadToFile:
self.write_payload_to_file(payload)
self.post_payload(payload)
def on_game_killed(self, context):
self._add_event(context, {'type': 'killed'})
def on_game_dead(self, context):
self._add_event(context, {'type': 'dead'})
def on_game_paint_score_update(self, context):
score = context['game'].get('paint_score', 0)
if (score > 0 and 'msec' in context['engine']) and (self.time_start_at_msec is not None):
event_msec = context['engine']['msec'] - self.time_start_at_msec
            # Skip the update if less than 200 ms have passed since the previous score event.
if (self.time_last_score_msec is None) or (event_msec - self.time_last_score_msec >= 200):
self._add_event(context, {
'type': 'point',
'point': score,
})
self.time_last_score_msec = event_msec
def on_game_objective_position_update(self, context):
if not self.track_objective_enabled:
return
event_msec = context['engine']['msec'] - self.time_start_at_msec
if (self.time_last_objective_msec is None) or (event_msec - self.time_last_objective_msec >= 200):
self._add_event(context, {
'type': 'objective',
'position': context['game']['tower'].get('pos', 0),
})
self.time_last_objective_msec = event_msec
def on_game_splatzone_counter_update(self, context):
if not self.track_splatzone_enabled:
return
event_msec = context['engine']['msec'] - self.time_start_at_msec
self._add_event(context, {
'type': 'splatzone',
'my_team_count': context['game']['splatzone_my_team_counter']['value'],
'my_team_injury_count': None,
'his_team_count': context['game']['splatzone_counter_team_counter']['value'],
'his_team_injury_count': None,
})
self.time_last_score_msec = event_msec
def on_game_splatzone_we_got(self, context):
self._add_ranked_battle_event(context, 'we_got')
def on_game_splatzone_we_lost(self, context):
self._add_ranked_battle_event(context, 'we_lost')
def on_game_splatzone_they_got(self, context):
self._add_ranked_battle_event(context, 'they_got')
def on_game_splatzone_they_lost(self, context):
self._add_ranked_battle_event(context, 'they_lost')
def on_game_rainmaker_we_got(self, context):
self._add_ranked_battle_event(context, 'we_got')
def on_game_rainmaker_we_lost(self, context):
self._add_ranked_battle_event(context, 'we_lost')
def on_game_rainmaker_they_got(self, context):
self._add_ranked_battle_event(context, 'they_got')
def on_game_rainmaker_they_lost(self, context):
self._add_ranked_battle_event(context, 'they_lost')
def on_game_towercontrol_we_took(self, context):
self._add_ranked_battle_event(context, 'we_got')
def on_game_towercontrol_we_lost(self, context):
self._add_ranked_battle_event(context, 'we_lost')
def on_game_towercontrol_they_took(self, context):
self._add_ranked_battle_event(context, 'they_got')
def on_game_towercontrol_they_lost(self, context):
self._add_ranked_battle_event(context, 'they_lost')
def on_game_ranked_we_lead(self, context):
self._add_ranked_battle_event(context, 'we_lead')
def on_game_ranked_they_lead(self, context):
self._add_ranked_battle_event(context, 'they_lead')
def __init__(self, api_key=None, track_objective=False, track_splatzone=False, debug=False, dry_run=False):
        self.enabled = api_key is not None
self.api_key = api_key
self.dry_run = dry_run
self.time_start_at = None
self.time_end_at = None
self.time_start_at_msec = None
self.events = []
self.time_last_score_msec = None
self.time_last_objective_msec = None
self.img_result_detail = None
self.img_judge = None
self.debug_writePayloadToFile = debug
self.show_response_enabled = debug
self.track_objective_enabled = track_objective
self.track_splatzone_enabled = track_splatzone
if __name__ == "__main__":
    # When this module is run directly as a script:
    #
    # Post the result screenshot given as the first command-line argument,
    # treating it as stage 'ハコフグ倉庫' with rule 'ガチエリア'.
    #
    # Set the API key in the IKALOG_STATINK_APIKEY environment variable
    # beforehand.
    import sys

    import cv2
    from ikalog.scenes.result_detail import *
obj = StatInk(
api_key=os.environ['IKALOG_STATINK_APIKEY'],
dry_run=False,
debug=True,
)
    # Minimal context for a single post
file = sys.argv[1]
context = {
'engine': {
'frame': cv2.imread(file),
},
'game': {
'map': {'name': 'ハコフグ倉庫', },
'rule': {'name': 'ガチエリア'},
'death_reasons': {},
},
'scenes': {
},
'lobby': {
},
}
    # Analyze each player's results
ResultDetail().analyze(context)
    # Trigger the post to stat.ink
obj.on_game_session_end(context)
|
hrhtspr/IkaLog
|
ikalog/outputs/statink.py
|
Python
|
apache-2.0
| 30,510
|
class Quantifier(object):
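    """Records observation/behaviour pairs and adjusts their stored weights
    in a knowledge store according to the `found` outcome."""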
def __init__(self):
self.behaviour = {}
self.latest_observation = None
self.latest_behaviour = None
def add(self, observation, behaviour):
self.behaviour[observation] = behaviour
self.latest_observation = observation
self.latest_behaviour = behaviour
def adjust_reward(self, found, knowledge):
for observation in self.behaviour:
behaviour = self.behaviour[observation]
information = knowledge.get_information(observation)
if found:
information.adjust_behaviour(behaviour, 2)
else:
information.adjust_behaviour(behaviour, 0.9)
return
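        # Note: the call below is unreachable because of the bare return above.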
self.adjust_latest_behaviour(found, knowledge)
def adjust_latest_behaviour(self, found, knowledge):
if not found and self.latest_behaviour:
behaviour = self.behaviour[self.latest_observation]
information = knowledge.get_information(self.latest_behaviour)
information.adjust_behaviour(behaviour, 0.5)
|
sergiuionescu/gym-agents
|
agents/Quantifier.py
|
Python
|
apache-2.0
| 1,080
|