| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
lmzintgraf/MultiMAuS
|
refs/heads/master
|
simulator/customers.py
|
1
|
import numpy as np
from pytz import timezone, country_timezones
from simulator.customer_abstract import AbstractCustomer
class BaseCustomer(AbstractCustomer):
def __init__(self, transaction_model, fraudster):
"""
Base class for customers/fraudsters that support uni-modal authentication.
:param transaction_model:
:param fraudster:
"""
unique_id = transaction_model.get_next_customer_id(fraudster)
super().__init__(unique_id, transaction_model, fraudster)
# initialise probability of making a transaction per month/hour/...
self.noise_level = self.params['noise_level']
# average number of transactions per hour in general; varies per customer
self.avg_trans_per_hour = self.initialise_avg_trans_per_hour()
# initialise transaction probabilities per month/monthday/weekday/hour
self.trans_prob_month, self.trans_prob_monthday, self.trans_prob_weekday, self.trans_prob_hour = self.initialise_transaction_probabilities()
# whether the current transaction was cancelled by the customer
self.curr_trans_cancelled = False
def decide_making_transaction(self):
# reset the cancellation flag: the current transaction has not been cancelled (yet)
self.curr_trans_cancelled = False
if self.stay:
make_transaction = self.get_transaction_prob() > self.random_state.uniform(0, 1)
else:
make_transaction = False
return make_transaction
def post_process_transaction(self):
# decide whether to stay
self.stay = self.stay_after_transaction()
def get_transaction_prob(self):
# get the current local time
self.local_datetime = self.get_local_datetime()
# get the average transactions per hour
trans_prob = self.avg_trans_per_hour
# now weigh by probabilities of transactions per month/week/...
trans_prob *= 12 * self.trans_prob_month[self.local_datetime.month - 1]
trans_prob *= 24 * self.trans_prob_hour[self.local_datetime.hour]
trans_prob *= 30.5 * self.trans_prob_monthday[self.local_datetime.day - 1]
trans_prob *= 7 * self.trans_prob_weekday[self.local_datetime.weekday()]
return trans_prob
def get_local_datetime(self):
# convert global to local date (first add global timezone info, then convert to local)
local_datetime = self.model.curr_global_date
local_datetime = local_datetime.astimezone(timezone(country_timezones(self.country)[0]))
return local_datetime
def get_curr_merchant(self):
"""
Can be called at each transaction; will select a merchant to buy from.
:return: merchant ID
"""
merchant_prob = self.params['merchant_per_currency'][self.fraudster]
merchant_prob = merchant_prob.loc[self.currency]
merchant_ID = self.random_state.choice(merchant_prob.index.values, p=merchant_prob.values.flatten())
return next(m for m in self.model.merchants if m.unique_id == merchant_ID)
def get_curr_amount(self):
return self.curr_merchant.get_amount(self)
def stay_after_transaction(self):
return self.get_staying_prob() > self.random_state.uniform(0, 1)
def get_staying_prob(self):
return self.params['stay_prob'][self.fraudster]
def initialise_country(self):
country_frac = self.params['country_frac']
return self.random_state.choice(country_frac.index.values, p=country_frac.iloc[:, self.fraudster].values)
def initialise_currency(self):
currency_prob = self.params['currency_per_country'][self.fraudster]
currency_prob = currency_prob.loc[self.country]
return self.random_state.choice(currency_prob.index.values, p=currency_prob.values.flatten())
def initialise_card_id(self):
return self.model.get_next_card_id()
def initialise_transaction_probabilities(self):
# transaction probability per month
trans_prob_month = self.params['frac_month'][:, self.fraudster]
trans_prob_month = self.random_state.multivariate_normal(trans_prob_month, np.eye(12) * self.noise_level / 1200)
trans_prob_month[trans_prob_month < 0] = 0
# transaction probability per day in month
trans_prob_monthday = self.params['frac_monthday'][:, self.fraudster]
trans_prob_monthday = self.random_state.multivariate_normal(trans_prob_monthday, np.eye(31) * self.noise_level / 305)
trans_prob_monthday[trans_prob_monthday < 0] = 0
# transaction probability per weekday (we assume this differs per individual)
trans_prob_weekday = self.params['frac_weekday'][:, self.fraudster]
trans_prob_weekday = self.random_state.multivariate_normal(trans_prob_weekday, np.eye(7) * self.noise_level / 70)
trans_prob_weekday[trans_prob_weekday < 0] = 0
# transaction probability per hour (we assume this differs per individual)
trans_prob_hour = self.params['frac_hour'][:, self.fraudster]
trans_prob_hour = self.random_state.multivariate_normal(trans_prob_hour, np.eye(24) * self.noise_level / 240)
trans_prob_hour[trans_prob_hour < 0] = 0
return trans_prob_month, trans_prob_monthday, trans_prob_weekday, trans_prob_hour
def initialise_avg_trans_per_hour(self):
trans_per_year = self.params['trans_per_year'][self.fraudster]
rand_addition = self.random_state.normal(0, self.noise_level * trans_per_year)
if trans_per_year + rand_addition > 0:
trans_per_year += rand_addition
avg_trans_per_hour = trans_per_year / 366. / 24.
avg_trans_per_hour *= self.params['transaction_motivation'][self.fraudster]
return avg_trans_per_hour
class GenuineCustomer(BaseCustomer):
def __init__(self, transaction_model, satisfaction=1):
super().__init__(transaction_model, fraudster=False)
# add field for whether the credit card was corrupted by a fraudster
self.card_corrupted = False
# field whether current transaction was authorised or not
self.curr_auth_step = 0
# initialise the customer's patience (optimistically)
self.patience = self.random_state.beta(10, 2)
# instantiate the customer's satisfaction
self.satisfaction = satisfaction
def stay_after_transaction(self):
stay_prob = self.satisfaction * self.params['stay_prob'][self.fraudster]
return (1-stay_prob) <= self.random_state.uniform(0, 1)
def card_got_corrupted(self):
self.card_corrupted = True
def get_transaction_prob(self):
return self.satisfaction * super().get_transaction_prob()
def decide_making_transaction(self):
"""
For a genuine customer, we add the option of leaving
when the customer's card was subject to fraud
:return:
"""
# reset authentication step count
self.curr_auth_step = 0
# if the card was corrupted, the user is more likely to leave
if self.card_corrupted:
if self.params['stay_after_fraud'] < self.random_state.uniform(0, 1):
self.stay = False
# can skip the entire super().decide_making_transaction() computation
return False
return super().decide_making_transaction()
def post_process_transaction(self):
self.update_satisfaction()
super().post_process_transaction()
def update_satisfaction(self):
"""
Adjust the satisfaction of the user after a transaction was made.
:return:
"""
# if the customer cancelled the transaction, the satisfaction goes down by 5%
if self.curr_trans_cancelled:
self.satisfaction *= 0.95
else:
# if no authentication was done, the satisfaction goes up by 1%
if self.curr_auth_step == 0:
self.satisfaction *= 1.01
# otherwise, it goes down by 1%
else:
self.satisfaction *= 0.99
self.satisfaction = min([1, self.satisfaction])
self.satisfaction = max([0, self.satisfaction])
def give_authentication(self):
"""
Authenticate self; this can be called several times per transaction.
Returns the authentication quality.
:return:
"""
curr_patience = 0.8 * self.patience + 0.2 * self.curr_amount/self.curr_merchant.max_amount
if curr_patience > self.random_state.uniform(0, 1):
auth_quality = 1
else:
# cancel the transaction
self.curr_trans_cancelled = True
auth_quality = None
self.curr_auth_step += 1
return auth_quality
class FraudulentCustomer(BaseCustomer):
def __init__(self, transaction_model):
super().__init__(transaction_model, fraudster=True)
def initialise_card_id(self):
"""
Pick a card either by using a card from an existing user,
or a completely new one (i.e., from customers unknown to the processing platform)
:return:
"""
if self.params['fraud_cards_in_genuine'] > self.random_state.uniform(0, 1):
# the fraudster picks a customer...
# ... (1) from a familiar country
fraudster_countries = self.params['country_frac'].index[self.params['country_frac']['fraud'] !=0].values
# ... (2) from a familiar currency
fraudster_currencies = self.params['currency_per_country'][1].index.get_level_values(1).unique()
# ... (3) that has already made a transaction
customers_with_active_cards = [c for c in self.model.customers if c.card_id is not None]
# now pick the fraud target (if there are no targets get own credit card)
try:
customer = self.random_state.choice([c for c in customers_with_active_cards if (c.country in fraudster_countries) and (c.currency in fraudster_currencies)])
# get the information from the target
card = customer.card_id
self.country = customer.country
self.currency = customer.currency
except ValueError:
card = super().initialise_card_id()
else:
card = super().initialise_card_id()
return card
def give_authentication(self):
"""
Authenticate self; this can be called several times per transaction.
Returns the authentication quality.
:return:
"""
# we assume that the fraudster cannot provide a second authentication
self.curr_trans_cancelled = True
return None
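# --- Editor's note (not part of the original file): a minimal sketch of how
# get_transaction_prob above combines the per-customer base rate with the
# month/monthday/weekday/hour fractions; every number below is illustrative.
avg_trans_per_hour = 0.02                      # assumed per-customer base rate
frac_month, frac_monthday = 1 / 12, 1 / 30.5   # uniform fractions for illustration
frac_weekday, frac_hour = 1 / 7, 1 / 24
trans_prob = avg_trans_per_hour
trans_prob *= 12 * frac_month                  # each factor is 1.0 for a uniform fraction
trans_prob *= 24 * frac_hour
trans_prob *= 30.5 * frac_monthday
trans_prob *= 7 * frac_weekday
print(trans_prob)                              # ~0.02: uniform fractions leave the base rate unchanged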
|
felipenaselva/felipe.repository
|
refs/heads/master
|
script.module.liveresolver/lib/js2py/test.py
|
31
|
import js2py
c = js2py.EvalJs()
# c.execute('a = {d:4,f:function k() {return 1}}')
# c.execute('function k(a) {console.log(a);console.log(this)}')
# c.execute('f = function (){}')
a = r'''
Number( Date())
'''
#c.execute(a)
res = js2py.translate_js(a)
with open('test_res.py', 'wb') as f:
f.write(res)
def f(a, b, c):
return [xrange(100),0]
e = js2py.eval_js(a)
context = js2py.EvalJs({'python_sum': sum})
print context.eval('python_sum([1,2,3])')
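# --- Editor's note (not part of the original file): a tiny sketch of the same
# js2py entry points exercised above (eval_js, and EvalJs with a Python context).
print(js2py.eval_js('2 + 3'))            # 5
ctx = js2py.EvalJs({'py_len': len})
print(ctx.eval('py_len("abc")'))         # 3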
|
rlishtaba/py-algorithms
|
refs/heads/master
|
py_algorithms/data_structures/heap.py
|
1
|
import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from ..utils import test_iterable
class _HeapNode:
def __init__(self, key: Any, value: Any):
self.key = key
self.value = value
self.degree = 0
self.marked = False
self.right = self
self.left = self
self.parent = None
self.child = None
def is_marked(self) -> bool:
return self.marked is True
class Heap:
MAX_MIN = -sys.maxsize
def __init__(self, comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()):
test_iterable(xs)
self._size = 0
self._comparator_f2 = comparator_f2
self._next = None
self._stored = {}
# default initial values
for x in xs:
self.push(x, x)
@staticmethod
def _get_by_index(array, index) -> Union[None, Any]:
try:
return array[index]
except IndexError:
return None
@classmethod
def _set_entry_by_index(cls, array, index, value):
if cls._get_by_index(array, index) == cls.MAX_MIN:
array[index] = value
return array
else:
array.extend([cls.MAX_MIN] * (index - len(array) + 1))
return cls._set_entry_by_index(array, index, value)
@property
def size(self):
return self._size
@property
def next(self) -> Union[Any, None]:
if self._next:
return self._next.value
return None
@property
def next_key(self) -> Union[Any, None]:
if self._next:
return self._next.key
return None
@property
def is_empty(self) -> bool:
return self._next is None
def clear(self) -> 'Heap':
self._next = None
self._size = 0
self._stored = {}
return self
def contains_key(self, key) -> bool:
if self._stored.get(key):
return True
return False
def push(self, key: Any, value: Any) -> Any:
if key is None:
raise RuntimeError('Could not process heap keys equal to Null.')
node = _HeapNode(key, value)
if self._next:
node.right = self._next
node.left = self._next.left
node.left.right = node
self._next.left = node
if self._comparator_f2(key, self._next.key):
self._next = node
else:
self._next = node
self._size += 1
w = self._next.right
while w is not self._next:
w = w.right
if not self._stored.get(key, None):
self._stored[key] = []
self._stored[key].append(node)
return value
def pop(self) -> Any:
if not self._next:
return None
popped = self._next
if self._size == 1:
self.clear()
return popped.value
# things get hairy here: we need to merge the
# popped node's children into the root list
if self._next.child:
self._next.child.parent = None
sibling = self._next.child.right
while not sibling == self._next.child:
sibling.parent = None
sibling = sibling.right
# Merge children into root.
# If next is a singular root node,
# make its child pointer the next node
if self._next.right == self._next:
self._next = self._next.child
else:
next_left, next_right = self._next.left, self._next.right
current_child = self._next.child
self._next.right.left = current_child
self._next.left.right = current_child.right
current_child.right.left = next_left
current_child.right = next_right
self._next = self._next.right
else:
self._next.left.right = self._next.right
self._next.right.left = self._next.left
self._next = self._next.right
self._consolidate()
if not self._stored.get(popped.key, None):
raise RuntimeError("Could not delete a heap entry.")
self._size -= 1
return popped.value
def _consolidate(self):
roots = []
root = self._next
_min = root
while True: # find the nodes in the list
roots.append(root)
root = root.right
if root == self._next:
break
degrees = []
for root in roots:
if self._comparator_f2(root.key, _min.key):
_min = root
# check if we need to merge
if not self._get_by_index(degrees, root.degree):
self._set_entry_by_index(degrees, root.degree, root)
else:
# there is another node(s) with the same degree,
# we'll try to consolidate them
degree = root.degree
while not (self._get_by_index(degrees, degree) in [self.MAX_MIN, None]):
other_root_with_degree = degrees[degree]
if self._comparator_f2(root.key, other_root_with_degree.key):
# determine which node is the parent, which one is the
# child
smaller, larger = root, other_root_with_degree
else:
smaller, larger = other_root_with_degree, root
self._link_nodes(larger, smaller)
degrees[degree] = self.MAX_MIN
root = smaller
degree += 1
self._set_entry_by_index(degrees, degree, root)
# make sure duplicate keys end up in the right order
if _min.key == root.key:
_min = root
self._next = _min
@staticmethod
def _link_nodes(child, parent) -> None:
"""make node a child of a parent"""
# link the child's siblings
child.left.right = child.right
child.right.left = child.left
child.parent = parent
# if parent doesn't have children, make new child its only child
if not parent.child:
parent.child = child.right = child.left = child
# otherwise insert new child into parent's children list
else:
current_child = parent.child
child.left = current_child
child.right = current_child.right
current_child.right.left = child
current_child.right = child
parent.degree += 1
child.marked = False
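# --- Editor's note (not part of the original file): a minimal usage sketch of
# the Heap class above, assuming it is imported from this module. The comparator
# decides the order, so `lambda a, b: a < b` gives a min-heap; elements seeded via
# the constructor use the element itself as both key and value.
h = Heap(lambda a, b: a < b, [5, 3, 8])
h.push(1, 'one')
print(h.next_key, h.next)                # 1 one
while not h.is_empty:
    print(h.pop())                       # 'one', then 3, 5, 8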
|
mayblue9/bokeh
|
refs/heads/master
|
bokeh/charts/builder/tests/test_heatmap_builder.py
|
33
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import HeatMap
from bokeh.models import FactorRange
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHeatMap(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['apples'] = [4,5,8]
xyvalues['bananas'] = [1,2,4]
xyvalues['pears'] = [6,5,4]
xyvaluesdf = pd.DataFrame(xyvalues, index=['2009', '2010', '2011'])
# prepare some data to check test results...
heights = widths = [0.95] * 9
colors = ['#e2e2e2', '#75968f', '#cc7878', '#ddb7b1', '#a5bab7', '#ddb7b1',
'#550b1d', '#e2e2e2', '#e2e2e2']
catx = ['apples', 'bananas', 'pears', 'apples', 'bananas', 'pears',
'apples', 'bananas', 'pears']
rates = [4, 1, 6, 5, 2, 5, 8, 4, 4]
caty = ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(HeatMap, _xy, palette=colors)
builder = hm._builders[0]
# TODO: Fix bug
#self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['height'], heights)
assert_array_equal(builder._data['width'], widths)
assert_array_equal(builder._data['catx'], catx)
assert_array_equal(builder._data['rate'], rates)
assert_array_equal(builder._source._data, builder._data)
assert_array_equal(hm.x_range.factors, builder._catsx)
assert_array_equal(hm.y_range.factors, builder._catsy)
self.assertIsInstance(hm.x_range, FactorRange)
self.assertIsInstance(hm.y_range, FactorRange)
# TODO: (bev) not sure what correct behaviour is
#assert_array_equal(builder._data['color'], colors)
if i == 0: # OrderedDict input (i == 1 is the DataFrame)
assert_array_equal(builder._data['caty'], caty)
else:
_caty = ['2009']*3 + ['2010']*3 + ['2011']*3
assert_array_equal(builder._data['caty'], _caty)
catx = ['0', '1', '2', '0', '1', '2', '0', '1', '2']
lvalues = [[4,5,8], [1,2,4], [6,5,4]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(HeatMap, _xy, palette=colors)
builder = hm._builders[0]
# TODO: FIX bug
#self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['height'], heights)
assert_array_equal(builder._data['width'], widths)
assert_array_equal(builder._data['catx'], catx)
assert_array_equal(builder._data['rate'], rates)
assert_array_equal(builder._source._data, builder._data)
assert_array_equal(hm.x_range.factors, builder._catsx)
assert_array_equal(hm.y_range.factors, builder._catsy)
self.assertIsInstance(hm.x_range, FactorRange)
self.assertIsInstance(hm.y_range, FactorRange)
assert_array_equal(builder._data['caty'], caty)
# TODO: (bev) not sure what correct behaviour is
# assert_array_equal(builder._data['color'], colors)
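# --- Editor's note (not part of the original file): a minimal sketch of the
# chart construction exercised above, reusing the create_chart test helper and
# the same data/palette; it only peeks at builder columns the test asserts on.
values = OrderedDict()
values['apples'] = [4, 5, 8]
values['bananas'] = [1, 2, 4]
values['pears'] = [6, 5, 4]
palette = ['#e2e2e2', '#75968f', '#cc7878', '#ddb7b1', '#a5bab7', '#ddb7b1',
           '#550b1d', '#e2e2e2', '#e2e2e2']
hm = create_chart(HeatMap, values, palette=palette)
builder = hm._builders[0]
print(builder._data['catx'])             # category labels along x ('apples', 'bananas', ...)
print(builder._data['rate'])             # the flattened cell values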
|
michael2012z/myKernel
|
refs/heads/master
|
tools/power/pm-graph/analyze_suspend.py
|
84
|
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Links:
# Home Page
# https://01.org/suspendresume
# Source repo
# https://github.com/01org/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
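# --- Editor's note (not part of the original script): a minimal, hypothetical
# pre-flight check for the kernel build options listed above. It assumes the
# running kernel exposes its config at /proc/config.gz (CONFIG_IKCONFIG_PROC);
# nothing here is part of analyze_suspend itself.
import gzip
required = ['CONFIG_PM_DEBUG=y', 'CONFIG_PM_SLEEP_DEBUG=y', 'CONFIG_FTRACE=y',
    'CONFIG_FUNCTION_TRACER=y', 'CONFIG_FUNCTION_GRAPH_TRACER=y',
    'CONFIG_KPROBES=y', 'CONFIG_KPROBES_ON_FTRACE=y']
try:
    config = gzip.open('/proc/config.gz', 'rb').read().decode('utf-8', 'replace')
    for opt in required:
        print('%-32s %s' % (opt, 'ok' if opt in config else 'MISSING'))
except IOError:
    print('kernel config not exposed at /proc/config.gz; cannot verify build options')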
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
import ConfigParser
from threading import Thread
from subprocess import call, Popen, PIPE
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '4.7'
ansi = False
verbose = False
testlog = True
dmesglog = False
ftracelog = False
mindevlen = 0.0
mincglen = 0.0
cgphase = ''
cgtest = -1
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
cpucount = 0
memtotal = 204800
srgap = 0
cgexp = False
testdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
suspendmode = 'mem'
memmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
embedded = False
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
mixedphaseheight = True
devprops = dict()
predelay = 0
postdelay = 0
procexecfmt = 'ps - (?P<ps>.*)$'
devpropfmt = '# Device Properties: .*'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
tracefuncs = {
'sys_sync': dict(),
'pm_prepare_console': dict(),
'pm_notifier_call_chain': dict(),
'freeze_processes': dict(),
'freeze_kernel_threads': dict(),
'pm_restrict_gfp_mask': dict(),
'acpi_suspend_begin': dict(),
'suspend_console': dict(),
'acpi_pm_prepare': dict(),
'syscore_suspend': dict(),
'arch_enable_nonboot_cpus_end': dict(),
'syscore_resume': dict(),
'acpi_pm_finish': dict(),
'resume_console': dict(),
'acpi_pm_end': dict(),
'pm_restore_gfp_mask': dict(),
'thaw_processes': dict(),
'pm_restore_console': dict(),
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout_uninterruptible': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
# ACPI
'acpi_resume_power_resources': dict(),
'acpi_ps_parse_aml': dict(),
# filesystem
'ext4_sync_fs': dict(),
# 80211
'iwlagn_mac_start': dict(),
'iwlagn_alloc_bcast_station': dict(),
'iwl_trans_pcie_start_hw': dict(),
'iwl_trans_pcie_start_fw': dict(),
'iwl_run_init_ucode': dict(),
'iwl_load_ucode_wait_alive': dict(),
'iwl_alive_start': dict(),
'iwlagn_mac_stop': dict(),
'iwlagn_mac_suspend': dict(),
'iwlagn_mac_resume': dict(),
'iwlagn_mac_add_interface': dict(),
'iwlagn_mac_remove_interface': dict(),
'iwlagn_mac_change_interface': dict(),
'iwlagn_mac_config': dict(),
'iwlagn_configure_filter': dict(),
'iwlagn_mac_hw_scan': dict(),
'iwlagn_bss_info_changed': dict(),
'iwlagn_mac_channel_switch': dict(),
'iwlagn_mac_flush': dict(),
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': dict(),
'i915_restore_state': dict(),
'intel_opregion_setup': dict(),
'g4x_pre_enable_dp': dict(),
'vlv_pre_enable_dp': dict(),
'chv_pre_enable_dp': dict(),
'g4x_enable_dp': dict(),
'vlv_enable_dp': dict(),
'intel_hpd_init': dict(),
'intel_opregion_register': dict(),
'intel_dp_detect': dict(),
'intel_hdmi_detect': dict(),
'intel_opregion_init': dict(),
'intel_fbdev_set_suspend': dict(),
}
kprobes = dict()
timeformat = '%.3f'
def __init__(self):
# if this is a phoronix test run, set some default options
if('LOG_FILE' in os.environ and 'TEST_RESULTS_IDENTIFIER' in os.environ):
self.embedded = True
self.dmesglog = self.ftracelog = True
self.htmlfile = os.environ['LOG_FILE']
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
doError('This command requires sysfs mount and root access')
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
doError('This command must be run as root')
return False
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = self.hostname
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = c = m = b = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'baseboard-product-name' in info:
p = info['baseboard-product-name']
elif 'system-product-name' in info:
p = info['system-product-name']
if 'processor-version' in info:
c = info['processor-version']
if 'bios-version' in info:
b = info['bios-version']
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | numcpu:%d | memsz:%d' % \
(m, p, c, b, self.cpucount, self.memtotal)
def printSystemInfo(self):
self.rootCheck(True)
out = dmidecode(self.mempath, True)
fmt = '%-24s: %s'
for name in sorted(out):
print fmt % (name, out[name])
print fmt % ('cpucount', ('%d' % self.cpucount))
print fmt % ('memtotal', ('%d kB' % self.memtotal))
def cpuInfo(self):
self.cpucount = 0
fp = open('/proc/cpuinfo', 'r')
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
fp.close()
fp = open('/proc/meminfo', 'r')
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
break
fp.close()
def initTestOutput(self, name):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
fmt = name+'-%m%d%y-%H%M%S'
testtime = datetime.now().strftime(fmt)
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
if(self.embedded):
self.dmesgfile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_ftrace.txt'
return
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
def setDeviceFilter(self, value):
self.devicefilter = []
if value:
value = value.split(',')
for i in value:
self.devicefilter.append(i.strip())
def rtcWakeAlarmOn(self):
call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
def rtcWakeAlarmOff(self):
call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
fp = Popen('dmesg', stdout=PIPE).stdout
ktime = '0'
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
fp.close()
self.dmesgstart = float(ktime)
def getdmesg(self):
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
op = open(self.dmesgfile, 'a')
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if ktime > self.dmesgstart:
op.write(line)
fp.close()
op.close()
def addFtraceFilterFunctions(self, file):
fp = open(file)
list = fp.read().split('\n')
fp.close()
for i in list:
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
def getFtraceFilterFunctions(self, current):
self.rootCheck(True)
if not current:
call('cat '+self.tpath+'available_filter_functions', shell=True)
return
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
i = self.tracefuncs[i]['func']
if i in master:
print i
else:
print self.colorText(i)
def setFtraceFilterFunctions(self, list):
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
flist = ''
for i in list:
if i not in master:
continue
if ' [' in i:
flist += i.split(' ')[0]+'\n'
else:
flist += i+'\n'
fp = open(self.tpath+'set_graph_function', 'w')
fp.write(flist)
fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
# first remove any spaces inside quotes, and the quotes
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
m = re.match('.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
def addKprobes(self, output=False):
if len(self.kprobes) < 1:
return
if output:
print(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
kpl = [[], [], [], []]
for name in sorted(self.kprobes):
res = self.colorText('YES', 32)
if not self.testKprobe(name, self.kprobes[name]):
res = self.colorText('NO')
rejects.append(name)
else:
if name in self.tracefuncs:
kpl[0].append(name)
elif name in self.dev_tracefuncs:
if 'ub' in self.dev_tracefuncs[name]:
kpl[1].append(name)
else:
kpl[3].append(name)
else:
kpl[2].append(name)
if output:
print(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
self.kprobes.pop(name)
# set the kprobes all at once
self.fsetVal('', 'kprobe_events')
kprobeevents = ''
for kp in kplist:
kprobeevents += self.kprobeText(kp, self.kprobes[kp])
self.fsetVal(kprobeevents, 'kprobe_events')
# verify that the kprobes were set as ordered
check = self.fgetVal('kprobe_events')
linesout = len(kprobeevents.split('\n')) - 1
linesack = len(check.split('\n')) - 1
if output:
res = '%d/%d' % (linesack, linesout)
if linesack < linesout:
res = self.colorText(res, 31)
else:
res = self.colorText(res, 32)
print(' working kprobe functions enabled: %s' % res)
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def fsetVal(self, val, path, mode='w'):
file = self.tpath+path
if not os.path.exists(file):
return False
try:
fp = open(file, mode, 0)
fp.write(val)
fp.flush()
fp.close()
except:
return False
return True
def fgetVal(self, path):
file = self.tpath+path
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
def initFtrace(self, testing=False):
print('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
# set trace buffer to a huge value
if self.usecallgraph or self.usedevsrc:
tgtsize = min(self.memtotal / 2, 2*1024*1024)
maxbuf = '%d' % (tgtsize / max(1, self.cpucount))
if self.cpucount < 1 or not self.fsetVal(maxbuf, 'buffer_size_kb'):
self.fsetVal('131072', 'buffer_size_kb')
else:
self.fsetVal('16384', 'buffer_size_kb')
# go no further if this is just a status check
if testing:
return
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
self.fsetVal('funcgraph-cpu', 'trace_options')
self.fsetVal('funcgraph-duration', 'trace_options')
self.fsetVal('funcgraph-proc', 'trace_options')
self.fsetVal('funcgraph-tail', 'trace_options')
self.fsetVal('nofuncgraph-overhead', 'trace_options')
self.fsetVal('context-info', 'trace_options')
self.fsetVal('graph-time', 'trace_options')
self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
cf = ['dpm_run_callback']
if(self.usetraceeventsonly):
cf += ['dpm_prepare', 'dpm_complete']
for fn in self.tracefuncs:
if 'func' in self.tracefuncs[fn]:
cf.append(self.tracefuncs[fn]['func'])
else:
cf.append(fn)
self.setFtraceFilterFunctions(cf)
# initialize the kprobe trace
elif self.usekprobes:
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
print('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
events = iter(self.traceevents)
for e in events:
self.fsetVal('1', 'events/power/'+e+'/enable')
# clear the trace buffer
self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
def writeDatafileHeader(self, filename, fwdata=[]):
fp = open(filename, 'w')
fp.write(self.teststamp+'\n')
fp.write(self.sysstamp+'\n')
if(self.suspendmode == 'mem' or self.suspendmode == 'command'):
for fw in fwdata:
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
fp.close()
sysvals = SystemValues()
suspendmodename = {
'freeze': 'Freeze (S0)',
'standby': 'Standby (S1)',
'mem': 'Suspend (S3)',
'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
syspath = ''
altname = ''
async = True
xtraclass = ''
xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.async)
def debug(self, dev):
print '%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.async)
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
return '%s [%s]' % (self.altname, dev)
def xtraClass(self):
if self.xtraclass:
return ' '+self.xtraclass
if not self.async:
return ' sync'
return ''
def xtraInfo(self):
if self.xtraclass:
return ' '+self.xtraclass
if self.async:
return ' async_device'
return ' sync_device'
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tKernSus = 0.0 # kernel level suspend start
tKernRes = 0.0 # kernel level resume end
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
pstl = 0 # process timeline
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
devpids = []
kerror = False
def __init__(self, num):
idchar = 'abcdefghij'
self.pstl = dict()
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
self.devicegroups = []
for phase in self.phases:
self.devicegroups.append([phase])
self.errorinfo = {'suspend':[],'resume':[]}
def extractErrorInfo(self, dmesg):
error = ''
tm = 0.0
for i in range(len(dmesg)):
if 'Call Trace:' in dmesg[i]:
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) .*', dmesg[i])
if not m:
continue
tm = float(m.group('ktime'))
if tm < self.start or tm > self.end:
continue
for j in range(i-10, i+1):
error += dmesg[j]
continue
if error:
m = re.match('[ \t]*\[ *[0-9\.]*\] \[\<[0-9a-fA-F]*\>\] .*', dmesg[i])
if m:
error += dmesg[i]
else:
if tm < self.tSuspended:
dir = 'suspend'
else:
dir = 'resume'
error = error.replace('<', '&lt;').replace('>', '&gt;')
vprint('kernel error found in %s at %f' % (dir, tm))
self.errorinfo[dir].append((tm, error))
self.kerror = True
error = ''
def setStart(self, time):
self.start = time
def setEnd(self, time):
self.end = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.phases:
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
tgtdev = ''
for phase in phaselist:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
# pid must match
if dev['pid'] != pid:
continue
devS = dev['start']
devE = dev['end']
if type == 'device':
# device target event is entirely inside the source boundary
if(start < devS or start >= devE or end <= devS or end > devE):
continue
elif type == 'thread':
# thread target event will expand the source boundary
if start < devS:
dev['start'] = start
if end > devE:
dev['end'] = end
tgtdev = dev
break
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
threadname = 'kthread-%d' % (pid)
else:
threadname = '%s-%d' % (proc, pid)
tgtphase = self.sourcePhase(start)
self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
# this should not happen
if not tgtdev:
vprint('[%f - %f] %s-%d %s %s %s' % \
(start, end, proc, pid, kprobename, cdata, rdata))
return False
# place the call data inside the src element of the tgtdev
if('src' not in tgtdev):
tgtdev['src'] = []
dtf = sysvals.dev_tracefuncs
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
title = cdata+' '+rdata
mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
m = re.match(mstr, title)
if m:
c = m.group('caller')
a = m.group('args').strip()
r = m.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
def mergeOverlapDevices(self, devlist):
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
for phase in self.phases:
list = self.dmesg[phase]['list']
if devname not in list:
continue
tdev = list[devname]
o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
if o <= 0:
continue
dev['end'] = tdev['end']
if 'src' not in dev or 'src' not in tdev:
continue
dev['src'] += tdev['src']
del list[devname]
def usurpTouchingThread(self, name, dev):
# the calling test's thread has priority; hand this test's matching thread over to it
for phase in self.phases:
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
if tdev['start'] - dev['end'] < 0.1:
dev['end'] = tdev['end']
if 'src' not in dev:
dev['src'] = []
if 'src' in tdev:
dev['src'] += tdev['src']
del list[name]
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
continue
for data in testlist:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
continue
src = list[dev]['src']
p = 0
for e in sorted(src, key=lambda event: event.time):
if not p or not e.repeat(p):
p = e
continue
# e is another iteration of p, move it into p
p.end = e.end
p.length = p.end - p.time
p.count += 1
src.remove(e)
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('src' in d):
for e in d['src']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
def getTimeValues(self):
sktime = (self.dmesg['suspend_machine']['end'] - \
self.tKernSus) * 1000
rktime = (self.dmesg['resume_complete']['end'] - \
self.dmesg['resume_machine']['start']) * 1000
return (sktime, rktime)
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
if dev['length'] == 0:
continue
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.phases:
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
vprint('%s (%s): callback didnt return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
targetphase = 'none'
htmlclass = ''
overlap = 0.0
phases = []
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
phases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
continue
targetphase = phase
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
p0start = self.dmesg[self.phases[0]]['start']
if start <= p0start:
targetphase = self.phases[0]
else:
targetphase = self.phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
if len(phases) > 1:
htmlclass = ' bg'
self.phaseOverlap(phases)
if targetphase in self.phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2:
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def printDetails(self):
vprint('Timeline Details:')
vprint(' test start: %f' % self.start)
vprint('kernel suspend start: %f' % self.tKernSus)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' kernel resume end: %f' % self.tKernRes)
vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.phases:
list = self.deviceChildren(devname, phase)
for dev in list:
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if width != '0.000000' and length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
maxC = 0
tlast = 0
start = -1
end = -1
for t in sorted(times):
if tlast == 0:
tlast = t
continue
if name in self.pstl[t]:
if start == -1 or tlast < start:
start = tlast
if end == -1 or t > end:
end = t
tlast = t
if start == -1 or end == -1:
return 0
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
if not out:
return 0
phase, devname = out
dev = self.dmesg[phase]['list'][devname]
# get the cpu exec data
tlast = 0
clast = 0
cpuexec = dict()
for t in sorted(times):
if tlast == 0 or t <= start or t > end:
tlast = t
continue
list = self.pstl[t]
c = 0
if name in list:
c = list[name]
if c > maxC:
maxC = c
if c != clast:
key = (tlast, t)
cpuexec[key] = c
tlast = t
clast = c
dev['cpuexec'] = cpuexec
return maxC
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in self.pstl:
pslist = self.pstl[t]
for ps in pslist:
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
vprint('%25s (res): %d' % (ps, c))
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
row = 0
count = 1
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
self.name = name
self.args = args
self.caller = caller
self.ret = ret
self.time = start
self.length = end - start
self.end = end
self.ubiquitous = u
self.proc = proc
self.pid = pid
self.color = color
def title(self):
cnt = ''
if self.count > 1:
cnt = '(x%d)' % self.count
l = '%0.3fms' % (self.length * 1000)
if self.ubiquitous:
title = '%s(%s)%s <- %s, %s(%s)' % \
(self.name, self.args, cnt, self.caller, self.ret, l)
else:
title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
return title.replace('"', '')
def text(self):
if self.count > 1:
text = '%s(x%d)' % (self.name, self.count)
else:
text = self.name
return text
def repeat(self, tgt):
# is the tgt call just a repeat of this call (e.g. are we in a loop)
dt = self.time - tgt.end
# only combine calls if -all- attributes are identical
if tgt.caller == self.caller and \
tgt.name == self.name and tgt.args == self.args and \
tgt.proc == self.proc and tgt.pid == self.pid and \
tgt.ret == self.ret and dt >= 0 and \
dt <= sysvals.callloopmaxgap and \
self.length < sysvals.callloopmaxlen:
return True
return False
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
fkprobe = False
depth = 0
name = ''
type = ''
def __init__(self, t, m='', d=''):
self.time = float(t)
if not m and not d:
return
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
km = re.match('^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
km = re.match('^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
self.type = km.group('n')
return
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
else:
self.name = m
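# Function: getDepth
# Description:
#	 function_graph output is indented two spaces per level of call
#	 depth, so the depth is half the length of the leading whitespace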
def getDepth(self, str):
return len(str)/2
def debugPrint(self, dev=''):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
def startMarker(self):
# Is this the starting line of a suspend?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'SUSPEND START'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
# Is this the ending line of a resume?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'RESUME COMPLETE'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('thaw_processes\[.*\] end', self.name)):
return True
return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
id = ''
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
pid = 0
name = ''
def __init__(self, pid):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
self.pid = pid
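# Function: addLine
# Description:
#	 append an FTraceLine to the callgraph, inserting virtual call or
#	 return lines when the reported depth is misaligned with the current
#	 depth, and invalidating the graph if it grows too large
# Output:
#	 True if this line completes the callgraph (depth back to 0), else False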
def addLine(self, line, debug=False):
# if this is already invalid, just leave
if(self.invalid):
return False
# invalidate on too much data or bad depth
if(len(self.list) >= 1000000 or self.depth < 0):
self.invalidate(line)
return False
# compare current depth with this line's pre-call depth
prelinedep = line.depth
if(line.freturn and not line.fcall):
prelinedep += 1
last = 0
lasttime = line.time
virtualfname = 'missing_function_name'
if len(self.list) > 0:
last = self.list[-1]
lasttime = last.time
# handle low misalignments by inserting returns
if prelinedep < self.depth:
if debug and last:
print('-------- task %d --------' % self.pid)
last.debugPrint()
idx = 0
# add return calls to get the depth down
while prelinedep < self.depth:
if debug:
print('MISALIGN LOW (add returns): C%d - eC%d' % (self.depth, prelinedep))
self.depth -= 1
if idx == 0 and last and last.fcall and not last.freturn:
# special case, turn last call into a leaf
last.depth = self.depth
last.freturn = True
last.length = line.time - last.time
if debug:
last.debugPrint()
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.freturn = True
self.list.append(vline)
if debug:
vline.debugPrint()
idx += 1
if debug:
line.debugPrint()
print('')
# handle high misalignments by inserting calls
elif prelinedep > self.depth:
if debug and last:
print('-------- task %d --------' % self.pid)
last.debugPrint()
idx = 0
# add calls to get the depth up
while prelinedep > self.depth:
if debug:
print('MISALIGN HIGH (add calls): C%d - eC%d' % (self.depth, prelinedep))
if idx == 0 and line.freturn and not line.fcall:
# special case, turn this return into a leaf
line.fcall = True
prelinedep -= 1
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.fcall = True
if debug:
vline.debugPrint()
self.list.append(vline)
self.depth += 1
if not last:
self.start = vline.time
idx += 1
if debug:
line.debugPrint()
print('')
# process the call and set the new depth
if(line.fcall and not line.freturn):
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
if len(self.list) < 1:
self.start = line.time
self.list.append(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
if line.fcall:
self.end += line.length
if self.list[0].name == virtualfname:
self.invalid = True
return True
return False
def invalidate(self, line):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
vprint('Too much data for '+id+\
' (buffer overflow), ignoring this callback')
else:
vprint('Too much data for '+id+\
' '+window+', ignoring this callback')
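# Function: slice
# Description:
#	 return a copy of this callgraph containing only the lines that
#	 fall between t0 and tN, with their depths rebased to start at 0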
def slice(self, t0, tN):
minicg = FTraceCallGraph(0)
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
def repair(self, enddepth):
# bring the depth back to 0 with additional returns
fixed = False
last = self.list[-1]
for i in reversed(range(enddepth)):
t = FTraceLine(last.time)
t.depth = i
t.freturn = True
fixed = self.addLine(t)
if fixed:
self.end = last.time
return True
return False
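# Function: postProcess
# Description:
#	 walk the callgraph and calculate each call's length by matching
#	 call and return lines at the same depth; if the trace ended early,
#	 attempt to repair the graph by appending the missing returns
# Output:
#	 True if the callgraph is complete or repairable, False otherwise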
def postProcess(self, debug=False):
if len(self.list) > 0:
self.name = self.list[0].name
stack = dict()
cnt = 0
last = 0
for l in self.list:
# ftrace bug: reported duration is not reliable
# check each leaf and clip it at max possible length
if(last and last.freturn and last.fcall):
if last.length > l.time - last.time:
last.length = l.time - last.time
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
if debug:
print('Post Process Error: Depth missing')
l.debugPrint()
return False
# calculate call length from call/return lines
stack[l.depth].length = l.time - stack[l.depth].time
stack.pop(l.depth)
l.length = 0
cnt -= 1
last = l
if(cnt == 0):
# trace caught the whole call tree
return True
elif(cnt < 0):
if debug:
print('Post Process Error: Depth is less than 0')
return False
# trace ended before call tree finished
return self.repair(cnt)
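# Function: deviceMatch
# Description:
#	 attach this callgraph to the device callback whose pid and
#	 start/end window contain it in the given Data object
# Output:
#	 True if a matching device was found, False otherwise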
def deviceMatch(self, pid, data):
found = False
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
if(self.name in borderphase):
p = borderphase[self.name]
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self.slice(dev['start'], dev['end'])
found = True
return found
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self
found = True
break
break
return found
def newActionFromFunction(self, data):
name = self.name
if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
return
fs = self.start
fe = self.end
if fs < data.start or fe > data.end:
return
phase = ''
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
break
if not phase:
return
out = data.newActionGlobal(name, fs, fe, -2)
if out:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self):
print('[%f - %f] %s (%d)' % (self.start, self.end, self.name, self.pid))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
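# Class: DevItem
# Description:
#	 A container for a single device timeline entry: the test index,
#	 the phase name, and the device dict from the dmesg data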
class DevItem:
def __init__(self, test, phase, dev):
self.test = test
self.phase = phase
self.dev = dev
def isa(self, cls):
if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
return True
return False
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
html = ''
height = 0 # total timeline height
scaleH = 20 # timescale (top) row height
rowH = 30 # device row height
bodyH = 0 # body height
rows = 0 # total timeline rows
rowlines = dict()
rowheight = dict()
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
def __init__(self, rowheight, scaleheight):
self.rowH = rowheight
self.scaleH = scaleheight
self.html = ''
def createHeader(self, sv):
if(not sv.stamp['time']):
return
self.html += '<div class="version"><a href="https://01.org/suspendresume">%s v%s</a></div>' \
% (sv.title, sv.version)
if sv.logmsg and sv.testlog:
self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
if sv.dmesglog:
self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
if sv.ftracelog:
self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
self.html += headline_stamp.format(sv.stamp['host'], sv.stamp['kernel'],
sv.stamp['mode'], sv.stamp['time'])
if 'man' in sv.stamp and 'plat' in sv.stamp and 'cpu' in sv.stamp:
headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
self.html += headline_sysinfo.format(sv.stamp['man'],
sv.stamp['plat'], sv.stamp['cpu'])
# Function: getDeviceRows
# Description:
# determine how many rows the device funcs will take
# Arguments:
# rawlist: the list of devices/actions for a single phase
# Output:
# The total number of rows needed to display this phase of the timeline
def getDeviceRows(self, rawlist):
# clear all rows and set them to undefined
sortdict = dict()
for item in rawlist:
item.row = -1
sortdict[item] = item.length
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
remaining = len(sortlist)
rowdata = dict()
row = 1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for i in sortlist:
if(i.row >= 0):
continue
s = i.time
e = i.time + i.length
valid = True
for ritem in rowdata[row]:
rs = ritem.time
re = ritem.time + ritem.length
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(i)
i.row = row
remaining -= 1
row += 1
return row
# Function: getPhaseRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# devlist: the list of devices/actions in a group of contiguous phases
# Output:
# The total number of rows needed to display this phase of the timeline
def getPhaseRows(self, devlist, row=0, sortby='length'):
# clear all rows and set them to undefined
remaining = len(devlist)
rowdata = dict()
sortdict = dict()
myphases = []
# initialize all device rows to -1 and calculate devrows
for item in devlist:
dev = item.dev
tp = (item.test, item.phase)
if tp not in myphases:
myphases.append(tp)
dev['row'] = -1
if sortby == 'start':
# sort by start 1st, then length 2nd
sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
else:
# sort by length 1st, then name 2nd
sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
if 'src' in dev:
dev['devrows'] = self.getDeviceRows(dev['src'])
# sort the devlist by length so that large items graph on top
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
orderedlist = []
for item in sortlist:
if item.dev['pid'] == -2:
orderedlist.append(item)
for item in sortlist:
if item not in orderedlist:
orderedlist.append(item)
# try to pack each row with as many devices as possible
while(remaining > 0):
rowheight = 1
if(row not in rowdata):
rowdata[row] = []
for item in orderedlist:
dev = item.dev
if(dev['row'] < 0):
s = dev['start']
e = dev['end']
valid = True
for ritem in rowdata[row]:
rs = ritem.dev['start']
re = ritem.dev['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(item)
dev['row'] = row
remaining -= 1
if 'devrows' in dev and dev['devrows'] > rowheight:
rowheight = dev['devrows']
for t, p in myphases:
if t not in self.rowlines or t not in self.rowheight:
self.rowlines[t] = dict()
self.rowheight[t] = dict()
if p not in self.rowlines[t] or p not in self.rowheight[t]:
self.rowlines[t][p] = dict()
self.rowheight[t][p] = dict()
rh = self.rowH
# section headers should use a different row height
if len(rowdata[row]) == 1 and \
'htmlclass' in rowdata[row][0].dev and \
'sec' in rowdata[row][0].dev['htmlclass']:
rh = 15
self.rowlines[t][p][row] = rowheight
self.rowheight[t][p][row] = rowheight * rh
row += 1
if(row > self.rows):
self.rows = int(row)
return row
def phaseRowHeight(self, test, phase, row):
return self.rowheight[test][phase][row]
def phaseRowTop(self, test, phase, row):
top = 0
for i in sorted(self.rowheight[test][phase]):
if i >= row:
break
top += self.rowheight[test][phase][i]
return top
def calcTotalRows(self):
# Calculate the heights and offsets for the header and rows
maxrows = 0
standardphases = []
for t in self.rowlines:
for p in self.rowlines[t]:
total = 0
for i in sorted(self.rowlines[t][p]):
total += self.rowlines[t][p][i]
if total > maxrows:
maxrows = total
if total == len(self.rowlines[t][p]):
standardphases.append((t, p))
self.height = self.scaleH + (maxrows*self.rowH)
self.bodyH = self.height - self.scaleH
# if there is 1 line per row, draw them the standard way
for t, p in standardphases:
for i in sorted(self.rowheight[t][p]):
self.rowheight[t][p][i] = self.bodyH/len(self.rowlines[t][p])
def createZoomBox(self, mode='command', testcount=1):
# Create bounding box, add buttons
html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
if mode != 'command':
if testcount > 1:
self.html += html_devlist2
self.html += html_devlist1.format('1')
else:
self.html += html_devlist1.format('')
self.html += html_zoombox
self.html += html_timeline.format('dmesg', self.height)
# Function: createTimeScale
# Description:
# Create the timescale for a timeline block
# Arguments:
# m0: start time (mode begin)
# mMax: end time (mode end)
# tTotal: total timeline time
# mode: suspend or resume
# Output:
# The html code needed to display the time scale
def createTimeScale(self, m0, mMax, tTotal, mode):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
output = '<div class="timescale">\n'
# set scale for timeline
mTotal = mMax - m0
tS = 0.1
if(tTotal <= 0):
return output+'</div>\n'
if(tTotal > 4):
tS = 1
divTotal = int(mTotal/tS) + 1
divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
for i in range(divTotal):
htmlline = ''
if(mode == 'suspend'):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
if(i == divTotal - 1):
val = mode
htmlline = timescale.format(pos, val)
else:
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
val = '%0.fms' % (float(i)*tS*1000)
htmlline = timescale.format(pos, val)
if(i == 0):
htmlline = rline.format(mode)
output += htmlline
self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
class TestProps:
stamp = ''
sysinfo = ''
S0i3 = False
fwdata = []
stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
sysinfofmt = '^# sysinfo .*'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
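# illustrative examples of the two line formats (not from a real log):
#  fg:  ' 1234.567890 |   0)  kworker-57   |   1.234 us   |  } /* dpm_run_callback */'
#  nop: ' kworker/0:1-57    [000] ....  1234.567890: suspend_resume: dpm_prepare[2] begin'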
ftrace_line_fmt = ftrace_line_fmt_nop
cgformat = False
data = 0
ktemp = dict()
def __init__(self):
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer)
def parseStamp(self, data, sv):
m = re.match(self.stampfmt, self.stamp)
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
if re.match(self.sysinfofmt, self.sysinfo):
for f in self.sysinfo.split('|'):
if '#' in f:
continue
tmp = f.strip().split(':', 1)
key = tmp[0]
val = tmp[1]
data.stamp[key] = val
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem']
out = Popen(['grep', 'suspend_enter', sv.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read()
m = re.match('.* suspend_enter\[(?P<mode>.*)\]', out)
if m and m.group('mode') in ['1', '2', '3']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
if not sv.stamp:
sv.stamp = data.stamp
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftemp = dict()
ttemp = dict()
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
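# Class: ProcessMonitor
# Description:
#	 A background thread that polls /proc/[pid]/stat while the test runs
#	 and writes per-process cpu usage deltas into the ftrace trace_marker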
class ProcessMonitor:
proclist = dict()
running = False
def procstat(self):
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
running = dict()
for line in process.stdout:
data = line.split()
pid = data[0]
name = re.sub('[()]', '', data[1])
user = int(data[13])
kern = int(data[14])
kjiff = ujiff = 0
if pid not in self.proclist:
self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
else:
val = self.proclist[pid]
ujiff = user - val['user']
kjiff = kern - val['kern']
val['user'] = user
val['kern'] = kern
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
out = ''
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
if out:
out += ','
out += '%s-%s %d' % (val['name'], pid, jiffies)
return 'ps - '+out
def processMonitor(self, tid):
while self.running:
out = self.procstat()
if out:
sysvals.fsetVal(out, 'trace_marker')
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
self.thread.start()
def stop(self):
self.running = False
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
sysvals.logmsg += msg+'\n'
if(sysvals.verbose):
print(msg)
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
# check for kprobes
sysvals.usekprobes = False
out = call('grep -q "_cal: (" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
# check for callgraph data on trace event blocks
out = call('grep -q "_cpu_down()" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
out = Popen(['head', '-1', sysvals.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
# figure out what level of trace events are supported
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = call('grep -q "'+e+': " '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out == 0):
sysvals.usetraceevents = True
# determine if this log is properly formatted
for e in ['SUSPEND START', 'RESUME COMPLETE']:
out = call('grep -q "'+e+'" '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetracemarkers = False
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = 0
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
data = 0
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# look for the suspend start marker
if(t.startMarker()):
data = testrun[testidx].data
tp.parseStamp(data, sysvals)
data.setStart(t.time)
continue
if(not data):
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
testidx += 1
if(testidx >= testcnt):
break
continue
# trace event processing
if(t.fevent):
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
test.data.newActionGlobal(name, event['begin'], event['end'])
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
test.data.printDetails()
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
sysvals.setupAllKprobes()
tracewatch = []
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON', 'CPU_OFF']
# extract the callgraph and traceevent data
tp = TestProps()
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp and sysinfo lines
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# ignore all other commented lines
if line[0] == '#':
continue
# ftrace line: parse only valid lines
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_proc = m.group('proc')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# find the start of suspend
if(t.startMarker()):
phase = 'suspend_prepare'
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
tp.parseStamp(data, sysvals)
data.setStart(t.time)
data.tKernSus = t.time
continue
if(not data):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
m = re.match(sysvals.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
val = ps.split()
if not val:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
data.pstl[t.time] = proclist
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
if data.tKernRes == 0.0:
data.tKernRes = t.time
if data.dmesg['resume_complete']['end'] < 0:
data.dmesg['resume_complete']['end'] = t.time
if sysvals.suspendmode == 'mem' and len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.tSuspended != 0 and data.tResumed != 0 and \
(data.fwSuspend > 0 or data.fwResume > 0)):
data.fwValid = True
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
if(len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
break
continue
# trace event processing
if(t.fevent):
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
if(isbegin):
data.dmesg[phase]['start'] = t.time
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk'] and not tp.S0i3):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# acpi_suspend
elif(re.match('acpi_suspend\[.*', t.name)):
# acpi_suspend[0] S0i3
if(re.match('acpi_suspend\[0\] begin', t.name)):
if(sysvals.suspendmode == 'mem'):
tp.S0i3 = True
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time, 'pid': pid})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
if pid not in data.devpids:
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# kprobe event processing
elif(t.fkprobe):
kprobename = t.type
kprobedata = t.name
key = (kprobename, pid)
# displayname is generated from kprobe data
displayname = ''
if(t.fcall):
displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
if not displayname:
continue
if(key not in tp.ktemp):
tp.ktemp[key] = []
tp.ktemp[key].append({
'pid': pid,
'begin': t.time,
'end': t.time,
'name': displayname,
'cdata': kprobedata,
'proc': m_proc,
})
elif(t.freturn):
if(key not in tp.ktemp or len(tp.ktemp[key]) < 1):
continue
e = tp.ktemp[key][-1]
if e['begin'] < 0.0 or t.time - e['begin'] < 0.000001:
tp.ktemp[key].pop()
else:
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
if(kprobename == 'pm_notifier_call_chain' or \
kprobename == 'pm_restore_console'):
data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
key = (m_proc, pid)
if(key not in testrun.ftemp):
testrun.ftemp[key] = []
testrun.ftemp[key].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun.ftemp[key][-1]
if(cg.addLine(t)):
testrun.ftemp[key].append(FTraceCallGraph(pid))
tf.close()
if sysvals.suspendmode == 'command':
for test in testruns:
for p in test.data.phases:
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
else:
test.data.dmesg[p]['start'] = test.data.end
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
test.data.tLow = 0
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
for i in range(len(testruns)):
test = testruns[i]
data = test.data
# find the total time range for this test (begin, end)
tlb, tle = data.start, data.end
if i < len(testruns) - 1:
tle = testruns[i+1].data.start
# add the process usage data to the timeline
if sysvals.useprocmon:
data.createProcessUsageEvents()
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
# add actual trace funcs
for name in test.ttemp:
for event in test.ttemp[name]:
data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
# add the kprobe based virtual tracefuncs as actual devices
for key in tp.ktemp:
name, pid = key
if name not in sysvals.tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
color = sysvals.kprobeColor(name)
data.newActionGlobal(e['name'], kb, ke, pid, color)
# add config base kprobes and dev kprobes
if sysvals.usedevsrc:
for key in tp.ktemp:
name, pid = key
if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
ke, e['cdata'], e['rdata'])
if sysvals.usecallgraph:
# add the callgraph data to the device hierarchy
sortlist = dict()
for key in test.ftemp:
proc, pid = key
for cg in test.ftemp[key]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s' % (pid)
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
# match cg data to devices
if sysvals.suspendmode == 'command' or not cg.deviceMatch(pid, data):
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
name = cg.name
if sysvals.isCallgraphFunc(name):
vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
cg.newActionFromFunction(data)
if sysvals.suspendmode == 'command':
for data in testdata:
data.printDetails()
return testdata
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
vprint('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if sysvals.usedevsrc:
data.optimizeDevSrc()
data.printDetails()
# x2: merge any overlapping devices between test runs
if sysvals.usedevsrc and len(testdata) > 1:
tc = len(testdata)
for i in range(tc - 1):
devlist = testdata[i].overflowDevices()
for j in range(i + 1, tc):
testdata[j].mergeOverlapDevices(devlist)
testdata[0].stitchTouchingThreads(testdata[1:])
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog(justtext=False):
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
if justtext:
dmesgtext = []
# there can be multiple test runs in a single file
tp = TestProps()
tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
if justtext:
dmesgtext.append(line)
continue
if(re.match('PM: Syncing filesystems.*', msg)):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
if(m):
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
if justtext:
return dmesgtext
if data:
testruns.append(data)
if len(testruns) < 1:
doError(' dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines with same timestamp/function with the call and return swapped
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyze a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_prepare start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
data.tKernSus = ktime
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
data.tKernRes = ktime
break
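# illustrative device callback lines handled below (not from a real log):
#   [  100.123456] calling  0000:00:02.0+ @ 1432, parent: pci0000:00
#   [  100.125678] call 0000:00:02.0+ returned 0 after 2222 usecs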
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
if(a in actions):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
data.newActionGlobal(name, event['begin'], event['end'])
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
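# Function: callgraphHTML
# Description:
#	 write out a single callgraph as a nested set of html <article>
#	 blocks, skipping graphs shorter than the minimum callgraph length
# Output:
#	 The next available article id number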
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
cgid = devid
if cg.id:
cgid += cg.id
cglen = (cg.end - cg.start) * 1000
if cglen < sv.mincglen:
return num
fmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
flen = fmt % (cglen, cg.start, cg.end)
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num
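# Function: addCallgraphs
# Description:
#	 write the callgraph section of the html output, one callgraph per
#	 device that has ftrace data, filtered by phase and device filter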
def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
for p in data.phases:
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if len(sv.devicefilter) > 0 and devname not in sv.devicefilter:
continue
dev = list[devname]
color = 'white'
if 'color' in data.dmesg[p]:
color = data.dmesg[p]['color']
if 'color' in dev:
color = dev['color']
name = devname
if(devname in sv.devprops):
name = sv.devprops[devname].altName(devname)
if sv.suspendmode in suspendmodename:
name += ' '+p
if('ftrace' in dev):
cg = dev['ftrace']
num = callgraphHTML(sv, hf, num, cg,
name, color, dev['id'])
if('ftraces' in dev):
for cg in dev['ftraces']:
num = callgraphHTML(sv, hf, num, cg,
name+' → '+cg.name, color, dev['id'])
hf.write('\n\n </section>\n')
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile, folder):
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>SleepGraph Summary</title>\n\
<style type=\'text/css\'>\n\
.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
td {font: 16px "Times New Roman";text-align: center;}\n\
tr.alt td {background:#ddd;}\n\
tr.avg td {background:#aaa;}\n\
</style>\n</head>\n<body>\n'
# group test header
html += '<div class="stamp">%s (%d tests)</div>\n' % (folder, len(testruns))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">html</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Suspend') + th.format('Resume') +\
th.format('Detail') + '</tr>\n'
# test data, 1 row per test
avg = '<tr class="avg"><td></td><td></td><td></td><td></td>'+\
'<td>Average of {0} {1} tests</td><td>{2}</td><td>{3}</td><td></td></tr>\n'
sTimeAvg = rTimeAvg = 0.0
mode = ''
num = 0
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'])):
if mode != data['mode']:
# test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
sTimeAvg = rTimeAvg = 0.0
mode = data['mode']
num = 1
# alternate row color
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
html += td.format("%d" % num)
num += 1
# basic info
for item in ['mode', 'host', 'kernel', 'time']:
val = "unknown"
if(item in data):
val = data[item]
html += td.format(val)
# suspend time
sTime = float(data['suspend'])
sTimeAvg += sTime
html += td.format('%.3f ms' % sTime)
# resume time
rTime = float(data['resume'])
rTimeAvg += rTime
html += td.format('%.3f ms' % rTime)
# link to the output html
html += tdlink.format(data['url']) + '</tr>\n'
# last test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n</body>\n</html>\n')
hf.close()
def ordinal(value):
suffix = 'th'
	if value % 100 not in [11, 12, 13]:
if value % 10 == 1:
suffix = 'st'
elif value % 10 == 2:
suffix = 'nd'
elif value % 10 == 3:
suffix = 'rd'
return '%d%s' % (value, suffix)
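# A quick sanity check of the expected output (illustrative only):
#	 ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(3) -> '3rd',
#	 ordinal(4) -> '4th', ordinal(11) -> '11th', ordinal(22) -> '22nd'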
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
if len(testruns) < 1:
print('ERROR: Not enough test data to build a timeline')
		return False
kerror = False
for data in testruns:
if data.kerror:
kerror = True
data.normalizeTime(testruns[-1].tSuspended)
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">ERROR→</div>\n'
html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal3 = '<table class="time1">\n<tr>'\
'<td class="green">Execution Time: <b>{0} ms</b></td>'\
'<td class="yellow">Command: <b>{1}</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green" title="time from kernel enter_state({5}) to firmware mode [kernel time only]">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow" title="time from firmware mode to return from kernel enter_state({5}) [kernel time only]">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# html format variables
scaleH = 20
if kerror:
scaleH = 40
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline(30, scaleH)
# write the test title and general info header
devtl.createHeader(sysvals)
# Generate the header for this timeline
for data in testruns:
tTotal = data.end - data.start
sktime, rktime = data.getTimeValues()
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if sysvals.suspendmode == 'command':
run_time = '%.0f'%((data.end-data.start)*1000)
if sysvals.testcommand:
testdesc = sysvals.testcommand
else:
testdesc = 'unknown'
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
thtml = html_timetotal3.format(run_time, testdesc)
devtl.html += thtml
elif data.fwValid:
suspend_time = '%.0f'%(sktime + (data.fwSuspend/1000000.0))
resume_time = '%.0f'%(rktime + (data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
stitle = 'time from kernel enter_state(%s) to low-power mode [kernel & firmware time]' % sysvals.suspendmode
rtitle = 'time from low-power mode to return from kernel enter_state(%s) [firmware & kernel time]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc1 = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1, stitle, rtitle)
devtl.html += thtml
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
devtl.html += html_timegroups.format('%.3f'%sktime, \
sftime, rftime, '%.3f'%rktime, testdesc2, sysvals.suspendmode)
else:
suspend_time = '%.3f' % sktime
resume_time = '%.3f' % rktime
testdesc = 'Kernel'
stitle = 'time from kernel enter_state(%s) to firmware mode [kernel time only]' % sysvals.suspendmode
rtitle = 'time from firmware mode to return from kernel enter_state(%s) [kernel time only]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc, stitle, rtitle)
devtl.html += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
fulllist = []
threadlist = []
pscnt = 0
devcnt = 0
for data in testruns:
data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
for group in data.devicegroups:
devlist = []
for phase in group:
for devname in data.tdevlist[phase]:
d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
devlist.append(d)
if d.isa('kth'):
threadlist.append(d)
else:
if d.isa('ps'):
pscnt += 1
else:
devcnt += 1
fulllist.append(d)
if sysvals.mixedphaseheight:
devtl.getPhaseRows(devlist)
if not sysvals.mixedphaseheight:
if len(threadlist) > 0 and len(fulllist) > 0:
if pscnt > 0 and devcnt > 0:
msg = 'user processes & device pm callbacks'
elif pscnt > 0:
msg = 'user processes'
else:
msg = 'device pm callbacks'
d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
fulllist.insert(0, d)
devtl.getPhaseRows(fulllist)
if len(threadlist) > 0:
d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
threadlist.insert(0, d)
devtl.getPhaseRows(threadlist, devtl.rows)
devtl.calcTotalRows()
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
phases = {'suspend':[],'resume':[]}
for phase in data.dmesg:
if 'resume' in phase:
phases['resume'].append(phase)
else:
phases['suspend'].append(phase)
# draw each test run chronologically
for data in testruns:
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
bname = '%s%d' % (dir[0], data.testnumber)
if dir == 'suspend':
m0 = data.start
mMax = data.tSuspended
left = '%f' % (((m0-t0)*100.0)/tTotal)
else:
m0 = data.tSuspended
mMax = data.end
# in an x2 run, remove any gap between blocks
if len(testruns) > 1 and data.testnumber == 0:
mMax = testruns[1].start
left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
mTotal = mMax - m0
# if a timeline block is 0 length, skip altogether
if mTotal == 0:
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
for b in sorted(phases[dir]):
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
width = '%f' % ((length*100.0)/mTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
data.dmesg[b]['color'], '')
for e in data.errorinfo[dir]:
# draw red lines for any kernel errors found
t, err = e
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, err)
for b in sorted(phases[dir]):
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in data.tdevlist[b]:
name = d
drv = ''
dev = phaselist[d]
xtraclass = ''
xtrainfo = ''
xtrastyle = ''
if 'htmlclass' in dev:
xtraclass = dev['htmlclass']
if 'color' in dev:
xtrastyle = 'background:%s;' % dev['color']
if(d in sysvals.devprops):
name = sysvals.devprops[d].altName(d)
xtraclass = sysvals.devprops[d].xtraClass()
xtrainfo = sysvals.devprops[d].xtraInfo()
elif xtraclass == ' kth':
xtrainfo = ' kernel_thread'
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
top = '%.3f' % (rowtop + devtl.scaleH)
left = '%f' % (((dev['start']-m0)*100)/mTotal)
width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
title = name+drv+xtrainfo+length
if sysvals.suspendmode == 'command':
title += sysvals.testcommand
elif xtraclass == ' ps':
if 'suspend' in b:
title += 'pre_suspend_process'
else:
title += 'post_resume_process'
else:
title += b
devtl.html += devtl.html_device.format(dev['id'], \
title, left, top, '%.3f'%rowheight, width, \
d+drv, xtraclass, xtrastyle)
if('cpuexec' in dev):
for t in sorted(dev['cpuexec']):
start, end = t
j = float(dev['cpuexec'][t]) / 5
if j > 1.0:
j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
color = 'rgba(255, 0, 0, %f)' % j
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
continue
# draw any trace events for this device
for e in dev['src']:
height = '%.3f' % devtl.rowH
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
left = '%f' % (((e.time-m0)*100)/mTotal)
width = '%f' % (e.length*100/mTotal)
xtrastyle = ''
if e.color:
xtrastyle = 'background:%s;' % e.color
devtl.html += \
html_traceevent.format(e.title(), \
left, top, height, width, e.text(), '', xtrastyle)
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(m0, mMax, tTotal, dir)
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
data = testruns[-1]
devtl.html += '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
tmp = phase.split('_')
id = tmp[0][0]
if(len(tmp) > 1):
id += tmp[1][0]
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
			name = phase.replace('_', ' ')
devtl.html += devtl.html_legend.format(order, \
data.dmesg[phase]['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
	# no header or css if it's embedded
if(sysvals.embedded):
hf.write('pass True tSus %.3f tRes %.3f tLow %.3f fwvalid %s tSus %.3f tRes %.3f\n' %
(data.tSuspended-data.start, data.end-data.tSuspended, data.tLow, data.fwValid, \
data.fwSuspend/1000000, data.fwResume/1000000))
else:
addCSS(hf, sysvals, len(testruns), kerror)
# write the device timeline
hf.write(devtl.html)
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(devtl.html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write(devtl.html_phaselet.format('post_resume_process', \
'0', '0', pscolor))
if sysvals.suspendmode == 'command':
hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
data = testruns[sysvals.cgtest]
else:
data = testruns[-1]
if(sysvals.usecallgraph and not sysvals.embedded):
addCallgraphs(sysvals, hf, data)
# add the test log as a hidden div
if sysvals.testlog and sysvals.logmsg:
hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
# add the dmesg log as a hidden div
if sysvals.dmesglog and sysvals.dmesgfile:
hf.write('<div id="dmesglog" style="display:none;">\n')
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('<', '<').replace('>', '>')
hf.write(line)
lf.close()
hf.write('</div>\n')
# add the ftrace log as a hidden div
if sysvals.ftracelog and sysvals.ftracefile:
hf.write('<div id="ftracelog" style="display:none;">\n')
lf = open(sysvals.ftracefile, 'r')
for line in lf:
hf.write(line)
lf.close()
hf.write('</div>\n')
if(not sysvals.embedded):
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
else:
# embedded out will be loaded in a page, skip the js
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# add js code in a div entry for later evaluation
detail = 'var bounds = [%f,%f];\n' % (t0, tMax)
detail += 'var devtable = [\n'
for data in testruns:
topo = data.deviceTopology()
detail += '\t"%s",\n' % (topo)
detail += '];\n'
hf.write('<div id=customcode style=display:none>\n'+detail+'</div>\n')
hf.close()
return True
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
kernel = sv.stamp['kernel']
host = sv.hostname[0].upper()+sv.hostname[1:]
mode = sv.suspendmode
if sv.suspendmode in suspendmodename:
mode = suspendmodename[sv.suspendmode]
title = host+' '+mode+' '+kernel
# various format changes by flags
cgchk = 'checked'
cgnchk = 'not(:checked)'
if sv.cgexp:
cgchk = 'not(:checked)'
cgnchk = 'checked'
hoverZ = 'z-index:8;'
if sv.usedevsrc:
hoverZ = ''
devlistpos = 'absolute'
if testcount > 1:
devlistpos = 'relative'
scaleTH = 20
if kerror:
scaleTH = 60
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y:scroll;}\n\
.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
.stamp.sysinfo {font:10px Arial;}\n\
.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
.callgraph article * {padding-left:28px;}\n\
h1 {color:black;font:bold 30px Times;}\n\
t0 {color:black;font:bold 30px Times;}\n\
t1 {color:black;font:30px Times;}\n\
t2 {color:black;font:25px Times;}\n\
t3 {color:black;font:20px Times;white-space:nowrap;}\n\
t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
cS {font:bold 13px Times;}\n\
table {width:100%;}\n\
.gray {background:rgba(80,80,80,0.1);}\n\
.green {background:rgba(204,255,204,0.4);}\n\
.purple {background:rgba(128,0,128,0.2);}\n\
.yellow {background:rgba(255,255,204,0.4);}\n\
.blue {background:rgba(169,208,245,0.4);}\n\
.time1 {font:22px Arial;border:1px solid;}\n\
.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align:center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color:red;}\n\
.hide {display:none;}\n\
.pf {display:none;}\n\
.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.hover.sync {background:white;}\n\
.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
.devlist {position:'+devlistpos+';width:190px;}\n\
a:link {color:white;text-decoration:none;}\n\
a:visited {color:white;}\n\
a:hover {color:white;}\n\
a:active {color:white;}\n\
.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
.tblock {position:absolute;height:100%;background:#ddd;}\n\
.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
.bg {z-index:1;}\n\
'+extra+'\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var resolution = -1;\n'\
' var dragval = [0, 0];\n'\
' function redrawTimescale(t0, tMax, tS) {\n'\
' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
' var tTotal = tMax - t0;\n'\
' var list = document.getElementsByClassName("tblock");\n'\
' for (var i = 0; i < list.length; i++) {\n'\
' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
' var mMax = m0 + mTotal;\n'\
' var html = "";\n'\
' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
' if(divTotal > 1000) continue;\n'\
' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
' var pos = 0.0, val = 0.0;\n'\
' for (var j = 0; j < divTotal; j++) {\n'\
' var htmlline = "";\n'\
' var mode = list[i].id[5];\n'\
' if(mode == "s") {\n'\
' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
' val = (j-divTotal+1)*tS;\n'\
' if(j == divTotal - 1)\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
' else\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' } else {\n'\
' pos = 100 - (((j)*tS*100)/mTotal);\n'\
' val = (j)*tS;\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' if(j == 0)\n'\
' if(mode == "r")\n'\
' htmlline = rline+"<cS>←R</cS></div>";\n'\
' else\n'\
' htmlline = rline+"<cS>0ms</div>";\n'\
' }\n'\
' html += htmlline;\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var left = zoombox.scrollLeft;\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 910034) newval = 910034;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' var idx = 7*window.innerWidth/1100;\n'\
' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
' if(i >= tS.length) i = tS.length - 1;\n'\
' if(tS[i] == resolution) return;\n'\
' resolution = tS[i];\n'\
' redrawTimescale(t0, tMax, tS[i]);\n'\
' }\n'\
' function deviceName(title) {\n'\
' var name = title.slice(0, title.indexOf(" ("));\n'\
' return name;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = deviceName(this.title);\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "hover "+cname;\n'\
' } else {\n'\
' dev[i].className = cname;\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = deviceName(title);\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = deviceName(this.title);\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' if(document.getElementById("devicedetail1"))\n'\
' pdata = [[], []];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' if(typeof devstats !== \'undefined\')\n'\
' callDetail(this.id, this.title);\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' if(cg.length < 10) return;\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' cgid = cg[i].id.split("x")[0]\n'\
' if(idlist.indexOf(cgid) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function callDetail(devid, devtitle) {\n'\
' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
' return;\n'\
' var list = devstats[devid];\n'\
' var tmp = devtitle.split(" ");\n'\
' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
' var dd = document.getElementById(phase);\n'\
' var total = parseFloat(tmp[1].slice(1));\n'\
' var mlist = [];\n'\
' var maxlen = 0;\n'\
' var info = []\n'\
' for(var i in list) {\n'\
' if(list[i][0] == "@") {\n'\
' info = list[i].split("|");\n'\
' continue;\n'\
' }\n'\
' var tmp = list[i].split("|");\n'\
' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
' var p = (t*100.0/total).toFixed(2);\n'\
' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
' if(f.length > maxlen)\n'\
' maxlen = f.length;\n'\
' }\n'\
' var pad = 5;\n'\
' if(mlist.length == 0) pad = 30;\n'\
' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
' if(info.length > 2)\n'\
' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
' if(info.length > 3)\n'\
' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
' if(info.length > 4)\n'\
' html += ", return=<b>"+info[4]+"</b>";\n'\
' html += "</t3></div>";\n'\
' if(mlist.length > 0) {\n'\
' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
' for(var i in mlist)\n'\
' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
' html += "</tr><tr><th>Calls</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][1]+"</td>";\n'\
' html += "</tr><tr><th>Time(ms)</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][2]+"</td>";\n'\
' html += "</tr><tr><th>Percent</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][3]+"</td>";\n'\
' html += "</tr></table>";\n'\
' }\n'\
' dd.innerHTML = html;\n'\
' var height = (maxlen*5)+100;\n'\
' dd.style.height = height+"px";\n'\
' document.getElementById("devicedetail").style.height = height+"px";\n'\
' }\n'\
' function callSelect() {\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(this.id == cg[i].id) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var win = window.open();\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' function errWindow() {\n'\
' var text = this.id;\n'\
' var win = window.open();\n'\
' win.document.write("<pre>"+text+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function logWindow(e) {\n'\
' var name = e.target.id.slice(4);\n'\
' var win = window.open();\n'\
' var log = document.getElementById(name+"log");\n'\
' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function onMouseDown(e) {\n'\
' dragval[0] = e.clientX;\n'\
' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
' document.onmousemove = onMouseMove;\n'\
' }\n'\
' function onMouseMove(e) {\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
' }\n'\
' function onMouseUp(e) {\n'\
' document.onmousemove = null;\n'\
' }\n'\
' function onKeyPress(e) {\n'\
' var c = e.charCode;\n'\
' if(c != 42 && c != 43 && c != 45) return;\n'\
' var click = document.createEvent("Events");\n'\
' click.initEvent("click", true, false);\n'\
' if(c == 43) \n'\
' document.getElementById("zoomin").dispatchEvent(click);\n'\
' else if(c == 45)\n'\
' document.getElementById("zoomout").dispatchEvent(click);\n'\
' else if(c == 42)\n'\
' document.getElementById("zoomdef").dispatchEvent(click);\n'\
' }\n'\
' window.addEventListener("resize", function () {zoomTimeline();});\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' dmesg.onmousedown = onMouseDown;\n'\
' document.onmouseup = onMouseUp;\n'\
' document.onkeypress = onKeyPress;\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var list = document.getElementsByClassName("err");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = errWindow;\n'\
' var list = document.getElementsByClassName("logbtn");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = logWindow;\n'\
' list = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' var dev = dmesg.getElementsByClassName("srccall");\n'\
' for (var i = 0; i < dev.length; i++)\n'\
' dev[i].onclick = callSelect;\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
	hf.write(script_code)
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
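#	 Roughly speaking, the suspend itself is triggered the same way it would
#	 be done by hand from a shell (assuming the default sysfs paths), e.g.:
#	 echo mem > /sys/power/state
#	 with an optional write to /sys/power/mem_sleep beforehand when a mem
#	 submode such as s2idle or deep was requested.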
def executeSuspend():
pm = ProcessMonitor()
tp = sysvals.tpath
fwdata = []
# mark the start point in the kernel ring buffer just as we start
sysvals.initdmesg()
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
sysvals.fsetVal('1', 'tracing_on')
if sysvals.useprocmon:
pm.start()
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# x2delay in between test runs
if(count > 1 and sysvals.x2delay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.x2delay, 'trace_marker')
time.sleep(sysvals.x2delay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# start message
if sysvals.testcommand != '':
print('COMMAND START')
else:
if(sysvals.rtcwake):
print('SUSPEND START')
else:
print('SUSPEND START (press a key to resume)')
# set rtcwake
if(sysvals.rtcwake):
print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarmOn()
# start of suspend trace marker
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('SUSPEND START', 'trace_marker')
# predelay delay
if(count == 1 and sysvals.predelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.predelay, 'trace_marker')
time.sleep(sysvals.predelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
if sysvals.testcommand != '':
			call(sysvals.testcommand+' 2>&1', shell=True)
else:
mode = sysvals.suspendmode
if sysvals.memmode and os.path.exists(sysvals.mempowerfile):
mode = 'mem'
pf = open(sysvals.mempowerfile, 'w')
pf.write(sysvals.memmode)
pf.close()
pf = open(sysvals.powerfile, 'w')
pf.write(mode)
# execution will pause here
try:
pf.close()
except:
pass
if(sysvals.rtcwake):
sysvals.rtcWakeAlarmOff()
# postdelay delay
if(count == sysvals.execcount and sysvals.postdelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.postdelay, 'trace_marker')
time.sleep(sysvals.postdelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('RESUME COMPLETE', 'trace_marker')
if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
fwdata.append(getFPDT(False))
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
if sysvals.useprocmon:
pm.stop()
sysvals.fsetVal('0', 'tracing_on')
print('CAPTURING TRACE')
sysvals.writeDatafileHeader(sysvals.ftracefile, fwdata)
call('cat '+tp+'trace >> '+sysvals.ftracefile, shell=True)
sysvals.fsetVal('', 'trace')
devProps()
# grab a copy of the dmesg output
print('CAPTURING DMESG')
sysvals.writeDatafileHeader(sysvals.dmesgfile, fwdata)
sysvals.getdmesg()
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
#	 This can be dangerous, so use at your own risk; most devices are set
#	 to always-on since the kernel can't determine if the device can
# properly autosuspend
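#	 The per-device effect is the same as doing this by hand for every USB
#	 device directory found under /sys/devices (path is illustrative):
#	 echo auto > /sys/devices/.../usb1/1-2/power/control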
def setUSBDevicesAuto():
sysvals.rootCheck(True)
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
call('echo auto > %s/power/control' % dirname, shell=True)
name = dirname.split('/')[-1]
desc = Popen(['cat', '%s/product' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
ctrl = Popen(['cat', '%s/power/control' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
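# e.g. yesno('auto') -> 'Y', yesno('disabled') -> 'N', yesno('unknown') -> ' '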
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
ms = 0
try:
ms = int(val)
except:
		return '%3dm%2ds' % (0, 0)
m = ms / 60000
s = (ms / 1000) - (m * 60)
return '%3dm%2ds' % (m, s)
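# e.g. ms2nice('114076000') -> '1901m16s', ms2nice('bogus') -> '  0m 0s'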
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
def detectUSB():
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = Popen(['cat', '%s/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
name = dirname.split('/')[-1]
for i in power:
power[i] = Popen(['cat', '%s/power/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
# Function: devProps
# Description:
# Retrieve a list of properties for all devices in the trace log
def devProps(data=0):
props = dict()
if data:
idx = data.index(': ') + 2
if idx >= len(data):
return
devlist = data[idx:].split(';')
for dev in devlist:
f = dev.split(',')
if len(f) < 3:
continue
dev = f[0]
props[dev] = DevProps()
props[dev].altname = f[1]
if int(f[2]):
props[dev].async = True
else:
props[dev].async = False
sysvals.devprops = props
if sysvals.suspendmode == 'command' and 'testcommandstring' in props:
sysvals.testcommand = props['testcommandstring'].altname
return
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
# first get the list of devices we need properties for
msghead = 'Additional data added by AnalyzeSuspend'
alreadystamped = False
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
for line in tf:
if msghead in line:
alreadystamped = True
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
		m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'))
if(not m):
continue
dev = m.group('d')
if dev not in props:
props[dev] = DevProps()
tf.close()
if not alreadystamped and sysvals.suspendmode == 'command':
out = '#\n# '+msghead+'\n# Device Properties: '
out += 'testcommandstring,%s,0;' % (sysvals.testcommand)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
return
# now get the syspath for each of our target devices
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
# now fill in the properties for our target devices
for dev in props:
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
with open(dirname+'/power/async') as fp:
text = fp.read()
props[dev].async = False
if 'enabled' in text:
props[dev].async = True
fields = os.listdir(dirname)
if 'product' in fields:
with open(dirname+'/product') as fp:
props[dev].altname = fp.read()
elif 'name' in fields:
with open(dirname+'/name') as fp:
props[dev].altname = fp.read()
elif 'model' in fields:
with open(dirname+'/model') as fp:
props[dev].altname = fp.read()
elif 'description' in fields:
with open(dirname+'/description') as fp:
props[dev].altname = fp.read()
elif 'id' in fields:
with open(dirname+'/id') as fp:
props[dev].altname = fp.read()
elif 'idVendor' in fields and 'idProduct' in fields:
idv, idp = '', ''
with open(dirname+'/idVendor') as fp:
idv = fp.read().strip()
with open(dirname+'/idProduct') as fp:
idp = fp.read().strip()
props[dev].altname = '%s:%s' % (idv, idp)
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')
out = out.replace(',', ' ')
out = out.replace(';', ' ')
props[dev].altname = out
# and now write the data to the ftrace file
if not alreadystamped:
out = '#\n# '+msghead+'\n# Device Properties: '
for dev in sorted(props):
out += props[dev].out(dev)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
modes = []
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
		modes = fp.read().split()
fp.close()
if(os.path.exists(sysvals.mempowerfile)):
deep = False
fp = open(sysvals.mempowerfile, 'r')
		for m in fp.read().split():
memmode = m.strip('[]')
if memmode == 'deep':
deep = True
else:
modes.append('mem-%s' % memmode)
fp.close()
if 'mem' in modes and not deep:
modes.remove('mem')
return modes
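# On a typical system the returned list looks something like (illustrative):
#	 ['freeze', 'standby', 'mem', 'disk', 'mem-s2idle', 'mem-shallow']
# where the 'mem-' variants come from the mem_sleep file and plain 'mem' is
# dropped if the kernel does not offer a 'deep' variant.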
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
# Arguments:
# mempath: /dev/mem or custom mem path
# fatal: True to exit on error, False to return empty dict
# Output:
# A dict object with all available key/values
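#	 A minimal usage sketch (the returned values are illustrative):
#	 info = dmidecode('/dev/mem')
#	 info.get('system-product-name')  # e.g. 'XPS 13 9360'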
def dmidecode(mempath, fatal=False):
out = dict()
# the list of values to retrieve, with hardcoded (type, idx)
info = {
'bios-vendor': (0, 4),
'bios-version': (0, 5),
'bios-release-date': (0, 8),
'system-manufacturer': (1, 4),
'system-product-name': (1, 5),
'system-version': (1, 6),
'system-serial-number': (1, 7),
'baseboard-manufacturer': (2, 4),
'baseboard-product-name': (2, 5),
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
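	# each entry above is (SMBIOS structure type, byte offset of the field
	# holding the 1-based string index); the key names mirror the keywords
	# accepted by the dmidecode -s option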
if(not os.path.exists(mempath)):
if(fatal):
doError('file does not exist: %s' % mempath)
return out
if(not os.access(mempath, os.R_OK)):
if(fatal):
doError('file is not readable: %s' % mempath)
return out
# by default use legacy scan, but try to use EFI first
memaddr = 0xf0000
memsize = 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
fp = open(ep, 'r')
buf = fp.read()
fp.close()
i = buf.find('SMBIOS=')
if i >= 0:
try:
memaddr = int(buf[i+7:], 16)
memsize = 0x20
except:
continue
# read in the memory for scanning
fp = open(mempath, 'rb')
try:
fp.seek(memaddr)
buf = fp.read(memsize)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
return out
fp.close()
# search for either an SM table or DMI table
i = base = length = num = 0
while(i < memsize):
if buf[i:i+4] == '_SM_' and i < memsize - 16:
length = struct.unpack('H', buf[i+22:i+24])[0]
base, num = struct.unpack('IH', buf[i+24:i+30])
break
elif buf[i:i+5] == '_DMI_':
length = struct.unpack('H', buf[i+6:i+8])[0]
base, num = struct.unpack('IH', buf[i+8:i+14])
break
i += 16
if base == 0 and length == 0 and num == 0:
if(fatal):
doError('Neither SMBIOS nor DMI were found')
else:
return out
# read in the SM or DMI table
fp = open(mempath, 'rb')
try:
fp.seek(base)
buf = fp.read(length)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
return out
fp.close()
# scan the table for the values we want
count = i = 0
while(count < num and i <= len(buf) - 4):
type, size, handle = struct.unpack('BBH', buf[i:i+4])
n = i + size
while n < len(buf) - 1:
if 0 == struct.unpack('H', buf[n:n+2])[0]:
break
n += 1
data = buf[i+size:n+2].split('\0')
for name in info:
itype, idxadr = info[name]
if itype == type:
idx = struct.unpack('B', buf[i+idxadr])[0]
if idx > 0 and idx < len(data) - 1:
s = data[idx-1].strip()
if s and s.lower() != 'to be filled by o.e.m.':
out[name] = data[idx-1]
i = n + 2
count += 1
return out
# Function: getFPDT
# Description:
#	 Read the ACPI BIOS tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
sysvals.rootCheck(True)
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file does not exist: %s' % sysvals.fpdtpath)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.fpdtpath)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file does not exist: %s' % sysvals.mempath)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.mempath)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes')
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
if(header[0] not in rectype):
i += header[1]
continue
if(header[1] != 16):
i += header[1]
continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
if(output):
print('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
if(prechead[0] not in prectype):
					break
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
status = True
print('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(sysvals.rootCheck(False)):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
res = sysvals.colorText('NO')
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
ftgood = sysvals.verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# check if kprobes are available
res = sysvals.colorText('NO')
sysvals.usekprobes = sysvals.verifyKprobes()
if(sysvals.usekprobes):
res = 'YES'
else:
sysvals.usedevsrc = False
print(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
if not probecheck:
return status
# verify kprobes
if sysvals.usekprobes:
for name in sysvals.tracefuncs:
sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
if sysvals.usedevsrc:
for name in sysvals.dev_tracefuncs:
sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
sysvals.addKprobes(True)
return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if(help == True):
printHelp()
	print('ERROR: %s\n' % msg)
sys.exit()
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
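# e.g. the config parser path uses getArgInt('-x2delay', value, 0, 60000, False),
# which parses value as an integer and errors out unless 0 <= value <= 60000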
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = float(arg)
except:
doError(name+': non-numerical value given', True)
if(val < min or val > max):
doError(name+': value should be between %f and %f' % (min, max), True)
return val
def processData():
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
if sysvals.dmesgfile:
dmesgtext = loadKernelLog(True)
for data in testruns:
data.extractErrorInfo(dmesgtext)
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
return testruns
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
if sysvals.ftracefile:
doesTraceLogHaveTraceEvents()
if not sysvals.dmesgfile and not sysvals.usetraceeventsonly:
doError('recreating this html output requires a dmesg file')
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
if os.path.exists(sysvals.htmlfile):
if not os.path.isfile(sysvals.htmlfile):
doError('a directory already exists with this name: %s' % sysvals.htmlfile)
elif not os.access(sysvals.htmlfile, os.W_OK):
doError('missing permission to write to %s' % sysvals.htmlfile)
return processData()
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest():
# prepare for the test
sysvals.initFtrace()
sysvals.initTestOutput('suspend')
vprint('Output files:\n\t%s\n\t%s\n\t%s' % \
(sysvals.dmesgfile, sysvals.ftracefile, sysvals.htmlfile))
# execute the test
executeSuspend()
sysvals.cleanupFtrace()
processData()
# if running as root, change output dir owner to sudo_user
if os.path.isdir(sysvals.testdir) and os.getuid() == 0 and \
'SUDO_USER' in os.environ:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
def find_in_html(html, strs, div=False):
	for s in strs:
		l = len(s)
		i = html.find(s)
if i >= 0:
break
if i < 0:
return ''
if not div:
return re.search(r'[-+]?\d*\.\d+|\d+', html[i+l:i+l+50]).group()
n = html[i+l:].find('</div>')
if n < 0:
return ''
return html[i+l:i+l+n]
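# e.g. if html contains 'Kernel Suspend: 1009.313' then
#	 find_in_html(html, ['Kernel Suspend: ']) -> '1009.313'
# and with div=True the text up to the next closing </div> is returned instead.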
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True):
inpath = os.path.abspath(subdir)
outpath = inpath
if local:
outpath = os.path.abspath('.')
print('Generating a summary of folder "%s"' % inpath)
testruns = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
			if(not re.match('.*\.html', filename)):
continue
file = os.path.join(dirname, filename)
html = open(file, 'r').read(10000)
suspend = find_in_html(html,
['Kernel Suspend: ', 'Kernel Suspend Time: '])
resume = find_in_html(html,
['Kernel Resume: ', 'Kernel Resume Time: '])
line = find_in_html(html, ['<div class="stamp">'], True)
stmp = line.split()
if not suspend or not resume or len(stmp) < 4:
continue
data = {
'host': stmp[0],
'kernel': stmp[1],
'mode': stmp[2],
				'time': ' '.join(stmp[3:]),
'suspend': suspend,
'resume': resume,
'url': os.path.relpath(file, outpath),
}
if len(stmp) == 7:
data['kernel'] = 'unknown'
data['mode'] = stmp[1]
data['time'] = string.join(stmp[2:], ' ')
testruns.append(data)
outfile = os.path.join(outpath, 'summary.html')
print('Summary file: %s' % outfile)
createHTMLSummarySimple(testruns, outfile, inpath)
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(value):
yes = ['1', 'true', 'yes', 'on']
if value.lower() in yes:
return True
return False
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
Config = ConfigParser.ConfigParser()
Config.read(file)
sections = Config.sections()
overridekprobes = False
overridedevkprobes = False
if 'Settings' in sections:
for opt in Config.options('Settings'):
value = Config.get('Settings', opt).lower()
if(opt.lower() == 'verbose'):
sysvals.verbose = checkArgBool(value)
elif(opt.lower() == 'addlogs'):
sysvals.dmesglog = sysvals.ftracelog = checkArgBool(value)
elif(opt.lower() == 'dev'):
sysvals.usedevsrc = checkArgBool(value)
elif(opt.lower() == 'proc'):
sysvals.useprocmon = checkArgBool(value)
elif(opt.lower() == 'x2'):
if checkArgBool(value):
sysvals.execcount = 2
elif(opt.lower() == 'callgraph'):
sysvals.usecallgraph = checkArgBool(value)
elif(opt.lower() == 'override-timeline-functions'):
overridekprobes = checkArgBool(value)
elif(opt.lower() == 'override-dev-timeline-functions'):
overridedevkprobes = checkArgBool(value)
elif(opt.lower() == 'devicefilter'):
sysvals.setDeviceFilter(value)
elif(opt.lower() == 'expandcg'):
sysvals.cgexp = checkArgBool(value)
elif(opt.lower() == 'srgap'):
if checkArgBool(value):
sysvals.srgap = 5
elif(opt.lower() == 'mode'):
sysvals.suspendmode = value
elif(opt.lower() == 'command'):
sysvals.testcommand = value
elif(opt.lower() == 'x2delay'):
sysvals.x2delay = getArgInt('-x2delay', value, 0, 60000, False)
elif(opt.lower() == 'predelay'):
sysvals.predelay = getArgInt('-predelay', value, 0, 60000, False)
elif(opt.lower() == 'postdelay'):
sysvals.postdelay = getArgInt('-postdelay', value, 0, 60000, False)
elif(opt.lower() == 'maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', value, 0, 1000, False)
elif(opt.lower() == 'rtcwake'):
if value.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', value, 0, 3600, False)
elif(opt.lower() == 'timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', value, 0, 6, False))
elif(opt.lower() == 'mindev'):
sysvals.mindevlen = getArgFloat('-mindev', value, 0.0, 10000.0, False)
elif(opt.lower() == 'callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', value, 0.0, 1.0, False)
elif(opt.lower() == 'callloop-maxlen'):
				sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', value, 0.0, 1.0, False)
elif(opt.lower() == 'mincg'):
sysvals.mincglen = getArgFloat('-mincg', value, 0.0, 10000.0, False)
elif(opt.lower() == 'output-dir'):
sysvals.testdir = sysvals.setOutputFolder(value)
if sysvals.suspendmode == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"')
# compatibility errors
if sysvals.usedevsrc and sysvals.usecallgraph:
doError('-dev is not compatible with -f')
if sysvals.usecallgraph and sysvals.useprocmon:
doError('-proc is not compatible with -f')
if overridekprobes:
sysvals.tracefuncs = dict()
if overridedevkprobes:
sysvals.dev_tracefuncs = dict()
kprobes = dict()
kprobesec = 'dev_timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
text = Config.get(kprobesec, name)
kprobes[name] = (text, True)
kprobesec = 'timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
if name in kprobes:
doError('Duplicate timeline function found "%s"' % (name))
text = Config.get(kprobesec, name)
kprobes[name] = (text, False)
for name in kprobes:
function = name
format = name
color = ''
args = dict()
text, dev = kprobes[name]
data = text.split()
i = 0
for val in data:
			# bracketed strings are special formatting, read them separately
if val[0] == '[' and val[-1] == ']':
for prop in val[1:-1].split(','):
p = prop.split('=')
if p[0] == 'color':
try:
color = int(p[1], 16)
color = '#'+p[1]
except:
color = p[1]
continue
# first real arg should be the format string
if i == 0:
format = val
# all other args are actual function args
else:
d = val.split('=')
args[d[0]] = d[1]
i += 1
if not function or not format:
doError('Invalid kprobe: %s' % name)
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
doError('Duplicate timeline function found "%s"' % (name))
kp = {
'name': name,
'func': function,
'format': format,
sysvals.archargs: args
}
if color:
kp['color'] = color
if dev:
sysvals.dev_tracefuncs[name] = kp
else:
sysvals.tracefuncs[name] = kp
# Function: printHelp
# Description:
# print out the help text
def printHelp():
print('')
print('%s v%s' % (sysvals.title, sysvals.version))
print('Usage: sudo sleepgraph <options> <commands>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' If no specific command is given, the default behavior is to initiate')
print(' a suspend/resume and capture the dmesg/ftrace output as an html timeline.')
print('')
print(' Generates output files in subdirectory: suspend-yymmdd-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -config fn Pull arguments and config options from file fn')
print(' -verbose Print extra information during execution and analysis')
	print(' -m mode Mode to initiate for suspend (default: %s)' % sysvals.suspendmode)
print(' -o name Overrides the output subdirectory name when running a new test')
print(' default: suspend-{date}-{time}')
print(' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)')
print(' -addlogs Add the dmesg and ftrace logs to the html output')
print(' -srgap Add a visible gap in the timeline between sus/res (default: disabled)')
print(' [advanced]')
print(' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"')
print(' -proc Add usermode process info into the timeline (default: disabled)')
print(' -dev Add kernel function calls and threads to the timeline (default: disabled)')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)')
print(' -predelay t Include t ms delay before 1st suspend (default: 0 ms)')
print(' -postdelay t Include t ms delay after last resume (default: 0 ms)')
print(' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [debug]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -maxdepth N limit the callgraph data to N call levels (default: 0=all)')
print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
print(' -fadd file Add functions to be graphed in the timeline from a list in a text file')
print(' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names')
print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)')
print(' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)')
print(' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
print('')
print('Other commands:')
print(' -modes List available suspend modes')
print(' -status Test to see if the system is enabled to run this tool')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -sysinfo Print out system info extracted from BIOS')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' -flist Print the list of functions currently being captured in ftrace')
print(' -flistall Print all functions capable of being captured in ftrace')
	print(' -summary directory Create a summary of all tests in this dir')
print(' [redo]')
print(' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)')
print(' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
outdir = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', '-usbtopo', '-usbauto', '-status']
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
if val == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"', True)
sysvals.suspendmode = val
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
sys.exit()
elif(arg == '-v'):
print("Version %s" % sysvals.version)
sys.exit()
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-predelay'):
sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
elif(arg == '-postdelay'):
sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-addlogs'):
sysvals.dmesglog = sysvals.ftracelog = True
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
sysvals.useprocmon = True
elif(arg == '-dev'):
sysvals.usedevsrc = True
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
try:
val = args.next()
except:
doError('No rtcwake time supplied', True)
if val.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
elif(arg == '-timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
elif(arg == '-mindev'):
sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
elif(arg == '-mincg'):
sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-cgtest'):
sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
elif(arg == '-cgphase'):
try:
val = args.next()
except:
doError('No phase name supplied', True)
d = Data(0)
if val not in d.phases:
				doError('Invalid phase, valid phases are %s' % d.phases, True)
sysvals.cgphase = val
elif(arg == '-callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
elif(arg == '-callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
elif(arg == '-cmd'):
try:
val = args.next()
except:
doError('No command string supplied', True)
sysvals.testcommand = val
sysvals.suspendmode = 'command'
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-srgap'):
sysvals.srgap = 5
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-o'):
try:
val = args.next()
except:
doError('No subdirectory name supplied', True)
outdir = sysvals.setOutputFolder(val)
elif(arg == '-config'):
try:
val = args.next()
except:
doError('No text file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
configFromFile(val)
elif(arg == '-fadd'):
try:
val = args.next()
except:
doError('No text file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
sysvals.addFtraceFilterFunctions(val)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
outdir = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
				doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
else:
doError('Invalid argument: '+arg, True)
# compatibility errors
if(sysvals.usecallgraph and sysvals.usedevsrc):
doError('-dev is not compatible with -f')
if(sysvals.usecallgraph and sysvals.useprocmon):
doError('-proc is not compatible with -f')
# callgraph size cannot exceed device size
if sysvals.mincglen < sysvals.mindevlen:
sysvals.mincglen = sysvals.mindevlen
# just run a utility command and exit
sysvals.cpuInfo()
if(cmd != ''):
if(cmd == 'status'):
statusCheck(True)
elif(cmd == 'fpdt'):
getFPDT(True)
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo()
elif(cmd == 'usbtopo'):
detectUSB()
elif(cmd == 'modes'):
			print(getModes())
elif(cmd == 'flist'):
sysvals.getFtraceFilterFunctions(True)
elif(cmd == 'flistall'):
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
runSummary(outdir, True)
sys.exit()
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
# extract mem modes and convert
mode = sysvals.suspendmode
if 'mem' == mode[:3]:
if '-' in mode:
memmode = mode.split('-')[-1]
else:
memmode = 'deep'
if memmode == 'shallow':
mode = 'standby'
elif memmode == 's2idle':
mode = 'freeze'
else:
mode = 'mem'
sysvals.memmode = memmode
sysvals.suspendmode = mode
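		# illustrative examples of the conversion above: '-m mem-s2idle' runs as
		# suspendmode 'freeze' with memmode 's2idle', '-m mem-shallow' runs as
		# 'standby', and plain '-m mem' keeps 'mem' with memmode 'deep'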
sysvals.systemInfo(dmidecode(sysvals.mempath))
if multitest['run']:
# run multiple tests in a separate subdirectory
if not outdir:
s = 'suspend-x%d' % multitest['count']
outdir = datetime.now().strftime(s+'-%y%m%d-%H%M%S')
if not os.path.isdir(outdir):
os.mkdir(outdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(outdir, datetime.now().strftime(fmt))
runTest()
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(outdir, False)
else:
if outdir:
sysvals.testdir = outdir
# run the test in the current directory
runTest()
|
aptana/Pydev
|
refs/heads/development
|
bundles/org.python.pydev.jython/Lib/CGIHTTPServer.py
|
8
|
"""CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Return a tuple (dir, rest) if self.path requires running a
CGI script, None if not. Note that rest begins with a
slash if it is not empty.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return 1
return 0
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%s)" % `scriptname`)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%s)" %
`scriptname`)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%s)" %
`scriptname`)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%s)" %
`scriptname`)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
# XXX AUTH_TYPE
# XXX REMOTE_USER
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
if not self.have_fork:
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
waste = self.rfile.read(1)
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2 or self.have_popen3:
# Windows -- use popen2 or popen3 to create a subprocess
import shutil
if self.have_popen3:
popenx = os.popen3
else:
popenx = os.popen2
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except:
nbytes = 0
files = popenx(cmdline, 'b')
fi = files[0]
fo = files[1]
if self.have_popen3:
fe = files[2]
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
waste = self.rfile._sock.recv(1)
fi.close()
shutil.copyfileobj(fo, self.wfile)
if self.have_popen3:
errors = fe.read()
fe.close()
if errors:
self.log_error('%s', errors)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return 0
return st[0] & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
ryfeus/lambda-packs
|
refs/heads/master
|
Keras_tensorflow/source/numpy/distutils/fcompiler/lahey.py
|
229
|
from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.fcompiler import FCompiler
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
compiler_type = 'lahey'
description = 'Lahey/Fujitsu Fortran 95 Compiler'
version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["lf95", "--fix"],
'compiler_fix' : ["lf95", "--fix"],
'compiler_f90' : ["lf95"],
'linker_so' : ["lf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g', '--chk', '--chkglobal']
def get_library_dirs(self):
opt = []
d = os.environ.get('LAHEY')
if d:
opt.append(os.path.join(d, 'lib'))
return opt
def get_libraries(self):
opt = []
opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='lahey')
compiler.customize()
print(compiler.get_version())
|
glmcdona/meddle
|
refs/heads/master
|
examples/base/Lib/json/tool.py
|
113
|
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
|
syci/account-financial-tools
|
refs/heads/8.0
|
account_asset_management/wizard/account_asset_remove.py
|
19
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
# Copyright (c) 2014 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
from dateutil.relativedelta import relativedelta
from datetime import datetime
import logging
_logger = logging.getLogger(__name__)
class account_asset_remove(orm.TransientModel):
_name = 'account.asset.remove'
_description = 'Remove Asset'
_residual_value_regime_countries = ['FR']
def _posting_regime(self, cr, uid, context=None):
return[
('residual_value', _('Residual Value')),
('gain_loss_on_sale', _('Gain/Loss on Sale')),
]
def _get_posting_regime(self, cr, uid, context=None):
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
asset = asset_obj.browse(cr, uid, context.get('active_id'))
country = asset and asset.company_id.country_id.code or False
if country in self._residual_value_regime_countries:
return 'residual_value'
else:
return 'gain_loss_on_sale'
def _get_sale(self, cr, uid, context=None):
if not context:
context = {}
inv_line_obj = self.pool.get('account.invoice.line')
currency_obj = self.pool.get('res.currency')
asset_id = context.get('active_id')
sale_value = 0.0
account_sale_id = False
inv_line_ids = inv_line_obj.search(
cr, uid, [('asset_id', '=', asset_id)], context=context)
for line in inv_line_obj.browse(cr, uid, inv_line_ids):
inv = line.invoice_id
comp_curr = inv.company_id.currency_id
inv_curr = inv.currency_id
if line.invoice_id.state in ['open', 'paid']:
account_sale_id = line.account_id.id
amount = line.price_subtotal
if inv_curr != comp_curr:
amount = currency_obj.compute(
cr, uid, inv_curr.id, comp_curr.id, amount,
context=context)
sale_value += amount
return {'sale_value': sale_value, 'account_sale_id': account_sale_id}
def _get_sale_value(self, cr, uid, context=None):
return self._get_sale(cr, uid, context=context)['sale_value']
def _get_sale_account(self, cr, uid, context=None):
return self._get_sale(cr, uid, context=context)['account_sale_id']
def _get_plus_account(self, cr, uid, context=None):
if not context:
context = {}
acc = False
asset_obj = self.pool.get('account.asset.asset')
asset = asset_obj.browse(cr, uid, context.get('active_id'))
if asset:
acc = asset.category_id.account_plus_value_id
return acc and acc.id or False
def _get_min_account(self, cr, uid, context=None):
if not context:
context = {}
acc = False
asset_obj = self.pool.get('account.asset.asset')
asset = asset_obj.browse(cr, uid, context.get('active_id'))
if asset:
acc = asset.category_id.account_min_value_id
return acc and acc.id or False
def _get_residual_account(self, cr, uid, context=None):
if not context:
context = {}
acc = False
asset_obj = self.pool.get('account.asset.asset')
asset = asset_obj.browse(cr, uid, context.get('active_id'))
if asset:
acc = asset.category_id.account_residual_value_id
return acc and acc.id or False
_columns = {
'date_remove': fields.date(
'Asset Removal Date', required=True,
help="Removal date must be after the last posted entry "
"in case of early removal"),
'period_id': fields.many2one(
'account.period', 'Force Period',
domain=[('state', '<>', 'done')],
help="Keep empty to use the period of the removal ate."),
'sale_value': fields.float('Sale Value'),
'account_sale_id': fields.many2one(
'account.account', 'Asset Sale Account',
domain=[('type', '=', 'other')]),
'account_plus_value_id': fields.many2one(
'account.account', 'Plus-Value Account',
domain=[('type', '=', 'other')]),
'account_min_value_id': fields.many2one(
'account.account', 'Min-Value Account',
domain=[('type', '=', 'other')]),
'account_residual_value_id': fields.many2one(
'account.account', 'Residual Value Account',
domain=[('type', '=', 'other')]),
'posting_regime': fields.selection(
_posting_regime, 'Removal Entry Policy',
required=True,
help="Removal Entry Policy \n"
" * Residual Value: The non-depreciated value will be "
"posted on the 'Residual Value Account' \n"
" * Gain/Loss on Sale: The Gain or Loss will be posted on "
"the 'Plus-Value Account' or 'Min-Value Account' "),
'note': fields.text('Notes'),
}
_defaults = {
'sale_value': _get_sale_value,
'account_sale_id': _get_sale_account,
'account_plus_value_id': _get_plus_account,
'account_min_value_id': _get_min_account,
'account_residual_value_id': _get_residual_account,
'posting_regime': _get_posting_regime,
}
_sql_constraints = [(
'sale_value', 'CHECK (sale_value>=0)',
'The Sale Value must be positive!')]
def _prepare_early_removal(self, cr, uid,
asset, date_remove, context=None):
"""
Generate last depreciation entry on the day before the removal date.
"""
asset_line_obj = self.pool.get('account.asset.depreciation.line')
digits = self.pool.get('decimal.precision').precision_get(
cr, uid, 'Account')
dl_ids = asset_line_obj.search(
cr, uid,
[('asset_id', '=', asset.id), ('type', '=', 'depreciate'),
('init_entry', '=', False), ('move_check', '=', False)],
order='line_date asc')
first_to_depreciate_dl = asset_line_obj.browse(cr, uid, dl_ids[0])
first_date = first_to_depreciate_dl.line_date
if date_remove > first_date:
raise orm.except_orm(
_('Error!'),
_("You can't make an early removal if all the depreciation "
"lines for previous periods are not posted."))
last_depr_date = first_to_depreciate_dl.previous_id.line_date
period_number_days = (
datetime.strptime(first_date, '%Y-%m-%d') -
datetime.strptime(last_depr_date, '%Y-%m-%d')).days
date_remove = datetime.strptime(date_remove, '%Y-%m-%d')
new_line_date = date_remove + relativedelta(days=-1)
to_depreciate_days = (
new_line_date -
datetime.strptime(last_depr_date, '%Y-%m-%d')).days
to_depreciate_amount = round(
float(to_depreciate_days) / float(period_number_days) *
first_to_depreciate_dl.amount, digits)
residual_value = asset.value_residual - to_depreciate_amount
if to_depreciate_amount:
update_vals = {
'amount': to_depreciate_amount,
'line_date': new_line_date
}
first_to_depreciate_dl.write(update_vals)
asset_line_obj.create_move(
cr, uid, [dl_ids[0]], context=context)
dl_ids.pop(0)
asset_line_obj.unlink(cr, uid, dl_ids, context=context)
return residual_value
def _get_removal_data(self, cr, uid, wiz_data, asset, residual_value,
context=None):
move_lines = []
partner_id = asset.partner_id and asset.partner_id.id or False
categ = asset.category_id
# asset and asset depreciation account reversal
depr_amount = asset.asset_value - residual_value
move_line_vals = {
'name': asset.name,
'account_id': categ.account_depreciation_id.id,
'debit': depr_amount > 0 and depr_amount or 0.0,
'credit': depr_amount < 0 and -depr_amount or 0.0,
'partner_id': partner_id,
'asset_id': asset.id
}
move_lines.append((0, 0, move_line_vals))
move_line_vals = {
'name': asset.name,
'account_id': categ.account_asset_id.id,
'debit': asset.asset_value < 0 and -asset.asset_value or 0.0,
'credit': asset.asset_value > 0 and asset.asset_value or 0.0,
'partner_id': partner_id,
'asset_id': asset.id
}
move_lines.append((0, 0, move_line_vals))
if residual_value:
if wiz_data.posting_regime == 'residual_value':
move_line_vals = {
'name': asset.name,
'account_id': wiz_data.account_residual_value_id.id,
'analytic_account_id': asset.account_analytic_id.id,
'debit': residual_value,
'credit': 0.0,
'partner_id': partner_id,
'asset_id': asset.id
}
move_lines.append((0, 0, move_line_vals))
elif wiz_data.posting_regime == 'gain_loss_on_sale':
if wiz_data.sale_value:
sale_value = wiz_data.sale_value
move_line_vals = {
'name': asset.name,
'account_id': wiz_data.account_sale_id.id,
'analytic_account_id': asset.account_analytic_id.id,
'debit': sale_value,
'credit': 0.0,
'partner_id': partner_id,
'asset_id': asset.id
}
move_lines.append((0, 0, move_line_vals))
balance = wiz_data.sale_value - residual_value
account_id = (wiz_data.account_plus_value_id.id
if balance > 0
else wiz_data.account_min_value_id.id)
move_line_vals = {
'name': asset.name,
'account_id': account_id,
'analytic_account_id': asset.account_analytic_id.id,
'debit': balance < 0 and -balance or 0.0,
'credit': balance > 0 and balance or 0.0,
'partner_id': partner_id,
'asset_id': asset.id
}
move_lines.append((0, 0, move_line_vals))
return move_lines
def remove(self, cr, uid, ids, context=None):
asset_obj = self.pool.get('account.asset.asset')
asset_line_obj = self.pool.get('account.asset.depreciation.line')
move_obj = self.pool.get('account.move')
period_obj = self.pool.get('account.period')
asset_id = context['active_id']
asset = asset_obj.browse(cr, uid, asset_id, context=context)
asset_ref = asset.code and '%s (ref: %s)' \
% (asset.name, asset.code) or asset.name
wiz_data = self.browse(cr, uid, ids[0], context=context)
if context.get('early_removal'):
residual_value = self._prepare_early_removal(
cr, uid, asset, wiz_data.date_remove, context=context)
else:
residual_value = asset.value_residual
ctx = dict(context, company_id=asset.company_id.id)
period_id = wiz_data.period_id and wiz_data.period_id.id or False
if not period_id:
ctx.update(account_period_prefer_normal=True)
period_ids = period_obj.find(
cr, uid, wiz_data.date_remove, context=ctx)
period_id = period_ids[0]
dl_ids = asset_line_obj.search(
cr, uid,
[('asset_id', '=', asset.id), ('type', '=', 'depreciate')],
order='line_date desc')
last_date = asset_line_obj.browse(cr, uid, dl_ids[0]).line_date
if wiz_data.date_remove < last_date:
raise orm.except_orm(
_('Error!'),
_("The removal date must be after "
"the last depreciation date."))
line_name = asset_obj._get_depreciation_entry_name(
cr, uid, asset, len(dl_ids) + 1, context=context)
journal_id = asset.category_id.journal_id.id
# create move
move_vals = {
'name': asset.name,
'date': wiz_data.date_remove,
'ref': line_name,
'period_id': period_id,
'journal_id': journal_id,
'narration': wiz_data.note,
}
move_id = move_obj.create(cr, uid, move_vals, context=context)
# create asset line
asset_line_vals = {
'amount': residual_value,
'asset_id': asset_id,
'name': line_name,
'line_date': wiz_data.date_remove,
'move_id': move_id,
'type': 'remove',
}
asset_line_obj.create(cr, uid, asset_line_vals, context=context)
asset.write({'state': 'removed', 'date_remove': wiz_data.date_remove})
# create move lines
move_lines = self._get_removal_data(
cr, uid, wiz_data, asset, residual_value, context=context)
move_obj.write(cr, uid, [move_id], {'line_id': move_lines},
context=dict(context, allow_asset=True))
return {
'name': _("Asset '%s' Removal Journal Entry") % asset_ref,
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'type': 'ir.actions.act_window',
'context': context,
'nodestroy': True,
'domain': [('id', '=', move_id)],
}
|
BorgERP/borg-erp-6of3
|
refs/heads/master
|
l10n_coa/l10n_at/__init__.py
|
438
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
PaddlePaddle/Paddle
|
refs/heads/develop
|
python/paddle/fluid/tests/unittests/test_precision_recall_op.py
|
7
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
def calc_precision(tp_count, fp_count):
if tp_count > 0.0 or fp_count > 0.0:
return tp_count / (tp_count + fp_count)
return 1.0
def calc_recall(tp_count, fn_count):
if tp_count > 0.0 or fn_count > 0.0:
return tp_count / (tp_count + fn_count)
return 1.0
def calc_f1_score(precision, recall):
if precision > 0.0 or recall > 0.0:
return 2 * precision * recall / (precision + recall)
return 0.0
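# Worked example of the helpers above (illustrative figures): with 3 true
# positives, 1 false positive and 2 false negatives,
#   calc_precision(3.0, 1.0) == 0.75
#   calc_recall(3.0, 2.0) == 0.6
#   calc_f1_score(0.75, 0.6) == 2 * 0.75 * 0.6 / 1.35 ~= 0.667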
def get_states(idxs, labels, cls_num, weights=None):
ins_num = idxs.shape[0]
# TP FP TN FN
states = np.zeros((cls_num, 4)).astype('float32')
for i in range(ins_num):
w = weights[i] if weights is not None else 1.0
idx = idxs[i][0]
label = labels[i][0]
if idx == label:
states[idx][0] += w
for j in range(cls_num):
states[j][2] += w
states[idx][2] -= w
else:
states[label][3] += w
states[idx][1] += w
for j in range(cls_num):
states[j][2] += w
states[label][2] -= w
states[idx][2] -= w
return states
def compute_metrics(states, cls_num):
total_tp_count = 0.0
total_fp_count = 0.0
total_fn_count = 0.0
macro_avg_precision = 0.0
macro_avg_recall = 0.0
for i in range(cls_num):
total_tp_count += states[i][0]
total_fp_count += states[i][1]
total_fn_count += states[i][3]
macro_avg_precision += calc_precision(states[i][0], states[i][1])
macro_avg_recall += calc_recall(states[i][0], states[i][3])
metrics = []
macro_avg_precision /= cls_num
macro_avg_recall /= cls_num
metrics.append(macro_avg_precision)
metrics.append(macro_avg_recall)
metrics.append(calc_f1_score(macro_avg_precision, macro_avg_recall))
micro_avg_precision = calc_precision(total_tp_count, total_fp_count)
metrics.append(micro_avg_precision)
micro_avg_recall = calc_recall(total_tp_count, total_fn_count)
metrics.append(micro_avg_recall)
metrics.append(calc_f1_score(micro_avg_precision, micro_avg_recall))
return np.array(metrics).astype('float32')
class TestPrecisionRecallOp_0(OpTest):
def setUp(self):
self.op_type = "precision_recall"
ins_num = 64
cls_num = 10
max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32')
idxs = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
labels = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
states = get_states(idxs, labels, cls_num)
metrics = compute_metrics(states, cls_num)
self.attrs = {'class_number': cls_num}
self.inputs = {'MaxProbs': max_probs, 'Indices': idxs, 'Labels': labels}
self.outputs = {
'BatchMetrics': metrics,
'AccumMetrics': metrics,
'AccumStatesInfo': states
}
def test_check_output(self):
self.check_output()
class TestPrecisionRecallOp_1(OpTest):
def setUp(self):
self.op_type = "precision_recall"
ins_num = 64
cls_num = 10
max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32')
idxs = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32')
labels = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
states = get_states(idxs, labels, cls_num, weights)
metrics = compute_metrics(states, cls_num)
self.attrs = {'class_number': cls_num}
self.inputs = {
'MaxProbs': max_probs,
'Indices': idxs,
'Labels': labels,
'Weights': weights
}
self.outputs = {
'BatchMetrics': metrics,
'AccumMetrics': metrics,
'AccumStatesInfo': states
}
def test_check_output(self):
self.check_output()
class TestPrecisionRecallOp_2(OpTest):
def setUp(self):
self.op_type = "precision_recall"
ins_num = 64
cls_num = 10
max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32')
idxs = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32')
labels = np.random.choice(range(cls_num), ins_num).reshape(
(ins_num, 1)).astype('int32')
states = np.random.randint(0, 30, (cls_num, 4)).astype('float32')
accum_states = get_states(idxs, labels, cls_num, weights)
batch_metrics = compute_metrics(accum_states, cls_num)
accum_states += states
accum_metrics = compute_metrics(accum_states, cls_num)
self.attrs = {'class_number': cls_num}
self.inputs = {
'MaxProbs': max_probs,
'Indices': idxs,
'Labels': labels,
'Weights': weights,
'StatesInfo': states
}
self.outputs = {
'BatchMetrics': batch_metrics,
'AccumMetrics': accum_metrics,
'AccumStatesInfo': accum_states
}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
|
s0undt3ch/powerline
|
refs/heads/develop
|
setup.py
|
1
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
import subprocess
import logging
import shlex
from traceback import print_exc
from setuptools import setup, find_packages
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(CURRENT_DIR, 'README.rst'), 'rb').read().decode('utf-8')
except IOError:
README = ''
OLD_PYTHON = sys.version_info < (2, 7)
def compile_client():
'''Compile the C powerline-client script.'''
if hasattr(sys, 'getwindowsversion'):
raise NotImplementedError()
else:
from distutils.ccompiler import new_compiler
compiler = new_compiler().compiler
cflags = os.environ.get('CFLAGS', str('-O3'))
		# A normal split would split on each space, which might be incorrect;
		# shlex will not split if a space occurs inside an argument's value.
subprocess.check_call(compiler + shlex.split(cflags) + ['client/powerline.c', '-o', 'scripts/powerline'])
try:
compile_client()
except Exception as e:
print('Compiling C version of powerline-client failed')
logging.exception(e)
# FIXME Catch more specific exceptions
import shutil
if hasattr(shutil, 'which'):
which = shutil.which
else:
sys.path.append(CURRENT_DIR)
from powerline.lib.shell import which
if which('socat') and which('sed') and which('sh'):
print('Using powerline.sh script instead of C version (requires socat, sed and sh)')
shutil.copyfile('client/powerline.sh', 'scripts/powerline')
can_use_scripts = True
else:
print('Using powerline.py script instead of C version')
shutil.copyfile('client/powerline.py', 'scripts/powerline')
can_use_scripts = True
else:
can_use_scripts = False
def get_version():
base_version = '2.1.4'
base_version += '.dev9999'
try:
return base_version + '+git.' + str(subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip())
except Exception:
print_exc()
return base_version
setup(
name='powerline-status',
version=get_version(),
description='The ultimate statusline/prompt utility.',
long_description=README,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Plugins',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
download_url='https://github.com/powerline/powerline/archive/develop.zip',
author='Kim Silkebaekken',
author_email='kim.silkebaekken+vim@gmail.com',
url='https://github.com/powerline/powerline',
license='MIT',
# XXX Python 3 doesn’t allow compiled C files to be included in the scripts
# list below. This is because Python 3 distutils tries to decode the file to
# ASCII, and fails when powerline-client is a binary.
#
# XXX Python 2 fucks up script contents*. Not using it to install scripts
# any longer.
# * Consider the following input:
# % alias hex1=$'hexdump -e \'"" 1/1 "%02X\n"\''
# % diff <(hex1 ./scripts/powerline) <(hex1 ~/.local/bin/powerline)
# This will show output like
# 375c375
# < 0D
# ---
# > 0A
# (repeated, with diff segment header numbers growing up).
#
# FIXME Current solution does not work with `pip install -e`. Still better
# then solution that is not working at all.
scripts=[
'scripts/powerline-lint',
'scripts/powerline-daemon',
'scripts/powerline-render',
'scripts/powerline-config',
] + (['scripts/powerline'] if can_use_scripts else []),
data_files=(None if can_use_scripts else (('bin', ['scripts/powerline']),)),
keywords='',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
install_requires=[],
extras_require={
'docs': [
'Sphinx',
'sphinx_rtd_theme',
],
},
test_suite='tests' if not OLD_PYTHON else None,
)
|
botherder/volatility
|
refs/heads/master
|
volatility/fmtspec.py
|
45
|
# Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import re
class FormatSpec(object):
def __init__(self, string = '', **kwargs):
self.fill = ''
self.align = ''
self.sign = ''
self.altform = False
self.minwidth = -1
self.precision = -1
self.formtype = ''
if string != '':
self.from_string(string)
        # Ensure we parse the remaining arguments after the string so that they override
self.from_specs(**kwargs)
def from_specs(self, fill = None, align = None, sign = None, altform = None, minwidth = None, precision = None, formtype = None):
## Allow setting individual elements using kwargs
if fill is not None:
self.fill = fill
if align is not None:
self.align = align
if sign is not None:
self.sign = sign
if altform is not None:
self.altform = altform
if minwidth is not None:
self.minwidth = minwidth
if precision is not None:
self.precision = precision
if formtype is not None:
self.formtype = formtype
def from_string(self, formatspec):
# Format specifier regular expression
regexp = "\A(.[<>=^]|[<>=^])?([-+ ]|\(\))?(#?)(0?)(\d*)(\.\d+)?(.)?\Z"
match = re.search(regexp, formatspec)
if match is None:
raise ValueError("Invalid format specification: " + formatspec)
if match.group(1):
fillalign = match.group(1)
if len(fillalign) > 1:
self.fill = fillalign[0]
self.align = fillalign[1]
elif fillalign:
self.align = fillalign
if match.group(2):
self.sign = match.group(2)
if match.group(3):
self.altform = len(match.group(3)) > 0
if len(match.group(4)):
if not self.fill:
self.fill = "0"
if not self.align:
self.align = "="
if match.group(5):
self.minwidth = int(match.group(5))
if match.group(6):
self.precision = int(match.group(6)[1:])
if match.group(7):
self.formtype = match.group(7)
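    # Illustrative parse (not from the original source): from_string("<10.2f")
    # yields align='<', minwidth=10, precision=2 and formtype='f', leaving
    # fill, sign and altform at their defaults.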
def to_string(self):
formatspec = ""
if self.align:
formatspec = self.fill + self.align
formatspec += self.sign
if self.sign == '(':
formatspec += ')'
if self.altform:
formatspec += '#'
if self.minwidth >= 0:
formatspec += str(self.minwidth)
if self.precision >= 0:
formatspec += '.' + str(self.precision)
formatspec += self.formtype
return formatspec
def __str__(self):
return self.to_string()
|
andythigpen/home-assistant
|
refs/heads/master
|
tests/config/custom_components/light/test.py
|
7
|
"""
custom_components.light.test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mock switch platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.const import STATE_ON, STATE_OFF
from tests.helpers import MockToggleDevice
DEVICES = []
def init(empty=False):
""" (re-)initalizes the platform with devices. """
global DEVICES
DEVICES = [] if empty else [
MockToggleDevice('Ceiling', STATE_ON),
MockToggleDevice('Ceiling', STATE_OFF),
MockToggleDevice(None, STATE_OFF)
]
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Returns mock devices. """
add_devices_callback(DEVICES)
|
mpuccio/AliPhysics
|
refs/heads/master
|
PWGJE/EMCALJetTasks/Tracks/analysis/plots/__init__.py
|
369
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
|
inclement/vispy
|
refs/heads/master
|
vispy/visuals/volume.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
About this technique
--------------------
In Python, we define the six faces of a cuboid to draw, as well as
texture coordinates corresponding to the vertices of the cuboid.
The back faces of the cuboid are drawn (and front faces are culled)
because only the back faces are visible when the camera is inside the
volume.
In the vertex shader, we intersect the view ray with the near and far
clipping planes. In the fragment shader, we use these two points to
compute the ray direction and then compute the position of the front
cuboid surface (or near clipping plane) along the view ray.
Next we calculate the number of steps to walk from the front surface
to the back surface and iterate over these positions in a for-loop.
At each iteration, the fragment color or other voxel information is
updated depending on the selected rendering method.
It is important that the texture interpolation is 'linear', since with
'nearest' the result looks very ugly. The wrapping should be clamp_to_edge
to avoid artifacts when the ray takes a small step outside the volume.
The ray direction is established by mapping the vertex to the document
coordinate frame, adjusting z to +/-1, and mapping the coordinate back.
The ray is expressed in coordinates local to the volume (i.e. texture
coordinates).
"""
from ..gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from . import Visual
from .shaders import Function
from ..color import get_colormap
import numpy as np
# todo: implement more render methods (port from visvis)
# todo: allow anisotropic data
# todo: what to do about lighting? ambi/diffuse/spec/shinynes on each visual?
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
// attribute vec3 a_texcoord;
uniform vec3 u_shape;
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
// v_texcoord = a_texcoord;
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
""" # noqa
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform float u_threshold;
uniform float u_relative_step_size;
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g; // todo: why did I have this abstraction in visvis?
}}
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Get color of the texture (albedo)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple light, define lights on viewvox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero division)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by combining the different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
(u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
// This outer loop seems necessary on some systems for large
// datasets. Ugly, but it works ...
vec3 loc = start_loc;
int iter = 0;
while (iter < nsteps) {{
for (iter=iter; iter<nsteps; iter++)
{{
// Get sample color
vec4 color = $sample(u_volumetex, loc);
float val = color.g;
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
}}
{after_loop}
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
""" # noqa
MIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
int maxi = 0; // Where the maximum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
// Refine search for max value
loc = start_loc + step * (float(maxi) - 0.5);
for (int i=0; i<10; i++) {
maxval = max(maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
gl_FragColor = $cmap(maxval);
""",
)
MIP_FRAG_SHADER = FRAG_SHADER.format(**MIP_SNIPPETS)
TRANSLUCENT_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
float a1 = integrated_color.a;
float a2 = color.a * (1 - a1);
float alpha = max(a1 + a2, 0.001);
// Doesn't work.. GLSL optimizer bug?
//integrated_color = (integrated_color * a1 / alpha) +
// (color * a2 / alpha);
// This should be identical but does work correctly:
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;
integrated_color.a = alpha;
if( alpha > 0.99 ){
// stop integrating if the fragment becomes opaque
iter = nsteps;
}
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
TRANSLUCENT_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT_SNIPPETS)
ADDITIVE_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
integrated_color = 1.0 - (1.0 - integrated_color) * (1.0 - color);
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
ADDITIVE_FRAG_SHADER = FRAG_SHADER.format(**ADDITIVE_SNIPPETS)
ISO_SNIPPETS = dict(
before_loop="""
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
""",
in_loop="""
if (val > u_threshold-0.2) {
// Take the last interval in smaller steps
vec3 iloc = loc - step;
for (int i=0; i<10; i++) {
val = $sample(u_volumetex, iloc).g;
if (val > u_threshold) {
color = $cmap(val);
gl_FragColor = calculateColor(color, iloc, dstep);
iter = nsteps;
break;
}
iloc += step * 0.1;
}
}
""",
after_loop="""
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
frag_dict = {
'mip': MIP_FRAG_SHADER,
'iso': ISO_FRAG_SHADER,
'translucent': TRANSLUCENT_FRAG_SHADER,
'additive': ADDITIVE_FRAG_SHADER,
}
class VolumeVisual(Visual):
""" Displays a 3D Volume
Parameters
----------
vol : ndarray
The volume to display. Must be ndim==3.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'translucent', 'additive', 'iso'}
The render method to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render method. By default
the mean of the given volume is used.
relative_step_size : float
The relative step size to step through the volume. Default 0.8.
Increase to e.g. 1.5 to increase performance, at the cost of
quality.
cmap : str
Colormap to use.
emulate_texture : bool
Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
but has lower performance on desktop platforms.
"""
def __init__(self, vol, clim=None, method='mip', threshold=None,
relative_step_size=0.8, cmap='grays',
emulate_texture=False):
tex_cls = TextureEmulated3D if emulate_texture else Texture3D
# Storage of information of volume
self._vol_shape = ()
self._clim = None
self._need_vertex_update = True
# Set the colormap
self._cmap = get_colormap(cmap)
# Create gloo objects
self._vertices = VertexBuffer()
self._texcoord = VertexBuffer(
np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
], dtype=np.float32))
self._tex = tex_cls((10, 10, 10), interpolation='linear',
wrapping='clamp_to_edge')
# Create program
Visual.__init__(self, vcode=VERT_SHADER, fcode="")
self.shared_program['u_volumetex'] = self._tex
self.shared_program['a_position'] = self._vertices
self.shared_program['a_texcoord'] = self._texcoord
self._draw_mode = 'triangle_strip'
self._index_buffer = IndexBuffer()
# Only show back faces of cuboid. This is required because if we are
# inside the volume, then the front faces are outside of the clipping
# box and will not be drawn.
self.set_gl_state('translucent', cull_face=False)
# Set data
self.set_data(vol, clim)
# Set params
self.method = method
self.relative_step_size = relative_step_size
self.threshold = threshold if (threshold is not None) else vol.mean()
self.freeze()
def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.')
if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.')
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like')
self._clim = tuple(clim)
if self._clim is None:
self._clim = vol.min(), vol.max()
# Apply clim
vol = np.array(vol, dtype='float32', copy=False)
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.:
vol *= 1.0 / self._clim[0]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Apply to texture
self._tex.set_data(vol) # will be efficient if vol is same shape
self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1],
vol.shape[0])
shape = vol.shape[:3]
if self._vol_shape != shape:
self._vol_shape = shape
self._need_vertex_update = True
self._vol_shape = shape
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024
@property
def clim(self):
""" The contrast limits that were applied to the volume data.
Settable via set_data().
"""
return self._clim
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def method(self):
"""The render method to use
Current options are:
* translucent: voxel colors are blended along the view ray until
the result is opaque.
* mip: maximum intensity projection. Cast a ray and display the
maximum value that was encountered.
* additive: voxel colors are added along the view ray until
the result is saturated.
* iso: isosurface. Cast a ray until a certain threshold is
encountered. At that location, lighting calculations are
performed to give the visual appearance of a surface.
"""
return self._method
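# Sketch: switching the render method at runtime (assumes an existing
# VolumeVisual instance named `visual`):
#
#     visual.method = 'iso'      # selects the matching fragment shader
#     visual.threshold = 0.5     # only the 'iso' method uses u_threshold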
@method.setter
def method(self, method):
# Check and save
known_methods = list(frag_dict.keys())
if method not in known_methods:
raise ValueError('Volume render method should be in %r, not %r' %
(known_methods, method))
self._method = method
# Get rid of specific variables - they may become invalid
if 'u_threshold' in self.shared_program:
self.shared_program['u_threshold'] = None
self.shared_program.frag = frag_dict[method]
self.shared_program.frag['sampler_type'] = self._tex.glsl_sampler_type
self.shared_program.frag['sample'] = self._tex.glsl_sample
self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def threshold(self):
""" The threshold value to apply for the isosurface render method.
"""
return self._threshold
@threshold.setter
def threshold(self, value):
self._threshold = float(value)
if 'u_threshold' in self.shared_program:
self.shared_program['u_threshold'] = self._threshold
self.update()
@property
def relative_step_size(self):
""" The relative step size used during raycasting.
Larger values yield higher performance at reduced quality. If
set > 2.0 the ray skips entire voxels. Recommended values are
between 0.5 and 1.5. The amount of quality degradation depends
on the render method.
"""
return self._relative_step_size
@relative_step_size.setter
def relative_step_size(self, value):
value = float(value)
if value < 0.1:
raise ValueError('relative_step_size cannot be smaller than 0.1')
self._relative_step_size = value
self.shared_program['u_relative_step_size'] = value
def _create_vertex_data(self):
""" Create and set positions and texture coords from the given shape
We have six faces with 1 quad (2 triangles) each, resulting in
6*2*3 = 36 vertices in total.
"""
shape = self._vol_shape
# Get corner coordinates. The -0.5 offset is to center
# pixels/voxels. This works correctly for anisotropic data.
x0, x1 = -0.5, shape[2] - 0.5
y0, y1 = -0.5, shape[1] - 0.5
z0, z1 = -0.5, shape[0] - 0.5
pos = np.array([
[x0, y0, z0],
[x1, y0, z0],
[x0, y1, z0],
[x1, y1, z0],
[x0, y0, z1],
[x1, y0, z1],
[x0, y1, z1],
[x1, y1, z1],
], dtype=np.float32)
"""
6-------7
/| /|
4-------5 |
| | | |
| 2-----|-3
|/ |/
0-------1
"""
# Order is chosen such that normals face outward; front faces will be
# culled.
indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
dtype=np.uint32)
# Apply
self._vertices.set_data(pos)
self._index_buffer.set_data(indices)
def _compute_bounds(self, axis, view):
return 0, self._vol_shape[axis]
def _prepare_transforms(self, view):
trs = view.transforms
view.view_program.vert['transform'] = trs.get_transform()
view_tr_f = trs.get_transform('visual', 'document')
view_tr_i = view_tr_f.inverse
view.view_program.vert['viewtransformf'] = view_tr_f
view.view_program.vert['viewtransformi'] = view_tr_i
def _prepare_draw(self, view):
if self._need_vertex_update:
self._create_vertex_data()
self._need_vertex_update = False
|
codesmart-co/bit
|
refs/heads/master
|
connectors/google_adwords/datasources/drive.py
|
1
|
# system
import json
import logging
import zipfile
from io import BytesIO
from datetime import timedelta
from dateutil import parser
# superset
from superset import app
# BIT
from bit.models.datasource import DataSource
config = app.config
class GoogleDriveDataSource(DataSource):
def __init__(self, storage, path, source, name, primary_key_column, adapters, models):
super(GoogleDriveDataSource, self).__init__(
source=source, name=name, primary_key_column=primary_key_column, adapters=adapters, models=models)
self._storage = storage
self._path = path
self._dates = []
self._config_file_path = self._path + "/config.json"
self._config_last_date = None
def open(self, *args, **kwargs):
assert self._storage.exists(self._path)
with self._storage.open(self._config_file_path) as config_file:
config = json.load(config_file)
self._config_last_date = parser.parse(config['lastDate']).date()
prefix_len = len(self._path) + 1
dirs, files = self._storage.listdir(path=self._path)
dates = sorted([
parser.parse(d[prefix_len:]).date()
for d in dirs
])
# info = self._get_sync_info()
# if info:
# last_date = parser.parse(info.last_id).date()
# self._dates = [date for date in dates if last_date < date < self._config_last_date]
# elif config.get('DATA_SOURCE_START_DATE', None):
# last_date = parser.parse(config.get('DATA_SOURCE_START_DATE', None)).date() - timedelta(days=1)
# self._dates = [date for date in dates if last_date < date < self._config_last_date]
# else:
# self._dates = [date for date in dates if date < self._config_last_date]
last_date = parser.parse('2017-08-18').date() - timedelta(days=1)
self._dates = [date for date in dates if
last_date < date < self._config_last_date]
def close(self):
pass
def fetchmany(self):
if not len(self._dates):
return None
date = self._dates.pop(0)
logging.info("Fetch data for {0} report {1}".format(date.isoformat(), self.name))
_dir = "{0}/{1}".format(self._path, date.isoformat())
archive_path = "{0}/{1}".format(_dir, self.name + ".zip")
file_name = self.name + ".json"
with self._storage.open(archive_path) as archive_file:
with zipfile.ZipFile(archive_file) as zf:
content = BytesIO(zf.read(file_name))
content.seek(0)
rows = json.load(content)
if not len(rows):
self._set_last_sync(last={self.primary_key_column: date})
rows = self.fetchmany()
return rows
|
lem8r/website-themes
|
refs/heads/master
|
theme_erpu/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
{
'name': 'ERP Ukraine theme',
'description': 'Theme for erp.co.ua site.',
'website': 'https://erp.co.ua',
'version': '1.3',
'author': 'ERP Ukraine',
'data': [
'views/templates.xml'
],
'depends': [
'website',
'website_blog',
],
}
|
initialed85/mac_os_scripts
|
refs/heads/master
|
mac_os_scripts_tests/enable_security_logging_test.py
|
1
|
import unittest
from hamcrest import assert_that, equal_to
from mock import MagicMock, call
from mac_os_scripts.enable_security_logging import SecurityLoggingEnabler
from mac_os_scripts.utils import RunCommandOutput
_TEST_ENABLE_SECURITY_LOGGING = RunCommandOutput(
stdout='/System/Library/LaunchDaemons/com.apple.auditd.plist: service already loaded',
stderr='',
error_level=0,
)
class SecurityLoggingEnablerTest(unittest.TestCase):
def setUp(self):
self._subject = SecurityLoggingEnabler(
sudo_password='SomePassword',
)
self._subject.run_command = MagicMock()
def test_enable_security_logging(self):
self._subject.run_command.return_value = _TEST_ENABLE_SECURITY_LOGGING
assert_that(
self._subject.enable_security_logging(),
equal_to(True)
)
assert_that(
self._subject.run_command.mock_calls,
equal_to([
call(command_line='/bin/launchctl load -w /System/Library/LaunchDaemons/com.apple.auditd.plist', quiet=True,
sudo_password_override=False, timeout=None, send_lines=None)
])
)
def test_run_pass(self):
self._subject.enable_security_logging = MagicMock()
self._subject.enable_security_logging.return_value = True
assert_that(
self._subject.run(),
equal_to(True)
)
|
jriehl/numba
|
refs/heads/master
|
numba/cuda/simulator/reduction.py
|
7
|
from numba.six.moves import reduce as pyreduce
def Reduce(func):
def reduce_wrapper(seq, res=None, init=0):
r = pyreduce(func, seq, init)
if res is not None:
res[0] = r
return None
else:
return r
return reduce_wrapper
reduce = Reduce
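# Usage sketch (mirrors the CUDA-side API on the simulator):
#
#     sum_reduce = Reduce(lambda a, b: a + b)
#     total = sum_reduce([1, 2, 3, 4], init=0)   # -> 10
#     out = [0]
#     sum_reduce([1, 2, 3, 4], res=out)          # result written to out[0]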
|
sonaht/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/pacman.py
|
21
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "'Aaron Bull Schaefer (@elasticdog)' <aaron@elasticdog.com>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
- When removing a package, force its removal without any
checks. When used with update_cache, force a redownload of the
repo databases.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: no
choices: ["yes", "no"]
aliases: [ 'update-cache' ]
upgrade:
description:
- Whether or not to upgrade the whole system.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when upgrade is set to yes
type: list
sample: ['package', 'other-package']
'''
EXAMPLES = '''
# Install package foo
- pacman:
name: foo
state: present
# Upgrade package foo
- pacman:
name: foo
state: latest
update_cache: yes
# Remove packages foo and bar
- pacman:
name: foo,bar
state: absent
# Recursively remove package baz
- pacman:
name: baz
state: absent
recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman:
update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman:
upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
- pacman:
update_cache: yes
upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman:
name: baz
state: absent
force: yes
'''
import re
def get_version(pacman_output):
"""Take pacman -Qi or pacman -Si output and get the Version"""
lines = pacman_output.split('\n')
for line in lines:
if 'Version' in line:
return line.split(':')[1].strip()
return None
def query_package(module, pacman_path, name, state="present"):
"""Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available
"""
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False, False
# get the version installed locally (if any)
lversion = get_version(lstdout)
rcmd = "%s -Si %s" % (pacman_path, name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version in the repository
rversion = get_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally, and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion), False
# package is installed but cannot fetch remote Version. Last True stands for the error
return True, True, True
def update_package_db(module, pacman_path):
if module.params["force"]:
args = "Syy"
else:
args = "Sy"
cmd = "%s -%s" % (pacman_path, args)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
regex = re.compile('([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
def remove_packages(module, pacman_path, packages):
data = []
diff = {
'before': '',
'after': '',
}
if module.params["recurse"] or module.params["force"]:
if module.params["recurse"]:
args = "Rs"
if module.params["force"]:
args = "Rdd"
if module.params["recurse"] and module.params["force"]:
args = "Rdds"
else:
args = "R"
remove_c = 0
# Use a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package)
if not installed:
continue
cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
if module._diff:
d = stdout.split('\n')[2].split(' ')[2:]
for i, pkg in enumerate(d):
d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
diff['before'] += "%s\n" % pkg
data.append('\n'.join(d))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
install_c = 0
package_err = []
message = ""
data = []
diff = {
'before': '',
'after': '',
}
to_install_repos = []
to_install_files = []
for i, package in enumerate(packages):
# if the package is installed and state == present or state == latest and is up-to-date then skip
installed, updated, latestError = query_package(module, pacman_path, package)
if latestError and state == 'latest':
package_err.append(package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if package_files[i]:
to_install_files.append(package_files[i])
else:
to_install_repos.append(package)
if to_install_repos:
cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
install_c += len(to_install_repos)
if to_install_files:
cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
install_c += len(to_install_files)
if state == 'latest' and len(package_err) > 0:
message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
def check_packages(module, pacman_path, packages, state):
would_be_changed = []
diff = {
'before': '',
'after': '',
'before_header': '',
'after_header': ''
}
for package in packages:
installed, updated, unknown = query_package(module, pacman_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
if module._diff and (state == 'removed'):
diff['before_header'] = 'removed'
diff['before'] = '\n'.join(would_be_changed) + '\n'
elif module._diff and ((state == 'present') or (state == 'latest')):
diff['after_header'] = 'installed'
diff['after'] = '\n'.join(would_be_changed) + '\n'
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state), diff=diff)
else:
module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
def expand_package_groups(module, pacman_path, pkgs):
expanded = []
for pkg in pkgs:
if pkg: # avoid empty strings
cmd = "%s -Sgq %s" % (pacman_path, pkg)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
# A group was found matching the name, so expand it
for name in stdout.split('\n'):
name = name.strip()
if name:
expanded.append(name)
else:
expanded.append(pkg)
return expanded
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg', 'package'], type='list'),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
recurse = dict(default=False, type='bool'),
force = dict(default=False, type='bool'),
upgrade = dict(default=False, type='bool'),
update_cache = dict(default=False, aliases=['update-cache'], type='bool')
),
required_one_of = [['name', 'update_cache', 'upgrade']],
supports_check_mode = True)
pacman_path = module.get_bin_path('pacman', True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p["update_cache"] and not module.check_mode:
update_package_db(module, pacman_path)
if not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Updated the package master lists')
if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Would have updated the package cache')
if p['upgrade']:
upgrade(module, pacman_path)
if p['name']:
pkgs = expand_package_groups(module, pacman_path, p['name'])
pkg_files = []
for i, pkg in enumerate(pkgs):
if not pkg: # avoid empty strings
continue
elif re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
# The package given is a filename, extract the raw pkg name from
# it and store the filename
pkg_files.append(pkg)
pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
else:
pkg_files.append(None)
if module.check_mode:
check_packages(module, pacman_path, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
elif p['state'] == 'absent':
remove_packages(module, pacman_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
|
gibtang/CCNSCoding
|
refs/heads/master
|
external/emscripten/third_party/ply/example/BASIC/basinterp.py
|
166
|
# This file provides the runtime support for running a BASIC program
# Assumes the program has been parsed using basparse.py
import sys
import math
import random
class BasicInterpreter:
# Initialize the interpreter. prog is a dictionary
# containing (line,statement) mappings
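# For example (a sketch; the exact statement tuples are produced by basparse.py):
#
#     prog = {10: ('PRINT', [('HELLO WORLD', None)], ''),
#             20: ('END',)}
#     BasicInterpreter(prog).run()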
def __init__(self,prog):
self.prog = prog
self.functions = { # Built-in function table
'SIN' : lambda z: math.sin(self.eval(z)),
'COS' : lambda z: math.cos(self.eval(z)),
'TAN' : lambda z: math.tan(self.eval(z)),
'ATN' : lambda z: math.atan(self.eval(z)),
'EXP' : lambda z: math.exp(self.eval(z)),
'ABS' : lambda z: abs(self.eval(z)),
'LOG' : lambda z: math.log(self.eval(z)),
'SQR' : lambda z: math.sqrt(self.eval(z)),
'INT' : lambda z: int(self.eval(z)),
'RND' : lambda z: random.random()
}
# Collect all data statements
def collect_data(self):
self.data = []
for lineno in self.stat:
if self.prog[lineno][0] == 'DATA':
self.data = self.data + self.prog[lineno][1]
self.dc = 0 # Initialize the data counter
# Check for end statements
def check_end(self):
has_end = 0
for lineno in self.stat:
if self.prog[lineno][0] == 'END' and not has_end:
has_end = lineno
if not has_end:
print("NO END INSTRUCTION")
self.error = 1
return
if has_end != lineno:
print("END IS NOT LAST")
self.error = 1
# Check loops
def check_loops(self):
for pc in range(len(self.stat)):
lineno = self.stat[pc]
if self.prog[lineno][0] == 'FOR':
forinst = self.prog[lineno]
loopvar = forinst[1]
for i in range(pc+1,len(self.stat)):
if self.prog[self.stat[i]][0] == 'NEXT':
nextvar = self.prog[self.stat[i]][1]
if nextvar != loopvar: continue
self.loopend[pc] = i
break
else:
print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
self.error = 1
# Evaluate an expression
def eval(self,expr):
etype = expr[0]
if etype == 'NUM': return expr[1]
elif etype == 'GROUP': return self.eval(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return -self.eval(expr[2])
elif etype == 'BINOP':
if expr[1] == '+': return self.eval(expr[2])+self.eval(expr[3])
elif expr[1] == '-': return self.eval(expr[2])-self.eval(expr[3])
elif expr[1] == '*': return self.eval(expr[2])*self.eval(expr[3])
elif expr[1] == '/': return float(self.eval(expr[2]))/self.eval(expr[3])
elif expr[1] == '^': return abs(self.eval(expr[2]))**self.eval(expr[3])
elif etype == 'VAR':
var,dim1,dim2 = expr[1]
if not dim1 and not dim2:
if var in self.vars:
return self.vars[var]
else:
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# May be a list lookup or a function evaluation
if dim1 and not dim2:
if var in self.functions:
# A function
return self.functions[var](dim1)
else:
# A list evaluation
if var in self.lists:
dim1val = self.eval(dim1)
if dim1val < 1 or dim1val > len(self.lists[var]):
print("LIST INDEX OUT OF BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.lists[var][dim1val-1]
if dim1 and dim2:
if var in self.tables:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.tables[var][dim1val-1][dim2val-1]
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# Evaluate a relational expression
def releval(self,expr):
etype = expr[1]
lhs = self.eval(expr[2])
rhs = self.eval(expr[3])
if etype == '<':
if lhs < rhs: return 1
else: return 0
elif etype == '<=':
if lhs <= rhs: return 1
else: return 0
elif etype == '>':
if lhs > rhs: return 1
else: return 0
elif etype == '>=':
if lhs >= rhs: return 1
else: return 0
elif etype == '=':
if lhs == rhs: return 1
else: return 0
elif etype == '<>':
if lhs != rhs: return 1
else: return 0
# Assignment
def assign(self,target,value):
var, dim1, dim2 = target
if not dim1 and not dim2:
self.vars[var] = self.eval(value)
elif dim1 and not dim2:
# List assignment
dim1val = self.eval(dim1)
if not var in self.lists:
self.lists[var] = [0]*10
if dim1val > len(self.lists[var]):
print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.lists[var][dim1val-1] = self.eval(value)
elif dim1 and dim2:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if not var in self.tables:
temp = [0]*10
v = []
for i in range(10): v.append(temp[:])
self.tables[var] = v
# Variable already exists
if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.tables[var][dim1val-1][dim2val-1] = self.eval(value)
# Change the current line number
def goto(self,linenum):
if not linenum in self.prog:
print("UNDEFINED LINE NUMBER %d AT LINE %d" % (linenum, self.stat[self.pc]))
raise RuntimeError
self.pc = self.stat.index(linenum)
# Run it
def run(self):
self.vars = { } # All variables
self.lists = { } # List variables
self.tables = { } # Tables
self.loops = [ ] # Currently active loops
self.loopend= { } # Mapping saying where loops end
self.gosub = None # Gosub return point (if any)
self.error = 0 # Indicates program error
self.stat = list(self.prog) # Ordered list of all line numbers
self.stat.sort()
self.pc = 0 # Current program counter
# Processing prior to running
self.collect_data() # Collect all of the data statements
self.check_end()
self.check_loops()
if self.error: raise RuntimeError
while 1:
line = self.stat[self.pc]
instr = self.prog[line]
op = instr[0]
# END and STOP statements
if op == 'END' or op == 'STOP':
break # We're done
# GOTO statement
elif op == 'GOTO':
newline = instr[1]
self.goto(newline)
continue
# PRINT statement
elif op == 'PRINT':
plist = instr[1]
out = ""
for label,val in plist:
if out:
out += ' '*(15 - (len(out) % 15))
out += label
if val:
if label: out += " "
eval = self.eval(val)
out += str(eval)
sys.stdout.write(out)
end = instr[2]
if not (end == ',' or end == ';'):
sys.stdout.write("\n")
if end == ',': sys.stdout.write(" "*(15-(len(out) % 15)))
if end == ';': sys.stdout.write(" "*(3-(len(out) % 3)))
# LET statement
elif op == 'LET':
target = instr[1]
value = instr[2]
self.assign(target,value)
# READ statement
elif op == 'READ':
for target in instr[1]:
if self.dc < len(self.data):
value = ('NUM',self.data[self.dc])
self.assign(target,value)
self.dc += 1
else:
# No more data. Program ends
return
elif op == 'IF':
relop = instr[1]
newline = instr[2]
if (self.releval(relop)):
self.goto(newline)
continue
elif op == 'FOR':
loopvar = instr[1]
initval = instr[2]
finval = instr[3]
stepval = instr[4]
# Check to see if this is a new loop
if not self.loops or self.loops[-1][0] != self.pc:
# Looks like a new loop. Make the initial assignment
newvalue = initval
self.assign((loopvar,None,None),initval)
if not stepval: stepval = ('NUM',1)
stepval = self.eval(stepval) # Evaluate step here
self.loops.append((self.pc,stepval))
else:
# It's a repeat of the previous loop
# Update the value of the loop variable according to the step
stepval = ('NUM',self.loops[-1][1])
newvalue = ('BINOP','+',('VAR',(loopvar,None,None)),stepval)
if self.loops[-1][1] < 0: relop = '>='
else: relop = '<='
if not self.releval(('RELOP',relop,newvalue,finval)):
# Loop is done. Jump to the NEXT
self.pc = self.loopend[self.pc]
self.loops.pop()
else:
self.assign((loopvar,None,None),newvalue)
elif op == 'NEXT':
if not self.loops:
print("NEXT WITHOUT FOR AT LINE %s" % line)
return
nextvar = instr[1]
self.pc = self.loops[-1][0]
loopinst = self.prog[self.stat[self.pc]]
forvar = loopinst[1]
if nextvar != forvar:
print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
return
continue
elif op == 'GOSUB':
newline = instr[1]
if self.gosub:
print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
return
self.gosub = self.stat[self.pc]
self.goto(newline)
continue
elif op == 'RETURN':
if not self.gosub:
print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
return
self.goto(self.gosub)
self.gosub = None
elif op == 'FUNC':
fname = instr[1]
pname = instr[2]
expr = instr[3]
def eval_func(pvalue,name=pname,self=self,expr=expr):
self.assign((pname,None,None),pvalue)
return self.eval(expr)
self.functions[fname] = eval_func
elif op == 'DIM':
for vname,x,y in instr[1]:
if y == 0:
# Single dimension variable
self.lists[vname] = [0]*x
else:
# Double dimension variable
temp = [0]*y
v = []
for i in range(x):
v.append(temp[:])
self.tables[vname] = v
self.pc += 1
# Utility functions for program listing
def expr_str(self,expr):
etype = expr[0]
if etype == 'NUM': return str(expr[1])
elif etype == 'GROUP': return "(%s)" % self.expr_str(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return "-"+str(expr[2])
elif etype == 'BINOP':
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
elif etype == 'VAR':
return self.var_str(expr[1])
def relexpr_str(self,expr):
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
def var_str(self,var):
varname,dim1,dim2 = var
if not dim1 and not dim2: return varname
if dim1 and not dim2: return "%s(%s)" % (varname, self.expr_str(dim1))
return "%s(%s,%s)" % (varname, self.expr_str(dim1),self.expr_str(dim2))
# Create a program listing
def list(self):
stat = list(self.prog) # Ordered list of all line numbers
stat.sort()
for line in stat:
instr = self.prog[line]
op = instr[0]
if op in ['END','STOP','RETURN']:
print("%s %s" % (line, op))
continue
elif op == 'REM':
print("%s %s" % (line, instr[1]))
elif op == 'PRINT':
_out = "%s %s " % (line, op)
first = 1
for p in instr[1]:
if not first: _out += ", "
if p[0] and p[1]: _out += '"%s"%s' % (p[0],self.expr_str(p[1]))
elif p[1]: _out += self.expr_str(p[1])
else: _out += '"%s"' % (p[0],)
first = 0
if instr[2]: _out += instr[2]
print(_out)
elif op == 'LET':
print("%s LET %s = %s" % (line,self.var_str(instr[1]),self.expr_str(instr[2])))
elif op == 'READ':
_out = "%s READ " % line
first = 1
for r in instr[1]:
if not first: _out += ","
_out += self.var_str(r)
first = 0
print(_out)
elif op == 'IF':
print("%s IF %s THEN %d" % (line,self.relexpr_str(instr[1]),instr[2]))
elif op == 'GOTO' or op == 'GOSUB':
print("%s %s %s" % (line, op, instr[1]))
elif op == 'FOR':
_out = "%s FOR %s = %s TO %s" % (line,instr[1],self.expr_str(instr[2]),self.expr_str(instr[3]))
if instr[4]: _out += " STEP %s" % (self.expr_str(instr[4]))
print(_out)
elif op == 'NEXT':
print("%s NEXT %s" % (line, instr[1]))
elif op == 'FUNC':
print("%s DEF %s(%s) = %s" % (line,instr[1],instr[2],self.expr_str(instr[3])))
elif op == 'DIM':
_out = "%s DIM " % line
first = 1
for vname,x,y in instr[1]:
if not first: _out += ","
first = 0
if y == 0:
_out += "%s(%d)" % (vname,x)
else:
_out += "%s(%d,%d)" % (vname,x,y)
print(_out)
elif op == 'DATA':
_out = "%s DATA " % line
first = 1
for v in instr[1]:
if not first: _out += ","
first = 0
_out += v
print(_out)
# Erase the current program
def new(self):
self.prog = {}
# Insert statements
def add_statements(self,prog):
for line,stat in prog.items():
self.prog[line] = stat
# Delete a statement
def del_line(self,lineno):
try:
del self.prog[lineno]
except KeyError:
pass
|
nelango/ViralityAnalysis
|
refs/heads/master
|
model/lib/sklearn/feature_extraction/stop_words.py
|
290
|
# This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
ENGLISH_STOP_WORDS = frozenset([
"a", "about", "above", "across", "after", "afterwards", "again", "against",
"all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself",
"yourselves"])
|
YeelerG/twilio-python
|
refs/heads/master
|
twilio/rest/resources/connect_apps.py
|
51
|
from . import InstanceResource, ListResource
from six import iteritems
class ConnectApp(InstanceResource):
""" An authorized connect app """
pass
class ConnectApps(ListResource):
""" A list of Connect App resources """
name = "ConnectApps"
instance = ConnectApp
key = "connect_apps"
def list(self, **kwargs):
"""
Returns a page of :class:`ConnectApp` resources as a list. For paging
information see :class:`ListResource`
"""
return self.get_instances(kwargs)
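# Usage sketch (assumes a configured REST client that exposes this list
# resource as `client.connect_apps`; attribute names are illustrative and
# follow the Twilio API's snake_case convention):
#
#     for app in client.connect_apps.list():
#         print(app.friendly_name)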
class AuthorizedConnectApp(ConnectApp):
""" An authorized connect app """
id_key = "connect_app_sid"
def load(self, entries):
""" Translate certain parameters into others"""
result = {}
for k, v in iteritems(entries):
k = k.replace("connect_app_", "")
result[k] = v
super(AuthorizedConnectApp, self).load(result)
class AuthorizedConnectApps(ConnectApps):
""" A list of Authorized Connect App resources """
name = "AuthorizedConnectApps"
instance = AuthorizedConnectApp
key = "authorized_connect_apps"
|
Lautitia/newfies-dialer
|
refs/heads/master
|
newfies/mod_mailer/admin.py
|
4
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib import admin
# from django.utils.translation import ugettext as _
from mod_mailer.models import MailTemplate, MailSpooler
class MailTemplateAdmin(admin.ModelAdmin):
list_display = ('id', 'template_key', 'label', 'from_email', 'from_name', 'subject', 'created_date')
list_display_links = ['id', 'template_key']
admin.site.register(MailTemplate, MailTemplateAdmin)
#MailSpooler
class MailSpoolerAdmin(admin.ModelAdmin):
list_display = ('id', 'mailtemplate', 'contact_email', 'mailspooler_type', 'created_date')
list_display_links = ['id', 'mailtemplate']
#raw_id_fields = ('contact',)
admin.site.register(MailSpooler, MailSpoolerAdmin)
|
matpow2/cuwo
|
refs/heads/master
|
cuwo/strings.py
|
1
|
# Copyright (c) Mathias Kaerlev 2013-2017.
#
# This file is part of cuwo.
#
# cuwo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cuwo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cuwo. If not, see <http://www.gnu.org/licenses/>.
"""
Constant string definitions
NOTE: This file is automatically generated. Do not modify.
"""
SOUND_NAMES = {
0: 'hit',
1: 'blade1',
2: 'blade2',
3: 'long-blade1',
4: 'long-blade2',
5: 'hit1',
6: 'hit2',
7: 'punch1',
8: 'punch2',
9: 'hit-arrow',
10: 'hit-arrow-critical',
11: 'smash1',
12: 'slam-ground',
13: 'smash-hit2',
14: 'smash-jump',
15: 'swing',
16: 'shield-swing',
17: 'swing-slow',
18: 'swing-slow2',
19: 'arrow-destroy',
20: 'blade1',
21: 'punch2',
22: 'salvo2',
23: 'sword-hit03',
24: 'block',
25: 'shield-slam',
26: 'roll',
27: 'destroy2',
28: 'cry',
29: 'levelup2',
30: 'missioncomplete',
31: 'water-splash01',
32: 'step2',
33: 'step-water',
34: 'step-water2',
35: 'step-water3',
36: 'channel2',
37: 'channel-hit',
38: 'fireball',
39: 'fire-hit',
40: 'magic02',
41: 'watersplash',
42: 'watersplash-hit',
43: 'lich-scream',
44: 'drink2',
45: 'pickup',
46: 'disenchant2',
47: 'upgrade2',
48: 'swirl',
49: 'human-voice01',
50: 'human-voice02',
51: 'gate',
52: 'spike-trap',
53: 'fire-trap',
54: 'lever',
55: 'charge2',
56: 'magic02',
57: 'drop',
58: 'drop-coin',
59: 'drop-item',
60: 'male-groan',
61: 'female-groan',
62: 'male-groan',
63: 'female-groan',
64: 'goblin-male-groan',
65: 'goblin-female-groan',
66: 'lizard-male-groan',
67: 'lizard-female-groan',
68: 'dwarf-male-groan',
69: 'dwarf-female-groan',
70: 'orc-male-groan',
71: 'orc-female-groan',
72: 'undead-male-groan',
73: 'undead-female-groan',
74: 'frogman-male-groan',
75: 'frogman-female-groan',
76: 'monster-groan',
77: 'troll-groan',
78: 'mole-groan',
79: 'slime-groan',
80: 'zombie-groan',
81: 'Explosion',
82: 'punch2',
83: 'menu-open2',
84: 'menu-close2',
85: 'menu-select',
86: 'menu-tab',
87: 'menu-grab-item',
88: 'menu-drop-item',
89: 'craft',
90: 'craft-proc',
91: 'absorb',
92: 'manashield',
93: 'bulwark',
94: 'bird1',
95: 'bird2',
96: 'bird3',
97: 'cricket1',
98: 'cricket2',
99: 'owl1',
100: 'owl2'
}
SOUND_IDS = {v: k for k, v in SOUND_NAMES.items()}
MODEL_NAMES = {
-1: None,
0: 'body2',
1: 'body4',
2: 'body3',
3: 'wizard-head',
4: 'wizard-body',
5: 'witch-head',
6: 'witch-body',
7: 'glove',
8: 'head2',
9: 'girl-head3',
10: 'elf-head-female',
11: 'old-head',
12: 'gnoll-head',
13: 'gnoll-body',
14: 'gnoll-hand',
15: 'gnoll-foot',
16: 'polar-gnoll-head',
17: 'polar-gnoll-body',
18: 'polar-gnoll-hand',
19: 'polar-gnoll-foot',
20: 'monkey-head',
21: 'monkey-body',
22: 'monkey-hand',
23: 'monkey-foot',
24: 'troll-head',
25: 'troll-body',
26: 'troll-arm',
27: 'troll-hand',
28: 'troll-foot',
29: 'dark-troll-eyes',
30: 'dark-troll-head',
31: 'dark-troll-body',
32: 'dark-troll-arm',
33: 'dark-troll-hand',
34: 'dark-troll-foot',
35: 'hell-demon-head',
36: 'hell-demon-body',
37: 'hell-demon-arm',
38: 'hell-demon-hand',
39: 'hell-demon-foot',
40: 'golem-head',
41: 'golem-body',
42: 'golem-arm',
43: 'golem-hand',
44: 'golem-foot',
45: 'golem-ember-head',
46: 'golem-ember-body',
47: 'golem-ember-arm',
48: 'golem-ember-hand',
49: 'golem-ember-foot',
50: 'golem-snow-head',
51: 'golem-snow-body',
52: 'golem-snow-arm',
53: 'golem-snow-hand',
54: 'golem-snow-foot',
55: 'yeti-head',
56: 'yeti-body',
57: 'yeti-arm',
58: 'yeti-hand',
59: 'yeti-foot',
60: 'ogre-head',
61: 'ogre-body',
62: 'ogre-hand',
63: 'ogre-foot',
64: 'rockling-head',
65: 'rockling-hand',
66: 'rockling-foot',
67: 'cyclops-head',
68: 'cyclops-body',
69: 'cyclops-arm',
70: 'cyclops-hand',
71: 'cyclops-foot',
72: 'mammoth-head',
73: 'mammoth-body',
74: 'mammoth-foot',
75: 'goblin-head-m01',
76: 'goblin-head-m02',
77: 'goblin-head-m03',
78: 'goblin-head-m04',
79: 'goblin-head-m05',
80: 'goblin-hair-m01',
81: 'goblin-hair-m02',
82: 'goblin-hair-m03',
83: 'goblin-hair-m04',
84: 'goblin-hair-m05',
85: 'goblin-hair-m06',
86: 'goblin-head-f01',
87: 'goblin-head-f02',
88: 'goblin-head-f03',
89: 'goblin-head-f04',
90: 'goblin-head-f05',
91: 'goblin-hair-f01',
92: 'goblin-hair-f02',
93: 'goblin-hair-f03',
94: 'goblin-hair-f04',
95: 'goblin-hair-f05',
96: 'goblin-hair-f06',
97: 'goblin-hand',
98: 'lizard-head-m01',
99: 'lizard-head-m02',
100: 'lizard-hair-m01',
101: 'lizard-hair-m02',
102: 'lizard-hair-m03',
103: 'lizard-hair-m04',
104: 'lizard-hair-m05',
105: 'lizard-hair-m06',
106: 'lizard-head-f01',
107: 'lizard-head-f02',
108: 'lizard-head-f03',
109: 'lizard-head-f04',
110: 'lizard-head-f05',
111: 'lizard-hand',
112: 'lizard-body',
113: 'lizard-foot',
114: 'frog-body',
115: 'frog-head',
116: 'frog-foot',
117: 'frog-hand',
118: 'plant-creature-body',
119: 'plant-creature-head',
120: 'plant-creature-foot',
121: 'plant-creature-hand',
122: 'radish-creature-head',
123: 'radish-creature-foot',
124: 'radish-creature-hand',
125: 'onionling-head',
126: 'onionling-foot',
127: 'desert-onionling',
128: 'desert-onionling-foot',
129: 'devourer-head',
130: 'devourer-foot',
131: 'mole-body',
132: 'mole-head',
133: 'mole-foot',
134: 'mole-hand',
135: 'biter-body',
136: 'biter-head',
137: 'biter-foot',
138: 'biter-hand',
139: 'koala-body',
140: 'koala-head',
141: 'koala-foot',
142: 'koala-hand',
143: 'squirrel-body',
144: 'squirrel-head',
145: 'squirrel-foot',
146: 'squirrel-hand',
147: 'raccoon-body',
148: 'raccoon-head',
149: 'raccoon-foot',
150: 'raccoon-hand',
151: 'owl-head',
152: 'owl-foot',
153: 'owl-hand',
154: 'bunny-body',
155: 'bunny-foot',
156: 'porcupine-body',
157: 'porcupine-foot',
158: 'squid-body',
159: 'squid-foot',
160: 'spike-head',
161: 'spike-foot',
162: 'spike-body',
163: 'spike-hand',
164: 'anubis-head',
165: 'anubis-foot',
166: 'anubis-body',
167: 'anubis-hand',
168: 'horus-head',
169: 'horus-foot',
170: 'horus-body',
171: 'horus-hand',
172: 'jester-head',
173: 'jester-foot',
174: 'jester-body',
175: 'jester-hand',
176: 'spectrino-head',
177: 'spectrino-foot',
178: 'spectrino-body',
179: 'spectrino-hand',
180: 'desert-nomad-head1',
181: 'desert-nomad-head2',
182: 'desert-nomad-head3',
183: 'desert-nomad-female-head1',
184: 'desert-nomad-female-head2',
185: 'desert-nomad-female-head3',
186: 'desert-nomad-foot',
187: 'desert-nomad-body',
188: 'desert-nomad-hand',
189: 'desert-nomad-hand2',
190: 'djinn-head',
191: 'djinn-foot',
192: 'djinn-body',
193: 'djinn-hand',
194: 'minotaur-head',
195: 'minotaur-foot',
196: 'minotaur-body',
197: 'minotaur-hand',
198: 'minotaur-arm',
199: 'imp-eyes',
200: 'imp-head',
201: 'imp-foot',
202: 'imp-body',
203: 'imp-hand',
204: 'penguin-body',
205: 'penguin-head',
206: 'penguin-foot',
207: 'penguin-hand',
208: 'crab-body',
209: 'crab-head',
210: 'crab-foot',
211: 'crab-hand',
212: 'crab-body-blue',
213: 'crab-head-blue',
214: 'crab-foot-blue',
215: 'crab-hand-blue',
216: 'barkbeetle-head',
217: 'barkbeetle-foot',
218: 'firebeetle-head',
219: 'firebeetle-foot',
220: 'snoutbeetle-head',
221: 'snoutbeetle-foot',
222: 'lemonbeetle-head',
223: 'lemonbeetle-foot',
224: 'santa-body',
225: 'santa-head',
226: 'zombie-body',
227: 'zombie-head',
228: 'foot',
229: 'zombie-hand',
230: 'hornet-body',
231: 'hornet-head',
232: 'hornet-foot',
233: 'hornet-hand',
234: 'insect-guard-body',
235: 'insect-guard-head',
236: 'insect-guard-foot',
237: 'insect-guard-hand',
238: 'insect-guard-wing',
239: 'fly-body',
240: 'fly-head',
241: 'fly-foot',
242: 'fly-hand',
243: 'bumblebee',
244: 'bumblebee-hand',
245: 'bumblebee-foot',
246: 'midge-body',
247: 'midge-head',
248: 'midge-foot',
249: 'midge-hand',
250: 'mosquito-body',
251: 'mosquito-head',
252: 'mosquito-foot',
253: 'mosquito-hand',
254: 'crow-body',
255: 'crow-head',
256: 'crow-foot',
257: 'crow-hand',
258: 'chicken-body',
259: 'chicken-head',
260: 'chicken-foot',
261: 'chicken-hand',
262: 'seagull-body',
263: 'seagull-head',
264: 'seagull-foot',
265: 'seagull-hand',
266: 'parrot-body',
267: 'parrot-head',
268: 'parrot-foot',
269: 'parrot-hand',
270: 'parrot-blue-body',
271: 'parrot-blue-head',
272: 'parrot-blue-foot',
273: 'parrot-blue-hand',
274: 'bat-body',
275: 'bat-head',
276: 'bat-foot',
277: 'bat-hand',
278: 'lich-body',
279: 'lich-head',
280: 'lich-hand',
281: 'lich-arm',
282: 'dwarf-head-m01',
283: 'dwarf-head-m02',
284: 'dwarf-head-m03',
285: 'dwarf-head-m04',
286: 'dwarf-head-m05',
287: 'dwarf-hair-m01',
288: 'dwarf-hair-m02',
289: 'dwarf-hair-m03',
290: 'dwarf-head-f01',
291: 'dwarf-head-f02',
292: 'dwarf-head-f03',
293: 'dwarf-head-f04',
294: 'dwarf-head-f05',
295: 'dwarf-hair-f01',
296: 'dwarf-hair-f02',
297: 'dwarf-hair-f03',
298: 'dwarf-hair-f04',
299: 'dwarf-hair-f05',
300: 'dwarf-body',
301: 'dwarf-body-female',
302: 'orc-hand',
303: 'undead-head-m01',
304: 'undead-head-m02',
305: 'undead-head-m03',
306: 'undead-head-m04',
307: 'undead-head-m05',
308: 'undead-head-m06',
309: 'undead-hair-m01',
310: 'undead-hair-m02',
311: 'undead-hair-m03',
312: 'undead-hair-m04',
313: 'undead-hair-m05',
314: 'undead-hair-m06',
315: 'undead-head-f01',
316: 'undead-head-f02',
317: 'undead-head-f03',
318: 'undead-head-f04',
319: 'undead-head-f05',
320: 'undead-head-f06',
321: 'undead-hair-f01',
322: 'undead-hair-f02',
323: 'undead-hair-f03',
324: 'undead-hair-f04',
325: 'undead-hair-f05',
326: 'undead-hair-f06',
327: 'undead-hand',
328: 'skeleton-head',
329: 'skeleton-eyes',
330: 'skeleton-hand',
331: 'skeleton-body',
332: 'skeleton-foot',
333: 'plainrunner-body',
334: 'plainrunner-foot',
335: 'leafrunner-body',
336: 'leafrunner-foot',
337: 'snowrunner-body',
338: 'snowrunner-foot',
339: 'desertrunner-body',
340: 'desertrunner-foot',
341: 'peacock-body',
342: 'peacock-foot',
343: 'peacock-head',
344: 'pony-body',
345: 'pony-head',
346: 'pony-foot',
347: 'pony-tail',
348: 'camel-body',
349: 'camel-head',
350: 'camel-foot',
351: 'cow-body',
352: 'cow-head',
353: 'cow-foot',
354: 'collie-body',
355: 'collie-head',
356: 'collie-hand',
357: 'collie-foot',
358: 'collie-tail',
359: 'shepherd-dog-body',
360: 'shepherd-dog-head',
361: 'shepherd-dog-hand',
362: 'shepherd-dog-foot',
363: 'shepherd-dog-tail',
364: 'skull-bull-body',
365: 'skull-bull-head',
366: 'skull-bull-hand',
367: 'skull-bull-foot',
368: 'skull-bull-tail',
369: 'alpaca-body',
370: 'alpaca-head',
371: 'alpaca-hand',
372: 'alpaca-foot',
373: 'alpaca-brown-body',
374: 'alpaca-brown-head',
375: 'alpaca-brown-hand',
376: 'alpaca-brown-foot',
377: 'dog-body2',
378: 'dog-head2',
379: 'dog-hand2',
380: 'dog-foot2',
381: 'dog-tail2',
382: 'scottish-terrier-body',
383: 'scottish-terrier-head',
384: 'scottish-terrier-hand',
385: 'scottish-terrier-foot',
386: 'scottish-terrier-tail',
387: 'wolf-body',
388: 'wolf-head',
389: 'wolf-hand',
390: 'wolf-foot',
391: 'wolf-tail',
392: 'panther-body',
393: 'panther-head',
394: 'panther-hand',
395: 'panther-foot',
396: 'panther-tail',
397: 'cat-body',
398: 'cat-head',
399: 'cat-hand',
400: 'cat-foot',
401: 'cat-tail',
402: 'cat-body2',
403: 'cat-head2',
404: 'cat-hand2',
405: 'cat-foot2',
406: 'cat-tail2',
407: 'cat-body3',
408: 'cat-head3',
409: 'cat-hand3',
410: 'cat-foot3',
411: 'cat-tail3',
412: 'pig-body',
413: 'pig-head',
414: 'pig-foot',
415: 'sheep-body',
416: 'sheep-head',
417: 'sheep-foot',
418: 'duckbill-body',
419: 'duckbill-head',
420: 'duckbill-foot',
421: 'duckbill-tail',
422: 'crocodile-body',
423: 'crocodile-head',
424: 'crocodile-foot',
425: 'dragon-body',
426: 'dragon-head',
427: 'dragon-foot',
428: 'dragon-wing',
429: 'dragon-tail',
430: 'hand2',
431: 'brown-hand',
432: 'foot',
433: 'boot',
434: 'wood-staff1',
435: 'wood-staff2',
436: 'wood-staff3',
437: 'wood-staff4',
438: 'wood-staff6',
439: 'wood-staff1-random1',
440: 'wood-staff2-random1',
441: 'wood-staff3-random1',
442: 'wood-staff4-random1',
443: 'wood-staff5-random1',
444: 'wood-staff1-random2',
445: 'wood-staff2-random2',
446: 'wood-staff3-random2',
447: 'wood-staff4-random2',
448: 'wood-staff5-random2',
449: 'wood-staff1-random3',
450: 'wood-staff2-random3',
451: 'wood-staff3-random3',
452: 'wood-staff4-random3',
453: 'wood-staff5-random3',
454: 'wood-staff1-random4',
455: 'wood-staff2-random4',
456: 'wood-staff3-random4',
457: 'wood-staff4-random4',
458: 'wood-staff5-random4',
459: 'wood-staff1-random5',
460: 'wood-staff2-random5',
461: 'wood-staff3-random5',
462: 'wood-staff4-random5',
463: 'wood-staff5-random5',
464: 'wood-staff1-random6',
465: 'wood-staff2-random6',
466: 'wood-staff3-random6',
467: 'wood-staff4-random6',
468: 'wood-staff5-random6',
469: 'wood-staff1-random7',
470: 'wood-staff2-random7',
471: 'wood-staff3-random7',
472: 'wood-staff4-random7',
473: 'wood-staff5-random7',
474: 'wood-staff1-random8',
475: 'wood-staff2-random8',
476: 'wood-staff3-random8',
477: 'wood-staff4-random8',
478: 'wood-staff5-random8',
479: 'wood-staff1-random9',
480: 'wood-staff2-random9',
481: 'wood-staff3-random9',
482: 'wood-staff4-random9',
483: 'wood-staff5-random9',
484: 'wood-staff1-random10',
485: 'wood-staff2-random10',
486: 'wood-staff3-random10',
487: 'wood-staff4-random10',
488: 'wood-staff5-random10',
489: 'wood-wand1',
490: 'wood-wand2',
491: 'wood-wand3',
492: 'wood-wand4',
493: 'wood-wand5',
494: 'wood-wand1-random1',
495: 'wood-wand2-random1',
496: 'wood-wand3-random1',
497: 'wood-wand4-random1',
498: 'wood-wand5-random1',
499: 'wood-wand1-random2',
500: 'wood-wand2-random2',
501: 'wood-wand3-random2',
502: 'wood-wand4-random2',
503: 'wood-wand5-random2',
504: 'wood-wand1-random3',
505: 'wood-wand2-random3',
506: 'wood-wand3-random3',
507: 'wood-wand4-random3',
508: 'wood-wand5-random3',
509: 'wood-wand1-random4',
510: 'wood-wand2-random4',
511: 'wood-wand3-random4',
512: 'wood-wand4-random4',
513: 'wood-wand5-random4',
514: 'wood-wand1-random5',
515: 'wood-wand2-random5',
516: 'wood-wand3-random5',
517: 'wood-wand4-random5',
518: 'wood-wand5-random5',
519: 'wood-wand1-random6',
520: 'wood-wand2-random6',
521: 'wood-wand3-random6',
522: 'wood-wand4-random6',
523: 'wood-wand5-random6',
524: 'wood-wand1-random7',
525: 'wood-wand2-random7',
526: 'wood-wand3-random7',
527: 'wood-wand4-random7',
528: 'wood-wand5-random7',
529: 'wood-wand1-random8',
530: 'wood-wand2-random8',
531: 'wood-wand3-random8',
532: 'wood-wand4-random8',
533: 'wood-wand5-random8',
534: 'wood-wand1-random9',
535: 'wood-wand2-random9',
536: 'wood-wand3-random9',
537: 'wood-wand4-random9',
538: 'wood-wand5-random9',
539: 'wood-wand1-random10',
540: 'wood-wand2-random10',
541: 'wood-wand3-random10',
542: 'wood-wand4-random10',
543: 'wood-wand5-random10',
544: 'gold-bracelet1',
545: 'gold-bracelet2',
546: 'gold-bracelet3',
547: 'gold-bracelet4',
548: 'gold-bracelet5',
549: 'gold-bracelet1-random1',
550: 'gold-bracelet2-random1',
551: 'gold-bracelet3-random1',
552: 'gold-bracelet4-random1',
553: 'gold-bracelet5-random1',
554: 'gold-bracelet1-random2',
555: 'gold-bracelet2-random2',
556: 'gold-bracelet3-random2',
557: 'gold-bracelet4-random2',
558: 'gold-bracelet5-random2',
559: 'gold-bracelet1-random3',
560: 'gold-bracelet2-random3',
561: 'gold-bracelet3-random3',
562: 'gold-bracelet4-random3',
563: 'gold-bracelet5-random3',
564: 'gold-bracelet1-random4',
565: 'gold-bracelet2-random4',
566: 'gold-bracelet3-random4',
567: 'gold-bracelet4-random4',
568: 'gold-bracelet5-random4',
569: 'gold-bracelet1-random5',
570: 'gold-bracelet2-random5',
571: 'gold-bracelet3-random5',
572: 'gold-bracelet4-random5',
573: 'gold-bracelet5-random5',
574: 'silver-bracelet1',
575: 'silver-bracelet2',
576: 'silver-bracelet3',
577: 'silver-bracelet4',
578: 'silver-bracelet5',
579: 'silver-bracelet1-random1',
580: 'silver-bracelet2-random1',
581: 'silver-bracelet3-random1',
582: 'silver-bracelet4-random1',
583: 'silver-bracelet5-random1',
584: 'silver-bracelet1-random2',
585: 'silver-bracelet2-random2',
586: 'silver-bracelet3-random2',
587: 'silver-bracelet4-random2',
588: 'silver-bracelet5-random2',
589: 'silver-bracelet1-random3',
590: 'silver-bracelet2-random3',
591: 'silver-bracelet3-random3',
592: 'silver-bracelet4-random3',
593: 'silver-bracelet5-random3',
594: 'silver-bracelet1-random4',
595: 'silver-bracelet2-random4',
596: 'silver-bracelet3-random4',
597: 'silver-bracelet4-random4',
598: 'silver-bracelet5-random4',
599: 'silver-bracelet1-random5',
600: 'silver-bracelet2-random5',
601: 'silver-bracelet3-random5',
602: 'silver-bracelet4-random5',
603: 'silver-bracelet5-random5',
604: 'obsidian-staff1',
605: 'obsidian-staff2',
606: 'obsidian-staff3',
607: 'obsidian-staff4',
608: 'obsidian-staff5',
609: 'iron-mace1',
610: 'iron-mace2',
611: 'iron-mace3',
612: 'iron-mace4',
613: 'iron-mace5',
614: 'iron-mace1-random1',
615: 'iron-mace2-random1',
616: 'iron-mace3-random1',
617: 'iron-mace4-random1',
618: 'iron-mace5-random1',
619: 'iron-mace1-random2',
620: 'iron-mace2-random2',
621: 'iron-mace3-random2',
622: 'iron-mace4-random2',
623: 'iron-mace5-random2',
624: 'iron-mace1-random3',
625: 'iron-mace2-random3',
626: 'iron-mace3-random3',
627: 'iron-mace4-random3',
628: 'iron-mace5-random3',
629: 'iron-mace1-random4',
630: 'iron-mace2-random4',
631: 'iron-mace3-random4',
632: 'iron-mace4-random4',
633: 'iron-mace5-random4',
634: 'iron-mace1-random5',
635: 'iron-mace2-random5',
636: 'iron-mace3-random5',
637: 'iron-mace4-random5',
638: 'iron-mace5-random5',
639: 'iron-mace1-random6',
640: 'iron-mace2-random6',
641: 'iron-mace3-random6',
642: 'iron-mace4-random6',
643: 'iron-mace5-random6',
644: 'iron-mace1-random7',
645: 'iron-mace2-random7',
646: 'iron-mace3-random7',
647: 'iron-mace4-random7',
648: 'iron-mace5-random7',
649: 'iron-mace1-random8',
650: 'iron-mace2-random8',
651: 'iron-mace3-random8',
652: 'iron-mace4-random8',
653: 'iron-mace5-random8',
654: 'iron-mace1-random9',
655: 'iron-mace2-random9',
656: 'iron-mace3-random9',
657: 'iron-mace4-random9',
658: 'iron-mace5-random9',
659: 'iron-mace1-random10',
660: 'iron-mace2-random10',
661: 'iron-mace3-random10',
662: 'iron-mace4-random10',
663: 'iron-mace5-random10',
664: 'wood-mace01',
665: 'wood-mace02',
666: 'wood-mace03',
667: 'wood-mace04',
668: 'wood-mace05',
669: 'light-helmet',
670: 'light-chest',
671: 'light-shoulder',
672: 'light-glove',
673: 'light-boot',
674: 'bow',
675: 'wood-bow2',
676: 'wood-bow3',
677: 'wood-bow4',
678: 'wood-bow5',
679: 'wood-bow1-random1',
680: 'wood-bow2-random1',
681: 'wood-bow3-random1',
682: 'wood-bow4-random1',
683: 'wood-bow5-random1',
684: 'wood-bow1-random2',
685: 'wood-bow2-random2',
686: 'wood-bow3-random2',
687: 'wood-bow4-random2',
688: 'wood-bow5-random2',
689: 'wood-bow1-random3',
690: 'wood-bow2-random3',
691: 'wood-bow3-random3',
692: 'wood-bow4-random3',
693: 'wood-bow5-random3',
694: 'wood-bow1-random4',
695: 'wood-bow2-random4',
696: 'wood-bow3-random4',
697: 'wood-bow4-random4',
698: 'wood-bow5-random4',
699: 'wood-bow1-random5',
700: 'wood-bow2-random5',
701: 'wood-bow3-random5',
702: 'wood-bow4-random5',
703: 'wood-bow5-random5',
704: 'wood-bow1-random6',
705: 'wood-bow2-random6',
706: 'wood-bow3-random6',
707: 'wood-bow4-random6',
708: 'wood-bow5-random6',
709: 'wood-bow1-random7',
710: 'wood-bow2-random7',
711: 'wood-bow3-random7',
712: 'wood-bow4-random7',
713: 'wood-bow5-random7',
714: 'wood-bow1-random8',
715: 'wood-bow2-random8',
716: 'wood-bow3-random8',
717: 'wood-bow4-random8',
718: 'wood-bow5-random8',
719: 'wood-bow1-random9',
720: 'wood-bow2-random9',
721: 'wood-bow3-random9',
722: 'wood-bow4-random9',
723: 'wood-bow5-random9',
724: 'wood-bow1-random10',
725: 'wood-bow2-random10',
726: 'wood-bow3-random10',
727: 'wood-bow4-random10',
728: 'wood-bow5-random10',
729: 'wood-crossbow1',
730: 'wood-crossbow2',
731: 'wood-crossbow3',
732: 'wood-crossbow4',
733: 'wood-crossbow5',
734: 'wood-crossbow1-random1',
735: 'wood-crossbow2-random1',
736: 'wood-crossbow3-random1',
737: 'wood-crossbow4-random1',
738: 'wood-crossbow5-random1',
739: 'wood-crossbow1-random2',
740: 'wood-crossbow2-random2',
741: 'wood-crossbow3-random2',
742: 'wood-crossbow4-random2',
743: 'wood-crossbow5-random2',
744: 'wood-crossbow1-random3',
745: 'wood-crossbow2-random3',
746: 'wood-crossbow3-random3',
747: 'wood-crossbow4-random3',
748: 'wood-crossbow5-random3',
749: 'wood-crossbow1-random4',
750: 'wood-crossbow2-random4',
751: 'wood-crossbow3-random4',
752: 'wood-crossbow4-random4',
753: 'wood-crossbow5-random4',
754: 'wood-crossbow1-random5',
755: 'wood-crossbow2-random5',
756: 'wood-crossbow3-random5',
757: 'wood-crossbow4-random5',
758: 'wood-crossbow5-random5',
759: 'wood-crossbow1-random6',
760: 'wood-crossbow2-random6',
761: 'wood-crossbow3-random6',
762: 'wood-crossbow4-random6',
763: 'wood-crossbow5-random6',
764: 'wood-crossbow1-random7',
765: 'wood-crossbow2-random7',
766: 'wood-crossbow3-random7',
767: 'wood-crossbow4-random7',
768: 'wood-crossbow5-random7',
769: 'wood-crossbow1-random8',
770: 'wood-crossbow2-random8',
771: 'wood-crossbow3-random8',
772: 'wood-crossbow4-random8',
773: 'wood-crossbow5-random8',
774: 'wood-crossbow1-random9',
775: 'wood-crossbow2-random9',
776: 'wood-crossbow3-random9',
777: 'wood-crossbow4-random9',
778: 'wood-crossbow5-random9',
779: 'wood-crossbow1-random10',
780: 'wood-crossbow2-random10',
781: 'wood-crossbow3-random10',
782: 'wood-crossbow4-random10',
783: 'wood-crossbow5-random10',
784: 'wood-boomerang1',
785: 'wood-boomerang2',
786: 'wood-boomerang3',
787: 'wood-boomerang4',
788: 'wood-boomerang5',
789: 'wood-boomerang1-random1',
790: 'wood-boomerang2-random1',
791: 'wood-boomerang3-random1',
792: 'wood-boomerang4-random1',
793: 'wood-boomerang5-random1',
794: 'wood-boomerang1-random2',
795: 'wood-boomerang2-random2',
796: 'wood-boomerang3-random2',
797: 'wood-boomerang4-random2',
798: 'wood-boomerang5-random2',
799: 'wood-boomerang1-random3',
800: 'wood-boomerang2-random3',
801: 'wood-boomerang3-random3',
802: 'wood-boomerang4-random3',
803: 'wood-boomerang5-random3',
804: 'wood-boomerang1-random4',
805: 'wood-boomerang2-random4',
806: 'wood-boomerang3-random4',
807: 'wood-boomerang4-random4',
808: 'wood-boomerang5-random4',
809: 'wood-boomerang1-random5',
810: 'wood-boomerang2-random5',
811: 'wood-boomerang3-random5',
812: 'wood-boomerang4-random5',
813: 'wood-boomerang5-random5',
814: 'wood-boomerang1-random6',
815: 'wood-boomerang2-random6',
816: 'wood-boomerang3-random6',
817: 'wood-boomerang4-random6',
818: 'wood-boomerang5-random6',
819: 'wood-boomerang1-random7',
820: 'wood-boomerang2-random7',
821: 'wood-boomerang3-random7',
822: 'wood-boomerang4-random7',
823: 'wood-boomerang5-random7',
824: 'wood-boomerang1-random8',
825: 'wood-boomerang2-random8',
826: 'wood-boomerang3-random8',
827: 'wood-boomerang4-random8',
828: 'wood-boomerang5-random8',
829: 'wood-boomerang1-random9',
830: 'wood-boomerang2-random9',
831: 'wood-boomerang3-random9',
832: 'wood-boomerang4-random9',
833: 'wood-boomerang5-random9',
834: 'wood-boomerang1-random10',
835: 'wood-boomerang2-random10',
836: 'wood-boomerang3-random10',
837: 'wood-boomerang4-random10',
838: 'wood-boomerang5-random10',
839: 'quiver',
840: 'arrow',
841: 'fireball',
842: 'torch',
843: 'mushroom',
844: 'shimmer-mushroom',
845: 'heartflower',
846: 'heartflower-frozen',
847: 'prickly-pear',
848: 'soulflower',
849: 'life-potion',
850: 'cactus-potion',
851: 'mana-potion',
852: 'cookie',
853: 'jelly-green',
854: 'jelly-pink',
855: 'jelly-blue',
856: 'jelly-yellow',
857: 'carrot',
858: 'pumpkin-mash',
859: 'candy',
860: 'lollipop',
861: 'softice',
862: 'donut-chocolate',
863: 'cotton-candy',
864: 'popcorn',
865: 'cereal-bar',
866: 'strawberry-cake',
867: 'chocolate-cake',
868: 'vanilla-cupcake',
869: 'chocolate-cupcake',
870: 'banana-split',
871: 'croissant',
872: 'lolly',
873: 'lemon-tart',
874: 'chocolate-cookie',
875: 'bubble-gum',
876: 'licorice-candy',
877: 'cinnamon-role',
878: 'apple-ring',
879: 'waffle',
880: 'water-ice',
881: 'date-cookie',
882: 'candied-apple',
883: 'strawberry-cocktail',
884: 'milk-chocolate-bar',
885: 'caramel-chocolate-bar',
886: 'mint-chocolate-bar',
887: 'white-chocolate-bar',
888: 'sugar-candy',
889: 'blackberry-marmelade',
890: 'salted-caramel',
891: 'ginger-tartlet',
892: 'mango-juice',
893: 'fruit-basket',
894: 'melon-icecream',
895: 'bloodorange-juice',
896: 'pancakes',
897: 'curry',
898: 'biscuit-role',
899: 'iron-sword1',
900: 'iron-sword2',
901: 'iron-sword3',
902: 'iron-sword4',
903: 'iron-sword5',
904: 'iron-sword1-random1',
905: 'iron-sword2-random1',
906: 'iron-sword3-random1',
907: 'iron-sword4-random1',
908: 'iron-sword5-random1',
909: 'iron-sword1-random2',
910: 'iron-sword2-random2',
911: 'iron-sword3-random2',
912: 'iron-sword4-random2',
913: 'iron-sword5-random2',
914: 'iron-sword1-random3',
915: 'iron-sword2-random3',
916: 'iron-sword3-random3',
917: 'iron-sword4-random3',
918: 'iron-sword5-random3',
919: 'iron-sword1-random4',
920: 'iron-sword2-random4',
921: 'iron-sword3-random4',
922: 'iron-sword4-random4',
923: 'iron-sword5-random4',
924: 'iron-sword1-random5',
925: 'iron-sword2-random5',
926: 'iron-sword3-random5',
927: 'iron-sword4-random5',
928: 'iron-sword5-random5',
929: 'iron-sword1-random6',
930: 'iron-sword2-random6',
931: 'iron-sword3-random6',
932: 'iron-sword4-random6',
933: 'iron-sword5-random6',
934: 'iron-sword1-random7',
935: 'iron-sword2-random7',
936: 'iron-sword3-random7',
937: 'iron-sword4-random7',
938: 'iron-sword5-random7',
939: 'iron-sword1-random8',
940: 'iron-sword2-random8',
941: 'iron-sword3-random8',
942: 'iron-sword4-random8',
943: 'iron-sword5-random8',
944: 'iron-sword1-random9',
945: 'iron-sword2-random9',
946: 'iron-sword3-random9',
947: 'iron-sword4-random9',
948: 'iron-sword5-random9',
949: 'iron-sword1-random10',
950: 'iron-sword2-random10',
951: 'iron-sword3-random10',
952: 'iron-sword4-random10',
953: 'iron-sword5-random10',
954: 'iron-dagger1',
955: 'iron-dagger2',
956: 'iron-dagger3',
957: 'iron-dagger4',
958: 'iron-dagger5',
959: 'iron-dagger1-random1',
960: 'iron-dagger2-random1',
961: 'iron-dagger3-random1',
962: 'iron-dagger4-random1',
963: 'iron-dagger5-random1',
964: 'iron-dagger1-random2',
965: 'iron-dagger2-random2',
966: 'iron-dagger3-random2',
967: 'iron-dagger4-random2',
968: 'iron-dagger5-random2',
969: 'iron-dagger1-random3',
970: 'iron-dagger2-random3',
971: 'iron-dagger3-random3',
972: 'iron-dagger4-random3',
973: 'iron-dagger5-random3',
974: 'iron-dagger1-random4',
975: 'iron-dagger2-random4',
976: 'iron-dagger3-random4',
977: 'iron-dagger4-random4',
978: 'iron-dagger5-random4',
979: 'iron-dagger1-random5',
980: 'iron-dagger2-random5',
981: 'iron-dagger3-random5',
982: 'iron-dagger4-random5',
983: 'iron-dagger5-random5',
984: 'iron-dagger1-random6',
985: 'iron-dagger2-random6',
986: 'iron-dagger3-random6',
987: 'iron-dagger4-random6',
988: 'iron-dagger5-random6',
989: 'iron-dagger1-random7',
990: 'iron-dagger2-random7',
991: 'iron-dagger3-random7',
992: 'iron-dagger4-random7',
993: 'iron-dagger5-random7',
994: 'iron-dagger1-random8',
995: 'iron-dagger2-random8',
996: 'iron-dagger3-random8',
997: 'iron-dagger4-random8',
998: 'iron-dagger5-random8',
999: 'iron-dagger1-random9',
1000: 'iron-dagger2-random9',
1001: 'iron-dagger3-random9',
1002: 'iron-dagger4-random9',
1003: 'iron-dagger5-random9',
1004: 'iron-dagger1-random10',
1005: 'iron-dagger2-random10',
1006: 'iron-dagger3-random10',
1007: 'iron-dagger4-random10',
1008: 'iron-dagger5-random10',
1009: 'iron-fist1',
1010: 'iron-fist2',
1011: 'iron-fist3',
1012: 'iron-fist4',
1013: 'iron-fist5',
1014: 'iron-fist1-random1',
1015: 'iron-fist2-random1',
1016: 'iron-fist3-random1',
1017: 'iron-fist4-random1',
1018: 'iron-fist5-random1',
1019: 'iron-fist1-random2',
1020: 'iron-fist2-random2',
1021: 'iron-fist3-random2',
1022: 'iron-fist4-random2',
1023: 'iron-fist5-random2',
1024: 'iron-fist1-random3',
1025: 'iron-fist2-random3',
1026: 'iron-fist3-random3',
1027: 'iron-fist4-random3',
1028: 'iron-fist5-random3',
1029: 'iron-fist1-random4',
1030: 'iron-fist2-random4',
1031: 'iron-fist3-random4',
1032: 'iron-fist4-random4',
1033: 'iron-fist5-random4',
1034: 'iron-fist1-random5',
1035: 'iron-fist2-random5',
1036: 'iron-fist3-random5',
1037: 'iron-fist4-random5',
1038: 'iron-fist5-random5',
1039: 'iron-fist1-random6',
1040: 'iron-fist2-random6',
1041: 'iron-fist3-random6',
1042: 'iron-fist4-random6',
1043: 'iron-fist5-random6',
1044: 'iron-fist1-random7',
1045: 'iron-fist2-random7',
1046: 'iron-fist3-random7',
1047: 'iron-fist4-random7',
1048: 'iron-fist5-random7',
1049: 'iron-fist1-random8',
1050: 'iron-fist2-random8',
1051: 'iron-fist3-random8',
1052: 'iron-fist4-random8',
1053: 'iron-fist5-random8',
1054: 'iron-fist1-random9',
1055: 'iron-fist2-random9',
1056: 'iron-fist3-random9',
1057: 'iron-fist4-random9',
1058: 'iron-fist5-random9',
1059: 'iron-fist1-random10',
1060: 'iron-fist2-random10',
1061: 'iron-fist3-random10',
1062: 'iron-fist4-random10',
1063: 'iron-fist5-random10',
1064: 'iron-shield01',
1065: 'iron-shield02',
1066: 'iron-shield03',
1067: 'iron-shield04',
1068: 'iron-shield05',
1069: 'iron-shield1-random1',
1070: 'iron-shield2-random1',
1071: 'iron-shield3-random1',
1072: 'iron-shield4-random1',
1073: 'iron-shield5-random1',
1074: 'iron-shield1-random2',
1075: 'iron-shield2-random2',
1076: 'iron-shield3-random2',
1077: 'iron-shield4-random2',
1078: 'iron-shield5-random2',
1079: 'iron-shield1-random3',
1080: 'iron-shield2-random3',
1081: 'iron-shield3-random3',
1082: 'iron-shield4-random3',
1083: 'iron-shield5-random3',
1084: 'iron-shield1-random4',
1085: 'iron-shield2-random4',
1086: 'iron-shield3-random4',
1087: 'iron-shield4-random4',
1088: 'iron-shield5-random4',
1089: 'iron-shield1-random5',
1090: 'iron-shield2-random5',
1091: 'iron-shield3-random5',
1092: 'iron-shield4-random5',
1093: 'iron-shield5-random5',
1094: 'iron-shield1-random6',
1095: 'iron-shield2-random6',
1096: 'iron-shield3-random6',
1097: 'iron-shield4-random6',
1098: 'iron-shield5-random6',
1099: 'iron-shield1-random7',
1100: 'iron-shield2-random7',
1101: 'iron-shield3-random7',
1102: 'iron-shield4-random7',
1103: 'iron-shield5-random7',
1104: 'iron-shield1-random8',
1105: 'iron-shield2-random8',
1106: 'iron-shield3-random8',
1107: 'iron-shield4-random8',
1108: 'iron-shield5-random8',
1109: 'iron-shield1-random9',
1110: 'iron-shield2-random9',
1111: 'iron-shield3-random9',
1112: 'iron-shield4-random9',
1113: 'iron-shield5-random9',
1114: 'iron-shield1-random10',
1115: 'iron-shield2-random10',
1116: 'iron-shield3-random10',
1117: 'iron-shield4-random10',
1118: 'iron-shield5-random10',
1119: 'wood-shield01',
1120: 'wood-shield02',
1121: 'wood-shield03',
1122: 'wood-shield04',
1123: 'wood-shield05',
1124: 'iron-chest1',
1125: 'iron-chest2',
1126: 'iron-chest3',
1127: 'iron-chest4',
1128: 'iron-chest5',
1129: 'iron-chest1-random1',
1130: 'iron-chest2-random1',
1131: 'iron-chest3-random1',
1132: 'iron-chest4-random1',
1133: 'iron-chest5-random1',
1134: 'iron-chest1-random2',
1135: 'iron-chest2-random2',
1136: 'iron-chest3-random2',
1137: 'iron-chest4-random2',
1138: 'iron-chest5-random2',
1139: 'iron-chest1-random3',
1140: 'iron-chest2-random3',
1141: 'iron-chest3-random3',
1142: 'iron-chest4-random3',
1143: 'iron-chest5-random3',
1144: 'iron-chest1-random4',
1145: 'iron-chest2-random4',
1146: 'iron-chest3-random4',
1147: 'iron-chest4-random4',
1148: 'iron-chest5-random4',
1149: 'iron-shoulder1',
1150: 'iron-shoulder2',
1151: 'iron-shoulder3',
1152: 'iron-shoulder4',
1153: 'iron-shoulder5',
1154: 'iron-shoulder1-random1',
1155: 'iron-shoulder2-random1',
1156: 'iron-shoulder3-random1',
1157: 'iron-shoulder4-random1',
1158: 'iron-shoulder5-random1',
1159: 'iron-shoulder1-random2',
1160: 'iron-shoulder2-random2',
1161: 'iron-shoulder3-random2',
1162: 'iron-shoulder4-random2',
1163: 'iron-shoulder5-random2',
1164: 'iron-shoulder1-random3',
1165: 'iron-shoulder2-random3',
1166: 'iron-shoulder3-random3',
1167: 'iron-shoulder4-random3',
1168: 'iron-shoulder5-random3',
1169: 'iron-shoulder1-random4',
1170: 'iron-shoulder2-random4',
1171: 'iron-shoulder3-random4',
1172: 'iron-shoulder4-random4',
1173: 'iron-shoulder5-random4',
1174: 'iron-hand1',
1175: 'iron-hand2',
1176: 'iron-hand3',
1177: 'iron-hand4',
1178: 'iron-hand5',
1179: 'iron-hand1-random1',
1180: 'iron-hand2-random1',
1181: 'iron-hand3-random1',
1182: 'iron-hand4-random1',
1183: 'iron-hand5-random1',
1184: 'iron-hand1-random2',
1185: 'iron-hand2-random2',
1186: 'iron-hand3-random2',
1187: 'iron-hand4-random2',
1188: 'iron-hand5-random2',
1189: 'iron-hand1-random3',
1190: 'iron-hand2-random3',
1191: 'iron-hand3-random3',
1192: 'iron-hand4-random3',
1193: 'iron-hand5-random3',
1194: 'iron-hand1-random4',
1195: 'iron-hand2-random4',
1196: 'iron-hand3-random4',
1197: 'iron-hand4-random4',
1198: 'iron-hand5-random4',
1199: 'iron-foot1',
1200: 'iron-foot2',
1201: 'iron-foot3',
1202: 'iron-foot4',
1203: 'iron-foot5',
1204: 'iron-foot1-random1',
1205: 'iron-foot2-random1',
1206: 'iron-foot3-random1',
1207: 'iron-foot4-random1',
1208: 'iron-foot5-random1',
1209: 'iron-foot1-random2',
1210: 'iron-foot2-random2',
1211: 'iron-foot3-random2',
1212: 'iron-foot4-random2',
1213: 'iron-foot5-random2',
1214: 'iron-foot1-random3',
1215: 'iron-foot2-random3',
1216: 'iron-foot3-random3',
1217: 'iron-foot4-random3',
1218: 'iron-foot5-random3',
1219: 'iron-foot1-random4',
1220: 'iron-foot2-random4',
1221: 'iron-foot3-random4',
1222: 'iron-foot4-random4',
1223: 'iron-foot5-random4',
1224: 'saurian-chest',
1225: 'saurian-shoulder',
1226: 'saurian-glove',
1227: 'saurian-boot',
1228: 'saurian-helmet',
1229: 'innkeeper-chest',
1230: 'backpack',
1231: 'body1',
1232: 'body2',
1233: 'body3',
1234: 'body4',
1235: 'body5',
1236: 'elf-head-m01',
1237: 'elf-head-m02',
1238: 'elf-head-m03',
1239: 'elf-head-m04',
1240: 'elf-head-f01',
1241: 'elf-head-f02',
1242: 'elf-head-f03',
1243: 'elf-head-f04',
1244: 'elf-head-f05',
1245: 'elf-head-f06',
1246: 'human-head-m01',
1247: 'human-head-m02',
1248: 'human-head-m03',
1249: 'human-head-m04',
1250: 'human-head-m05',
1251: 'human-head-m06',
1252: 'human-hair-m01',
1253: 'human-hair-m02',
1254: 'human-hair-m03',
1255: 'human-hair-m04',
1256: 'human-hair-m05',
1257: 'human-hair-m06',
1258: 'human-hair-m07',
1259: 'human-hair-m08',
1260: 'human-hair-m09',
1261: 'human-hair-m10',
1262: 'human-hair-m11',
1263: 'human-hair-m12',
1264: 'human-hair-m13',
1265: 'human-hair-m14',
1266: 'human-hair-m15',
1267: 'human-head-f01',
1268: 'human-head-f02',
1269: 'human-head-f03',
1270: 'human-head-f04',
1271: 'human-head-f05',
1272: 'human-head-f06',
1273: 'human-hair-f01',
1274: 'human-hair-f02',
1275: 'human-hair-f03',
1276: 'human-hair-f04',
1277: 'human-hair-f05',
1278: 'human-hair-f06',
1279: 'human-hair-f07',
1280: 'elf-hair-m01',
1281: 'elf-hair-m02',
1282: 'elf-hair-m03',
1283: 'elf-hair-m04',
1284: 'elf-hair-m05',
1285: 'elf-hair-m06',
1286: 'elf-hair-m07',
1287: 'elf-hair-m08',
1288: 'elf-hair-m09',
1289: 'elf-hair-m10',
1290: 'elf-hair-f01',
1291: 'elf-hair-f02',
1292: 'elf-hair-f03',
1293: 'elf-hair-f04',
1294: 'elf-hair-f05',
1295: 'elf-hair-f06',
1296: 'elf-hair-f07',
1297: 'elf-hair-f08',
1298: 'elf-hair-f09',
1299: 'elf-hair-f10',
1300: 'orc-head',
1301: 'orc-head-m02',
1302: 'orc-head-m03',
1303: 'orc-head-m04',
1304: 'orc-head-m05',
1305: 'orc-head-f01',
1306: 'orc-head-f02',
1307: 'orc-head-f03',
1308: 'orc-head-f04',
1309: 'orc-head-f05',
1310: 'orc-hair-m01',
1311: 'orc-hair-m02',
1312: 'orc-hair-m03',
1313: 'orc-hair-m04',
1314: 'orc-hair-m05',
1315: 'orc-hair-m06',
1316: 'orc-hair-m07',
1317: 'orc-hair-m08',
1318: 'orc-hair-m09',
1319: 'orc-hair-m10',
1320: 'orc-hair-f01',
1321: 'orc-hair-f02',
1322: 'orc-hair-f03',
1323: 'orc-hair-f04',
1324: 'frogman-head-m01',
1325: 'frogman-head-m02',
1326: 'frogman-head-m03',
1327: 'frogman-head-m04',
1328: 'frogman-head-m05',
1329: 'frogman-hair-m01',
1330: 'frogman-hair-m02',
1331: 'frogman-hair-m03',
1332: 'frogman-hair-m04',
1333: 'frogman-hair-m05',
1334: 'frogman-head-f01',
1335: 'frogman-head-f02',
1336: 'frogman-head-f03',
1337: 'frogman-head-f04',
1338: 'frogman-hair-f01',
1339: 'frogman-hair-f02',
1340: 'frogman-hair-f03',
1341: 'frogman-hair-f04',
1342: 'frogman-hand',
1343: 'mermaid-head-f01',
1344: 'mermaid-head-f02',
1345: 'mermaid-head-f03',
1346: 'mermaid-hair-f01',
1347: 'mermaid-hair-f02',
1348: 'mermaid-hair-f03',
1349: 'mermaid-body',
1350: 'mermaid-hand',
1351: 'merman-head-m01',
1352: 'merman-head-m02',
1353: 'merman-head-m03',
1354: 'merman-hair-m01',
1355: 'merman-hair-m02',
1356: 'merman-hair-m03',
1357: 'merman-body',
1358: 'mermaid-hand',
1359: 'linen-chest1',
1360: 'linen-chest2',
1361: 'linen-chest3',
1362: 'linen-chest4',
1363: 'linen-chest5',
1364: 'linen-chest1-random1',
1365: 'linen-chest2-random1',
1366: 'linen-chest3-random1',
1367: 'linen-chest4-random1',
1368: 'linen-chest5-random1',
1369: 'linen-chest1-random2',
1370: 'linen-chest2-random2',
1371: 'linen-chest3-random2',
1372: 'linen-chest4-random2',
1373: 'linen-chest5-random2',
1374: 'linen-chest1-random3',
1375: 'linen-chest2-random3',
1376: 'linen-chest3-random3',
1377: 'linen-chest4-random3',
1378: 'linen-chest5-random3',
1379: 'linen-chest1-random4',
1380: 'linen-chest2-random4',
1381: 'linen-chest3-random4',
1382: 'linen-chest4-random4',
1383: 'linen-chest5-random4',
1384: 'linen-shoulder1',
1385: 'linen-shoulder2',
1386: 'linen-shoulder3',
1387: 'linen-shoulder4',
1388: 'linen-shoulder5',
1389: 'linen-shoulder1-random1',
1390: 'linen-shoulder2-random1',
1391: 'linen-shoulder3-random1',
1392: 'linen-shoulder4-random1',
1393: 'linen-shoulder5-random1',
1394: 'linen-shoulder1-random2',
1395: 'linen-shoulder2-random2',
1396: 'linen-shoulder3-random2',
1397: 'linen-shoulder4-random2',
1398: 'linen-shoulder5-random2',
1399: 'linen-shoulder1-random3',
1400: 'linen-shoulder2-random3',
1401: 'linen-shoulder3-random3',
1402: 'linen-shoulder4-random3',
1403: 'linen-shoulder5-random3',
1404: 'linen-shoulder1-random4',
1405: 'linen-shoulder2-random4',
1406: 'linen-shoulder3-random4',
1407: 'linen-shoulder4-random4',
1408: 'linen-shoulder5-random4',
1409: 'linen-foot1',
1410: 'linen-foot2',
1411: 'linen-foot3',
1412: 'linen-foot4',
1413: 'linen-foot5',
1414: 'linen-foot1-random1',
1415: 'linen-foot2-random1',
1416: 'linen-foot3-random1',
1417: 'linen-foot4-random1',
1418: 'linen-foot5-random1',
1419: 'linen-foot1-random2',
1420: 'linen-foot2-random2',
1421: 'linen-foot3-random2',
1422: 'linen-foot4-random2',
1423: 'linen-foot5-random2',
1424: 'linen-foot1-random3',
1425: 'linen-foot2-random3',
1426: 'linen-foot3-random3',
1427: 'linen-foot4-random3',
1428: 'linen-foot5-random3',
1429: 'linen-foot1-random4',
1430: 'linen-foot2-random4',
1431: 'linen-foot3-random4',
1432: 'linen-foot4-random4',
1433: 'linen-foot5-random4',
1434: 'linen-hand1',
1435: 'linen-hand2',
1436: 'linen-hand3',
1437: 'linen-hand4',
1438: 'linen-hand5',
1439: 'linen-hand1-random1',
1440: 'linen-hand2-random1',
1441: 'linen-hand3-random1',
1442: 'linen-hand4-random1',
1443: 'linen-hand5-random1',
1444: 'linen-hand1-random2',
1445: 'linen-hand2-random2',
1446: 'linen-hand3-random2',
1447: 'linen-hand4-random2',
1448: 'linen-hand5-random2',
1449: 'linen-hand1-random3',
1450: 'linen-hand2-random3',
1451: 'linen-hand3-random3',
1452: 'linen-hand4-random3',
1453: 'linen-hand5-random3',
1454: 'linen-hand1-random4',
1455: 'linen-hand2-random4',
1456: 'linen-hand3-random4',
1457: 'linen-hand4-random4',
1458: 'linen-hand5-random4',
1459: 'wool-chest1',
1460: 'wool-chest2',
1461: 'wool-chest3',
1462: 'wool-chest4',
1463: 'wool-chest5',
1464: 'wool-chest1-random1',
1465: 'wool-chest2-random1',
1466: 'wool-chest3-random1',
1467: 'wool-chest4-random1',
1468: 'wool-chest5-random1',
1469: 'wool-chest1-random2',
1470: 'wool-chest2-random2',
1471: 'wool-chest3-random2',
1472: 'wool-chest4-random2',
1473: 'wool-chest5-random2',
1474: 'wool-chest1-random3',
1475: 'wool-chest2-random3',
1476: 'wool-chest3-random3',
1477: 'wool-chest4-random3',
1478: 'wool-chest5-random3',
1479: 'wool-chest1-random4',
1480: 'wool-chest2-random4',
1481: 'wool-chest3-random4',
1482: 'wool-chest4-random4',
1483: 'wool-chest5-random4',
1484: 'wool-shoulder1',
1485: 'wool-shoulder2',
1486: 'wool-shoulder3',
1487: 'wool-shoulder4',
1488: 'wool-shoulder5',
1489: 'wool-shoulder1-random1',
1490: 'wool-shoulder2-random1',
1491: 'wool-shoulder3-random1',
1492: 'wool-shoulder4-random1',
1493: 'wool-shoulder5-random1',
1494: 'wool-shoulder1-random2',
1495: 'wool-shoulder2-random2',
1496: 'wool-shoulder3-random2',
1497: 'wool-shoulder4-random2',
1498: 'wool-shoulder5-random2',
1499: 'wool-shoulder1-random3',
1500: 'wool-shoulder2-random3',
1501: 'wool-shoulder3-random3',
1502: 'wool-shoulder4-random3',
1503: 'wool-shoulder5-random3',
1504: 'wool-shoulder1-random4',
1505: 'wool-shoulder2-random4',
1506: 'wool-shoulder3-random4',
1507: 'wool-shoulder4-random4',
1508: 'wool-shoulder5-random4',
1509: 'wool-foot1',
1510: 'wool-foot2',
1511: 'wool-foot3',
1512: 'wool-foot4',
1513: 'wool-foot5',
1514: 'wool-foot1-random1',
1515: 'wool-foot2-random1',
1516: 'wool-foot3-random1',
1517: 'wool-foot4-random1',
1518: 'wool-foot5-random1',
1519: 'wool-foot1-random2',
1520: 'wool-foot2-random2',
1521: 'wool-foot3-random2',
1522: 'wool-foot4-random2',
1523: 'wool-foot5-random2',
1524: 'wool-foot1-random3',
1525: 'wool-foot2-random3',
1526: 'wool-foot3-random3',
1527: 'wool-foot4-random3',
1528: 'wool-foot5-random3',
1529: 'wool-foot1-random4',
1530: 'wool-foot2-random4',
1531: 'wool-foot3-random4',
1532: 'wool-foot4-random4',
1533: 'wool-foot5-random4',
1534: 'wool-hand1',
1535: 'wool-hand2',
1536: 'wool-hand3',
1537: 'wool-hand4',
1538: 'wool-hand5',
1539: 'wool-hand1-random1',
1540: 'wool-hand2-random1',
1541: 'wool-hand3-random1',
1542: 'wool-hand4-random1',
1543: 'wool-hand5-random1',
1544: 'wool-hand1-random2',
1545: 'wool-hand2-random2',
1546: 'wool-hand3-random2',
1547: 'wool-hand4-random2',
1548: 'wool-hand5-random2',
1549: 'wool-hand1-random3',
1550: 'wool-hand2-random3',
1551: 'wool-hand3-random3',
1552: 'wool-hand4-random3',
1553: 'wool-hand5-random3',
1554: 'wool-hand1-random4',
1555: 'wool-hand2-random4',
1556: 'wool-hand3-random4',
1557: 'wool-hand4-random4',
1558: 'wool-hand5-random4',
1564: 'gold-amulet1-random1',
1565: 'gold-amulet2-random1',
1566: 'gold-amulet3-random1',
1567: 'gold-amulet4-random1',
1568: 'gold-amulet5-random1',
1569: 'gold-amulet1-random2',
1570: 'gold-amulet2-random2',
1571: 'gold-amulet3-random2',
1572: 'gold-amulet4-random2',
1573: 'gold-amulet5-random2',
1574: 'gold-amulet1-random3',
1575: 'gold-amulet2-random3',
1576: 'gold-amulet3-random3',
1577: 'gold-amulet4-random3',
1578: 'gold-amulet5-random3',
1579: 'gold-amulet1-random4',
1580: 'gold-amulet2-random4',
1581: 'gold-amulet3-random4',
1582: 'gold-amulet4-random4',
1583: 'gold-amulet5-random4',
1584: 'gold-amulet1-random5',
1585: 'gold-amulet2-random5',
1586: 'gold-amulet3-random5',
1587: 'gold-amulet4-random5',
1588: 'gold-amulet5-random5',
1594: 'silver-amulet1-random1',
1595: 'silver-amulet2-random1',
1596: 'silver-amulet3-random1',
1597: 'silver-amulet4-random1',
1598: 'silver-amulet5-random1',
1599: 'silver-amulet1-random2',
1600: 'silver-amulet2-random2',
1601: 'silver-amulet3-random2',
1602: 'silver-amulet4-random2',
1603: 'silver-amulet5-random2',
1604: 'silver-amulet1-random3',
1605: 'silver-amulet2-random3',
1606: 'silver-amulet3-random3',
1607: 'silver-amulet4-random3',
1608: 'silver-amulet5-random3',
1609: 'silver-amulet1-random4',
1610: 'silver-amulet2-random4',
1611: 'silver-amulet3-random4',
1612: 'silver-amulet4-random4',
1613: 'silver-amulet5-random4',
1614: 'silver-amulet1-random5',
1615: 'silver-amulet2-random5',
1616: 'silver-amulet3-random5',
1617: 'silver-amulet4-random5',
1618: 'silver-amulet5-random5',
1619: 'gold-ring1-random1',
1620: 'gold-ring2-random1',
1621: 'gold-ring3-random1',
1622: 'gold-ring4-random1',
1623: 'gold-ring5-random1',
1624: 'gold-ring1-random2',
1625: 'gold-ring2-random2',
1626: 'gold-ring3-random2',
1627: 'gold-ring4-random2',
1628: 'gold-ring5-random2',
1629: 'gold-ring1-random3',
1630: 'gold-ring2-random3',
1631: 'gold-ring3-random3',
1632: 'gold-ring4-random3',
1633: 'gold-ring5-random3',
1634: 'gold-ring1-random4',
1635: 'gold-ring2-random4',
1636: 'gold-ring3-random4',
1637: 'gold-ring4-random4',
1638: 'gold-ring5-random4',
1639: 'gold-ring1-random5',
1640: 'gold-ring2-random5',
1641: 'gold-ring3-random5',
1642: 'gold-ring4-random5',
1643: 'gold-ring5-random5',
1644: 'silver-ring1-random1',
1645: 'silver-ring2-random1',
1646: 'silver-ring3-random1',
1647: 'silver-ring4-random1',
1648: 'silver-ring5-random1',
1649: 'silver-ring1-random2',
1650: 'silver-ring2-random2',
1651: 'silver-ring3-random2',
1652: 'silver-ring4-random2',
1653: 'silver-ring5-random2',
1654: 'silver-ring1-random3',
1655: 'silver-ring2-random3',
1656: 'silver-ring3-random3',
1657: 'silver-ring4-random3',
1658: 'silver-ring5-random3',
1659: 'silver-ring1-random4',
1660: 'silver-ring2-random4',
1661: 'silver-ring3-random4',
1662: 'silver-ring4-random4',
1663: 'silver-ring5-random4',
1664: 'silver-ring1-random5',
1665: 'silver-ring2-random5',
1666: 'silver-ring3-random5',
1667: 'silver-ring4-random5',
1668: 'silver-ring5-random5',
1669: 'silk-chest1',
1670: 'silk-chest2',
1671: 'silk-chest3',
1672: 'silk-chest4',
1673: 'silk-chest5',
1674: 'silk-chest1-random1',
1675: 'silk-chest2-random1',
1676: 'silk-chest3-random1',
1677: 'silk-chest4-random1',
1678: 'silk-chest5-random1',
1679: 'silk-chest1-random2',
1680: 'silk-chest2-random2',
1681: 'silk-chest3-random2',
1682: 'silk-chest4-random2',
1683: 'silk-chest5-random2',
1684: 'silk-chest1-random3',
1685: 'silk-chest2-random3',
1686: 'silk-chest3-random3',
1687: 'silk-chest4-random3',
1688: 'silk-chest5-random3',
1689: 'silk-chest1-random4',
1690: 'silk-chest2-random4',
1691: 'silk-chest3-random4',
1692: 'silk-chest4-random4',
1693: 'silk-chest5-random4',
1694: 'silk-shoulder1',
1695: 'silk-shoulder2',
1696: 'silk-shoulder3',
1697: 'silk-shoulder4',
1698: 'silk-shoulder5',
1699: 'silk-shoulder1-random1',
1700: 'silk-shoulder2-random1',
1701: 'silk-shoulder3-random1',
1702: 'silk-shoulder4-random1',
1703: 'silk-shoulder5-random1',
1704: 'silk-shoulder1-random2',
1705: 'silk-shoulder2-random2',
1706: 'silk-shoulder3-random2',
1707: 'silk-shoulder4-random2',
1708: 'silk-shoulder5-random2',
1709: 'silk-shoulder1-random3',
1710: 'silk-shoulder2-random3',
1711: 'silk-shoulder3-random3',
1712: 'silk-shoulder4-random3',
1713: 'silk-shoulder5-random3',
1714: 'silk-shoulder1-random4',
1715: 'silk-shoulder2-random4',
1716: 'silk-shoulder3-random4',
1717: 'silk-shoulder4-random4',
1718: 'silk-shoulder5-random4',
1719: 'silk-foot1',
1720: 'silk-foot2',
1721: 'silk-foot3',
1722: 'silk-foot4',
1723: 'silk-foot5',
1724: 'silk-foot1-random1',
1725: 'silk-foot2-random1',
1726: 'silk-foot3-random1',
1727: 'silk-foot4-random1',
1728: 'silk-foot5-random1',
1729: 'silk-foot1-random2',
1730: 'silk-foot2-random2',
1731: 'silk-foot3-random2',
1732: 'silk-foot4-random2',
1733: 'silk-foot5-random2',
1734: 'silk-foot1-random3',
1735: 'silk-foot2-random3',
1736: 'silk-foot3-random3',
1737: 'silk-foot4-random3',
1738: 'silk-foot5-random3',
1739: 'silk-foot1-random4',
1740: 'silk-foot2-random4',
1741: 'silk-foot3-random4',
1742: 'silk-foot4-random4',
1743: 'silk-foot5-random4',
1744: 'silk-hand1',
1745: 'silk-hand2',
1746: 'silk-hand3',
1747: 'silk-hand4',
1748: 'silk-hand5',
1749: 'silk-hand1-random1',
1750: 'silk-hand2-random1',
1751: 'silk-hand3-random1',
1752: 'silk-hand4-random1',
1753: 'silk-hand5-random1',
1754: 'silk-hand1-random2',
1755: 'silk-hand2-random2',
1756: 'silk-hand3-random2',
1757: 'silk-hand4-random2',
1758: 'silk-hand5-random2',
1759: 'silk-hand1-random3',
1760: 'silk-hand2-random3',
1761: 'silk-hand3-random3',
1762: 'silk-hand4-random3',
1763: 'silk-hand5-random3',
1764: 'silk-hand1-random4',
1765: 'silk-hand2-random4',
1766: 'silk-hand3-random4',
1767: 'silk-hand4-random4',
1768: 'silk-hand5-random4',
1769: 'iron-helmet',
1770: 'iron-axe1',
1771: 'iron-axe2',
1772: 'iron-axe3',
1773: 'iron-axe4',
1774: 'iron-axe5',
1775: 'iron-axe1-random1',
1776: 'iron-axe2-random1',
1777: 'iron-axe3-random1',
1778: 'iron-axe4-random1',
1779: 'iron-axe5-random1',
1780: 'iron-axe1-random2',
1781: 'iron-axe2-random2',
1782: 'iron-axe3-random2',
1783: 'iron-axe4-random2',
1784: 'iron-axe5-random2',
1785: 'iron-axe1-random3',
1786: 'iron-axe2-random3',
1787: 'iron-axe3-random3',
1788: 'iron-axe4-random3',
1789: 'iron-axe5-random3',
1790: 'iron-axe1-random4',
1791: 'iron-axe2-random4',
1792: 'iron-axe3-random4',
1793: 'iron-axe4-random4',
1794: 'iron-axe5-random4',
1795: 'iron-axe1-random5',
1796: 'iron-axe2-random5',
1797: 'iron-axe3-random5',
1798: 'iron-axe4-random5',
1799: 'iron-axe5-random5',
1800: 'iron-axe1-random6',
1801: 'iron-axe2-random6',
1802: 'iron-axe3-random6',
1803: 'iron-axe4-random6',
1804: 'iron-axe5-random6',
1805: 'iron-axe1-random7',
1806: 'iron-axe2-random7',
1807: 'iron-axe3-random7',
1808: 'iron-axe4-random7',
1809: 'iron-axe5-random7',
1810: 'iron-axe1-random8',
1811: 'iron-axe2-random8',
1812: 'iron-axe3-random8',
1813: 'iron-axe4-random8',
1814: 'iron-axe5-random8',
1815: 'iron-axe1-random9',
1816: 'iron-axe2-random9',
1817: 'iron-axe3-random9',
1818: 'iron-axe4-random9',
1819: 'iron-axe5-random9',
1820: 'iron-axe1-random10',
1821: 'iron-axe2-random10',
1822: 'iron-axe3-random10',
1823: 'iron-axe4-random10',
1824: 'iron-axe5-random10',
1825: 'iron-greatsword1',
1826: 'iron-greatsword2',
1827: 'iron-greatsword3',
1828: 'iron-greatsword4',
1829: 'iron-greatsword5',
1830: 'iron-greatsword1-random1',
1831: 'iron-greatsword2-random1',
1832: 'iron-greatsword3-random1',
1833: 'iron-greatsword4-random1',
1834: 'iron-greatsword5-random1',
1835: 'iron-greatsword1-random2',
1836: 'iron-greatsword2-random2',
1837: 'iron-greatsword3-random2',
1838: 'iron-greatsword4-random2',
1839: 'iron-greatsword5-random2',
1840: 'iron-greatsword1-random3',
1841: 'iron-greatsword2-random3',
1842: 'iron-greatsword3-random3',
1843: 'iron-greatsword4-random3',
1844: 'iron-greatsword5-random3',
1845: 'iron-greatsword1-random4',
1846: 'iron-greatsword2-random4',
1847: 'iron-greatsword3-random4',
1848: 'iron-greatsword4-random4',
1849: 'iron-greatsword5-random4',
1850: 'iron-greatsword1-random5',
1851: 'iron-greatsword2-random5',
1852: 'iron-greatsword3-random5',
1853: 'iron-greatsword4-random5',
1854: 'iron-greatsword5-random5',
1855: 'iron-greatsword1-random6',
1856: 'iron-greatsword2-random6',
1857: 'iron-greatsword3-random6',
1858: 'iron-greatsword4-random6',
1859: 'iron-greatsword5-random6',
1860: 'iron-greatsword1-random7',
1861: 'iron-greatsword2-random7',
1862: 'iron-greatsword3-random7',
1863: 'iron-greatsword4-random7',
1864: 'iron-greatsword5-random7',
1865: 'iron-greatsword1-random8',
1866: 'iron-greatsword2-random8',
1867: 'iron-greatsword3-random8',
1868: 'iron-greatsword4-random8',
1869: 'iron-greatsword5-random8',
1870: 'iron-greatsword1-random9',
1871: 'iron-greatsword2-random9',
1872: 'iron-greatsword3-random9',
1873: 'iron-greatsword4-random9',
1874: 'iron-greatsword5-random9',
1875: 'iron-greatsword1-random10',
1876: 'iron-greatsword2-random10',
1877: 'iron-greatsword3-random10',
1878: 'iron-greatsword4-random10',
1879: 'iron-greatsword5-random10',
1880: 'iron-longsword1',
1881: 'iron-longsword2',
1882: 'iron-longsword3',
1883: 'iron-longsword4',
1884: 'iron-longsword5',
1885: 'iron-longsword1-random1',
1886: 'iron-longsword2-random1',
1887: 'iron-longsword3-random1',
1888: 'iron-longsword4-random1',
1889: 'iron-longsword5-random1',
1890: 'iron-longsword1-random2',
1891: 'iron-longsword2-random2',
1892: 'iron-longsword3-random2',
1893: 'iron-longsword4-random2',
1894: 'iron-longsword5-random2',
1895: 'iron-longsword1-random3',
1896: 'iron-longsword2-random3',
1897: 'iron-longsword3-random3',
1898: 'iron-longsword4-random3',
1899: 'iron-longsword5-random3',
1900: 'iron-longsword1-random4',
1901: 'iron-longsword2-random4',
1902: 'iron-longsword3-random4',
1903: 'iron-longsword4-random4',
1904: 'iron-longsword5-random4',
1905: 'iron-longsword1-random5',
1906: 'iron-longsword2-random5',
1907: 'iron-longsword3-random5',
1908: 'iron-longsword4-random5',
1909: 'iron-longsword5-random5',
1910: 'iron-longsword1-random6',
1911: 'iron-longsword2-random6',
1912: 'iron-longsword3-random6',
1913: 'iron-longsword4-random6',
1914: 'iron-longsword5-random6',
1915: 'iron-longsword1-random7',
1916: 'iron-longsword2-random7',
1917: 'iron-longsword3-random7',
1918: 'iron-longsword4-random7',
1919: 'iron-longsword5-random7',
1920: 'iron-longsword1-random8',
1921: 'iron-longsword2-random8',
1922: 'iron-longsword3-random8',
1923: 'iron-longsword4-random8',
1924: 'iron-longsword5-random8',
1925: 'iron-longsword1-random9',
1926: 'iron-longsword2-random9',
1927: 'iron-longsword3-random9',
1928: 'iron-longsword4-random9',
1929: 'iron-longsword5-random9',
1930: 'iron-longsword1-random10',
1931: 'iron-longsword2-random10',
1932: 'iron-longsword3-random10',
1933: 'iron-longsword4-random10',
1934: 'iron-longsword5-random10',
1935: 'bone-greatsword',
1936: 'obsidian-greatsword',
1937: 'iron-greataxe1',
1938: 'iron-greataxe2',
1939: 'iron-greataxe3',
1940: 'iron-greataxe4',
1941: 'iron-greataxe5',
1942: 'iron-greataxe1-random1',
1943: 'iron-greataxe2-random1',
1944: 'iron-greataxe3-random1',
1945: 'iron-greataxe4-random1',
1946: 'iron-greataxe5-random1',
1947: 'iron-greataxe1-random2',
1948: 'iron-greataxe2-random2',
1949: 'iron-greataxe3-random2',
1950: 'iron-greataxe4-random2',
1951: 'iron-greataxe5-random2',
1952: 'iron-greataxe1-random3',
1953: 'iron-greataxe2-random3',
1954: 'iron-greataxe3-random3',
1955: 'iron-greataxe4-random3',
1956: 'iron-greataxe5-random3',
1957: 'iron-greataxe1-random4',
1958: 'iron-greataxe2-random4',
1959: 'iron-greataxe3-random4',
1960: 'iron-greataxe4-random4',
1961: 'iron-greataxe5-random4',
1962: 'iron-greataxe1-random5',
1963: 'iron-greataxe2-random5',
1964: 'iron-greataxe3-random5',
1965: 'iron-greataxe4-random5',
1966: 'iron-greataxe5-random5',
1967: 'iron-greataxe1-random6',
1968: 'iron-greataxe2-random6',
1969: 'iron-greataxe3-random6',
1970: 'iron-greataxe4-random6',
1971: 'iron-greataxe5-random6',
1972: 'iron-greataxe1-random7',
1973: 'iron-greataxe2-random7',
1974: 'iron-greataxe3-random7',
1975: 'iron-greataxe4-random7',
1976: 'iron-greataxe5-random7',
1977: 'iron-greataxe1-random8',
1978: 'iron-greataxe2-random8',
1979: 'iron-greataxe3-random8',
1980: 'iron-greataxe4-random8',
1981: 'iron-greataxe5-random8',
1982: 'iron-greataxe1-random9',
1983: 'iron-greataxe2-random9',
1984: 'iron-greataxe3-random9',
1985: 'iron-greataxe4-random9',
1986: 'iron-greataxe5-random9',
1987: 'iron-greataxe1-random10',
1988: 'iron-greataxe2-random10',
1989: 'iron-greataxe3-random10',
1990: 'iron-greataxe4-random10',
1991: 'iron-greataxe5-random10',
1992: 'bone-greataxe',
1993: 'obsidian-greataxe',
1994: 'saurian-greataxe',
1995: 'wood-greatmace02',
1996: 'iron-greatmace1',
1997: 'iron-greatmace2',
1998: 'iron-greatmace3',
1999: 'iron-greatmace4',
2000: 'iron-greatmace5',
2001: 'iron-greatmace1-random1',
2002: 'iron-greatmace2-random1',
2003: 'iron-greatmace3-random1',
2004: 'iron-greatmace4-random1',
2005: 'iron-greatmace5-random1',
2006: 'iron-greatmace1-random2',
2007: 'iron-greatmace2-random2',
2008: 'iron-greatmace3-random2',
2009: 'iron-greatmace4-random2',
2010: 'iron-greatmace5-random2',
2011: 'iron-greatmace1-random3',
2012: 'iron-greatmace2-random3',
2013: 'iron-greatmace3-random3',
2014: 'iron-greatmace4-random3',
2015: 'iron-greatmace5-random3',
2016: 'iron-greatmace1-random4',
2017: 'iron-greatmace2-random4',
2018: 'iron-greatmace3-random4',
2019: 'iron-greatmace4-random4',
2020: 'iron-greatmace5-random4',
2021: 'iron-greatmace1-random5',
2022: 'iron-greatmace2-random5',
2023: 'iron-greatmace3-random5',
2024: 'iron-greatmace4-random5',
2025: 'iron-greatmace5-random5',
2026: 'iron-greatmace1-random6',
2027: 'iron-greatmace2-random6',
2028: 'iron-greatmace3-random6',
2029: 'iron-greatmace4-random6',
2030: 'iron-greatmace5-random6',
2031: 'iron-greatmace1-random7',
2032: 'iron-greatmace2-random7',
2033: 'iron-greatmace3-random7',
2034: 'iron-greatmace4-random7',
2035: 'iron-greatmace5-random7',
2036: 'iron-greatmace1-random8',
2037: 'iron-greatmace2-random8',
2038: 'iron-greatmace3-random8',
2039: 'iron-greatmace4-random8',
2040: 'iron-greatmace5-random8',
2041: 'iron-greatmace1-random9',
2042: 'iron-greatmace2-random9',
2043: 'iron-greatmace3-random9',
2044: 'iron-greatmace4-random9',
2045: 'iron-greatmace5-random9',
2046: 'iron-greatmace1-random10',
2047: 'iron-greatmace2-random10',
2048: 'iron-greatmace3-random10',
2049: 'iron-greatmace4-random10',
2050: 'iron-greatmace5-random10',
2051: 'iron-pickaxe',
2052: 'bone-greatmace',
2053: 'obsidian-greatmace',
2054: 'obsidian-chest',
2055: 'obsidian-shoulders',
2056: 'obsidian-boot',
2057: 'obsidian-glove',
2058: 'obsidian-helmet',
2059: 'obsidian-sword',
2062: 'gold-chest',
2063: 'gold-shoulder',
2064: 'gold-boot',
2065: 'gold-glove',
2066: 'gold-helmet',
2067: 'bone-chest',
2068: 'bone-shoulders',
2069: 'bone-boot',
2070: 'bone-glove',
2072: 'bone-sword',
2073: 'bone-mace',
2074: 'bone-axe',
2075: 'cube',
2076: 'door',
2077: 'window',
2078: 'goddess2',
2079: 'artifact',
2080: 'pet-box',
2081: 'quest-item-amulet01',
2082: 'quest-item-amulet02',
2083: 'quest-item-jewel-case',
2084: 'copper-coin',
2085: 'silver-coin',
2086: 'gold-coin',
2087: 'bush',
2088: 'snow-bush',
2089: 'cobwebscrub',
2090: 'berry-bush',
2091: 'snow-berry',
2092: 'snow-berry-mash',
2093: 'scrub',
2094: 'scrub-green',
2095: 'fire-scrub',
2096: 'ginseng',
2097: 'ginseng-root',
2098: 'fir-tree',
2099: 'thorn-tree',
2100: 'gold-deposit',
2101: 'iron-deposit',
2102: 'silver-deposit',
2103: 'sandstone-deposit',
2104: 'emerald-deposit',
2105: 'sapphire-deposit',
2106: 'ruby-deposit',
2107: 'diamond-deposit',
2108: 'ice-crystal-deposit',
2109: 'scarecrow',
2110: 'aim',
2111: 'dummy',
2112: 'tree-leaves',
2113: 'vase',
2114: 'vase2',
2115: 'vase3',
2116: 'vase4',
2117: 'candle01',
2118: 'candle02',
2119: 'candle03',
2120: 'undead-candle1',
2121: 'undead-candle2',
2122: 'undead-candle3',
2123: 'character-platform',
2124: 'antique-building1',
2125: 'antique-building2',
2126: 'antique-building3',
2127: 'antique-building4',
2128: 'entrance-crypt',
2129: 'entrance-barrow',
2130: 'entrance-mine',
2131: 'entrance-antique',
2132: 'entrance-tomb',
2133: 'monster-body-reptile-shell',
2134: 'monster-body-reptile-shell-spiked',
2135: 'monster-body-reptile-crest1',
2136: 'monster-body-reptile-crest2',
2137: 'monster-body-reptile-crest3',
2138: 'monster-body-reptile-crest4',
2139: 'monster-body-reptile-crest5',
2140: 'monster-body-reptile-spiked1',
2141: 'monster-body-reptile-spiked2',
2142: 'monster-foot-reptile-normal',
2143: 'monster-foot-reptile-claw1',
2144: 'monster-foot-reptile-claw2',
2145: 'monster-head-reptile-lizard',
2146: 'monster-head-reptile-turtle',
2147: 'monster-head-reptile-horn1',
2148: 'monster-head-reptile-horn2',
2149: 'monster-head-reptile-horn3',
2150: 'monster-tail-reptile-normal',
2151: 'monster-tail-reptile-spike1',
2152: 'monster-tail-reptile-spike2',
2153: 'rune-giant-head-normal01',
2154: 'rune-giant-head-laser',
2155: 'rune-giant-body01',
2156: 'rune-giant-body02',
2157: 'rune-giant-hand01',
2158: 'rune-giant-foot01',
2159: 'cotton-plant',
2160: 'cotton',
2161: 'turtle-body',
2162: 'turtle-head',
2163: 'turtle-foot',
2164: 'egg1',
2165: 'egg1',
2166: 'egg1',
2167: 'werewolf-head',
2168: 'werewolf-hand',
2169: 'werewolf-body',
2170: 'werewolf-foot',
2171: 'vampire-head',
2172: 'vampire-eyes',
2173: 'vampire-hand',
2174: 'frame-house01',
2175: 'frame-inn',
2176: 'frame-shop',
2177: 'frame-tower',
2178: 'building-stone-well',
2179: 'building-stilt-inn1',
2180: 'building-stilt-hut3',
2181: 'building-stilt-hut2',
2182: 'building-stilt-path',
2183: 'landingplace',
2184: 'stone-bridge',
2185: 'entrance-stairs',
2186: 'carpet1',
2187: 'carpet2',
2188: 'carpet3',
2189: 'framework-base',
2190: 'framework-floor',
2191: 'framework-floor-stairs',
2192: 'framework-wall',
2193: 'framework-wall-window',
2194: 'framework-wall-door',
2195: 'framework-wall-indoor',
2196: 'framework-wall-balcony',
2197: 'framework-wall-lamp',
2198: 'framework-roof1',
2199: 'framework-roof2',
2200: 'framework-roof3',
2201: 'framework-arc',
2202: 'stone-base',
2203: 'stone-floor',
2204: 'stone-floor-stairs',
2205: 'stone-wall',
2206: 'stone-wall-window',
2207: 'stone-wall-door',
2208: 'stone-wall-indoor',
2209: 'stone-wall-balcony',
2210: 'stone-wall-lamp',
2211: 'stone-roof1',
2212: 'stone-roof2',
2213: 'stone-roof3',
2214: 'stone-arc',
2215: 'whitewood-base',
2216: 'whitewood-floor',
2217: 'whitewood-floor-stairs',
2218: 'whitewood-wall',
2219: 'whitewood-wall-window',
2220: 'whitewood-wall-door',
2221: 'whitewood-wall-indoor',
2222: 'whitewood-wall-balcony',
2223: 'whitewood-wall-lamp',
2224: 'whitewood-roof1',
2225: 'whitewood-roof2',
2226: 'whitewood-roof3',
2227: 'whitewood-arc',
2228: 'clay-base',
2229: 'clay-floor',
2230: 'clay-floor-stairs',
2231: 'clay-wall',
2232: 'clay-wall-window',
2233: 'clay-wall-door',
2234: 'clay-wall-indoor',
2235: 'clay-wall-balcony',
2236: 'clay-wall-lamp',
2237: 'clay-roof1',
2238: 'clay-roof2',
2239: 'clay-roof3',
2240: 'clay-arc',
2241: 'clay-entrance-stairs',
2242: 'whiteclay-entrance-stairs',
2243: 'whiteclay-base',
2244: 'whiteclay-floor',
2245: 'whiteclay-floor-stairs',
2246: 'whiteclay-wall',
2247: 'whiteclay-wall-window',
2248: 'whiteclay-wall-door',
2249: 'whiteclay-wall-indoor',
2250: 'whiteclay-wall-balcony',
2251: 'whiteclay-wall-lamp',
2252: 'whiteclay-roof1',
2253: 'whiteclay-roof2',
2254: 'whiteclay-roof3',
2255: 'whiteclay-arc',
2256: 'antiqueruin-entrance-stairs',
2257: 'antiqueruin-base',
2258: 'antiqueruin-floor',
2259: 'antiqueruin-floor-stairs',
2260: 'antiqueruin-wall',
2261: 'antiqueruin-roof1',
2262: 'antiqueruin-roof2',
2263: 'antiqueruin-roof3',
2264: 'antiqueruin-arc',
2265: 'jungleruin-entrance-stairs',
2266: 'jungleruin-base',
2267: 'jungleruin-floor',
2268: 'jungleruin-floor-stairs',
2269: 'jungleruin-wall',
2270: 'jungleruin-wall-door',
2271: 'jungleruin-wall-indoor',
2272: 'jungleruin-roof1',
2273: 'jungleruin-roof2',
2274: 'jungleruin-roof3',
2275: 'jungleruin-arc',
2276: 'desertruin-entrance-stairs',
2277: 'desertruin-base',
2278: 'desertruin-floor',
2279: 'desertruin-floor-stairs',
2280: 'desertruin-wall',
2281: 'desertruin-wall-door',
2282: 'desertruin-wall-indoor',
2283: 'desertruin-roof1',
2284: 'desertruin-roof2',
2285: 'desertruin-roof3',
2286: 'desertruin-arc',
2287: 'wood-base',
2288: 'wood-floor',
2289: 'wood-floor-stairs',
2290: 'wood-wall',
2291: 'wood-wall-window',
2292: 'wood-wall-door',
2293: 'wood-wall-indoor',
2294: 'wood-wall-balcony',
2295: 'wood-wall-lamp',
2296: 'wood-roof1',
2297: 'wood-roof2',
2298: 'wood-roof3',
2299: 'wood-arc',
2300: 'palm-leaf',
2301: 'palm-leaf-diagonal',
2302: 'white-castle-round-tower',
2303: 'building-warrior',
2304: 'building-ranger',
2305: 'building-rogue',
2306: 'building-mage',
2307: 'building-smithy',
2308: 'building-carpentersshop',
2309: 'building-weavingmill',
2310: 'cactus1',
2311: 'cactus2',
2313: 'painting01',
2314: 'painting02',
2315: 'icon-talk',
2316: 'icon-analyze',
2317: 'icon-vendor',
2318: 'digested-leftovers01',
2319: 'digested-leftovers02',
2320: 'digested-leftovers03',
2321: 'digested-leftovers04',
2322: 'flower-vase1',
2323: 'flower-vase2',
2324: 'flower-vase3',
2325: 'flower-vase4',
2326: 'flower-vase5',
2327: 'flower-vase6',
2328: 'flower-vase7',
2329: 'flower-vase8',
2330: 'flower-vase9',
2331: 'pitchfork',
2332: 'pumpkin',
2333: 'pumpkin-muffin',
2334: 'pineapple',
2335: 'pineapple-slice',
2336: 'spiribit',
2337: 'iron-nugget',
2338: 'silver-nugget',
2339: 'gold-nugget',
2340: 'emerald-nugget',
2341: 'sapphire-nugget',
2342: 'ruby-nugget',
2343: 'diamond-nugget',
2344: 'iron-cube',
2345: 'silver-cube',
2346: 'gold-cube',
2347: 'wood-cube',
2348: 'fire-cube',
2349: 'ice-cube',
2350: 'unholy-cube',
2351: 'wind-cube',
2352: 'cobweb',
2357: 'ice-crystal',
2358: 'ice-crystal-helmet',
2359: 'ice-crystal-shoulder',
2360: 'ice-crystal-chest',
2361: 'ice-crystal-boot',
2362: 'ice-crystal-glove',
2363: 'ice-coated-yarn',
2364: 'sandstone',
2365: 'wood-log',
2366: 'parrot-feather',
2367: 'parrot-beak',
2368: 'parrot-glove',
2369: 'parrot-chest',
2370: 'parrot-boot',
2371: 'parrot-helmet',
2372: 'parrot-shoulder',
2373: 'mammoth-hair',
2374: 'mammoth-glove',
2375: 'mammoth-chest',
2376: 'mammoth-boot',
2377: 'mammoth-helmet',
2378: 'mammoth-shoulder',
2379: 'bullterrier-head1',
2380: 'bullterrier-head2',
2381: 'bullterrier-head3',
2382: 'gnobold-foot',
2383: 'gnobold-head',
2384: 'gnobold-hand',
2385: 'insectoid-foot',
2386: 'insectoid-head',
2387: 'insectoid-hand',
2388: 'insectoid-body',
2389: 'flask',
2390: 'water-flask',
2391: 'radish-slice',
2392: 'onion-slice',
2393: 'mushroom-spit',
2394: 'plant-fiber',
2395: 'soup-ginseng',
2396: 'unknown',
2397: 'desert-rib',
2398: 'desert-skull',
2399: 'slime-green',
2400: 'slime-pink',
2401: 'slime-yellow',
2402: 'slime-blue',
2403: 'frightener-head',
2404: 'frightener-eyes',
2405: 'sand-horror-head',
2406: 'sand-horror-hand',
2407: 'seastar',
2408: 'bread',
2409: 'sandwich',
2410: 'fish1',
2411: 'fish2',
2412: 'shark',
2413: 'lantern-fish',
2414: 'lantern-fish-eyes',
2415: 'mawfish',
2416: 'piranha',
2417: 'blowfish',
2418: 'seahorse',
2421: 'formula',
2422: 'key1',
2423: 'flowers2',
2424: 'flowers',
2425: 'grass',
2426: 'grass2',
2427: 'grass3',
2428: 'lava-flower',
2429: 'lava-grass',
2430: 'thorn-plant',
2431: 'echinacea2',
2432: 'leaf',
2433: 'lantern02',
2434: 'torch',
2435: 'stone',
2436: 'stone2',
2437: 'tendril',
2438: 'tulips-colorful',
2439: 'cornflower',
2440: 'reed',
2441: 'pumpkin-leaves',
2442: 'pineapple-leaves',
2443: 'sunflower',
2444: 'bean-tendril',
2445: 'desert-flower01',
2446: 'desert-flower02',
2447: 'wheat',
2448: 'corn',
2449: 'water-lily01',
2450: 'water-lily02',
2451: 'inn-sign',
2452: 'shop-sign',
2453: 'weapon-shop-sign',
2454: 'armor-shop-sign',
2455: 'identifier-sign',
2456: 'smithy-sign',
2457: 'carpentersshop-sign',
2458: 'weavingmill-sign',
2459: 'ivy',
2460: 'wall-roses-red',
2461: 'wall-roses-white',
2462: 'christmas-tree',
2463: 'underwater-plant',
2464: 'alga',
2465: 'coral',
2466: 'inca-art1',
2467: 'inca-art2',
2468: 'inca-art3',
2469: 'inca-art4',
2470: 'crest1',
2471: 'castle-chain',
2472: 'wall-skull',
2473: 'torch-read',
2474: 'torch-green',
2475: 'torch-blue',
2476: 'torch-yellow',
2477: 'liana',
2478: 'chandelier',
2479: 'cobwebs',
2480: 'cobwebs2',
2481: 'goddess2',
2482: 'door',
2483: 'big-door',
2484: 'window',
2485: 'castle-window',
2486: 'gate',
2487: 'spike-trap',
2488: 'stomp-trap',
2489: 'lever',
2490: 'chest-base02',
2491: 'chest-top02',
2492: 'table',
2493: 'stone-table',
2494: 'sandstone-table',
2495: 'stone-stool',
2496: 'sandstone-stool',
2497: 'stool',
2498: 'bench',
2499: 'bed',
2500: 'bedtable',
2501: 'cupboard',
2502: 'market-stand1',
2503: 'market-stand2',
2504: 'market-stand3',
2505: 'barrel',
2506: 'crate',
2507: 'open-crate',
2508: 'sack',
2509: 'shelter',
2510: 'desktop',
2511: 'counter',
2512: 'shelf1',
2513: 'shelf2',
2514: 'shelf3',
2515: 'castle-shelf1',
2516: 'castle-shelf2',
2517: 'castle-shelf3',
2518: 'stone-shelf1',
2519: 'stone-shelf2',
2520: 'stone-shelf3',
2521: 'sandstone-shelf1',
2522: 'sandstone-shelf2',
2523: 'sandstone-shelf3',
2524: 'corpse',
2525: 'runestone',
2526: 'flower-box01',
2527: 'flower-box02',
2528: 'flower-box03',
2529: 'street-light02',
2530: 'street-light01',
2531: 'fence01',
2532: 'fence02',
2533: 'fence03',
2534: 'fence04',
2535: 'furnace',
2536: 'anvil',
2537: 'sawbench',
2538: 'workbench',
2539: 'customization-bench',
2540: 'spinningwheel',
2541: 'loom',
2542: 'overground-dungeon01',
2543: 'underground-dungeon01',
2544: 'castle-arc',
2545: 'temple-arc',
2546: 'bomb1',
2547: 'glider',
2548: 'boat2',
2549: 'stars',
2550: 'plaster',
2551: 'zzz',
2552: 'heart',
2553: 'angry',
2554: 'campfire',
2555: 'tent',
2556: 'beach-umbrella',
2557: 'beach-towel',
2558: 'wood-mat',
2559: 'compass2',
2560: 'mission',
2561: 'city',
2562: 'skull',
2563: 'airship',
2564: 'sphinx01',
2565: 'obelisk',
2566: 'manacube',
2567: 'maptile',
2568: 'cloud02'
}
MODEL_IDS = {v: k for k, v in MODEL_NAMES.items()}
ITEM_NAMES = {
(1, 0): 'Cookie',
(1, 1): 'LifePotion',
(1, 2): 'CactusPotion',
(1, 3): 'ManaPotion',
(1, 4): 'GinsengSoup',
(1, 5): 'SnowBerryMash',
(1, 6): 'MushroomSpit',
(1, 7): 'Bomb',
(1, 8): 'PineappleSlice',
(1, 9): 'PumpkinMuffin',
(2, 0): 'Formula',
(3, 0): 'Sword',
(3, 1): 'Axe',
(3, 2): 'Mace',
(3, 3): 'Dagger',
(3, 4): 'Fist',
(3, 5): 'Longsword',
(3, 6): 'Bow',
(3, 7): 'Crossbow',
(3, 8): 'Boomerang',
(3, 9): 'Arrow',
(3, 10): 'Staff',
(3, 11): 'Wand',
(3, 12): 'Bracelet',
(3, 13): 'Shield',
(3, 14): 'Arrows',
(3, 15): 'Greatsword',
(3, 16): 'Greataxe',
(3, 17): 'Greatmace',
(3, 20): 'Torch',
(4, 0): 'ChestArmor',
(5, 0): 'Gloves',
(6, 0): 'Boots',
(7, 0): 'ShoulderArmor',
(8, 0): 'Amulet',
(9, 0): 'Ring',
(11, 0): 'Nugget',
(11, 1): 'Log',
(11, 2): 'Feather',
(11, 3): 'Horn',
(11, 4): 'Claw',
(11, 5): 'Fiber',
(11, 6): 'Cobweb',
(11, 7): 'Hair',
(11, 8): 'Crystal',
(11, 9): 'Yarn',
(11, 10): 'Cube',
(11, 11): 'Capsule',
(11, 12): 'Flask',
(11, 13): 'Orb',
(11, 14): 'Spirit',
(11, 15): 'Mushroom',
(11, 16): 'Pumpkin',
(11, 17): 'Pineapple',
(11, 18): 'RadishSlice',
(11, 19): 'ShimmerMushroom',
(11, 20): 'GinsengRoot',
(11, 21): 'OnionSlice',
(11, 22): 'Heartflower',
(11, 23): 'PricklyPear',
(11, 24): 'FrozenHeartflower',
(11, 25): 'Soulflower',
(11, 26): 'WaterFlask',
(11, 27): 'SnowBerry',
(12, 0): 'Coin',
(13, 0): 'PlatinumCoin',
(14, 0): 'Leftovers',
(15, 0): 'Beak',
(16, 0): 'Painting',
(18, 0): 'Candle',
(18, 1): 'Candle',
(19, 0): 'Pet',
(20, 0): 'Bait',
(20, 1): 'Bait',
(20, 2): 'Bait',
(20, 3): 'Bait',
(20, 4): 'Bait',
(20, 5): 'Bait',
(20, 6): 'Bait',
(20, 7): 'Bait',
(20, 8): 'Bait',
(20, 9): 'Bait',
(20, 10): 'Bait',
(20, 11): 'Bait',
(20, 12): 'Bait',
(20, 13): 'Bait',
(20, 14): 'Bait',
(20, 15): 'Bait',
(20, 16): 'Bait',
(20, 17): 'Bait',
(20, 18): 'Bait',
(20, 19): 'BubbleGum',
(20, 20): 'Bait',
(20, 21): 'Bait',
(20, 22): 'VanillaCupcake',
(20, 23): 'ChocolateCupcake',
(20, 24): 'Bait',
(20, 25): 'CinnamonRole',
(20, 26): 'Waffle',
(20, 27): 'Croissant',
(20, 28): 'Bait',
(20, 29): 'Bait',
(20, 30): 'Candy',
(20, 31): 'Bait',
(20, 32): 'Bait',
(20, 33): 'PumpkinMash',
(20, 34): 'CottonCandy',
(20, 35): 'Carrot',
(20, 36): 'BlackberryMarmelade',
(20, 37): 'GreenJelly',
(20, 38): 'PinkJelly',
(20, 39): 'YellowJelly',
(20, 40): 'BlueJelly',
(20, 41): 'Bait',
(20, 42): 'Bait',
(20, 43): 'Bait',
(20, 44): 'Bait',
(20, 45): 'Bait',
(20, 46): 'Bait',
(20, 47): 'Bait',
(20, 48): 'Bait',
(20, 49): 'Bait',
(20, 50): 'BananaSplit',
(20, 51): 'Bait',
(20, 52): 'Bait',
(20, 53): 'Popcorn',
(20, 54): 'Bait',
(20, 55): 'LicoriceCandy',
(20, 56): 'CerealBar',
(20, 57): 'SaltedCaramel',
(20, 58): 'GingerTartlet',
(20, 59): 'MangoJuice',
(20, 60): 'FruitBasket',
(20, 61): 'MelonIceCream',
(20, 62): 'BloodOrangeJuice',
(20, 63): 'MilkChocolateBar',
(20, 64): 'MintChocolateBar',
(20, 65): 'WhiteChocolateBar',
(20, 66): 'CaramelChocolateBar',
(20, 67): 'ChocolateCookie',
(20, 68): 'Bait',
(20, 69): 'Bait',
(20, 70): 'Bait',
(20, 71): 'Bait',
(20, 72): 'Bait',
(20, 73): 'Bait',
(20, 74): 'SugarCandy',
(20, 75): 'AppleRing',
(20, 76): 'Bait',
(20, 77): 'Bait',
(20, 78): 'Bait',
(20, 79): 'Bait',
(20, 80): 'Bait',
(20, 81): 'Bait',
(20, 82): 'Bait',
(20, 83): 'Bait',
(20, 84): 'Bait',
(20, 85): 'Bait',
(20, 86): 'WaterIce',
(20, 87): 'ChocolateDonut',
(20, 88): 'Pancakes',
(20, 89): 'Bait',
(20, 90): 'StrawberryCake',
(20, 91): 'ChocolateCake',
(20, 92): 'Lollipop',
(20, 93): 'Softice',
(20, 94): 'Bait',
(20, 95): 'Bait',
(20, 96): 'Bait',
(20, 97): 'Bait',
(20, 98): 'CandiedApple',
(20, 99): 'DateCookie',
(20, 100): 'Bait',
(20, 101): 'Bait',
(20, 102): 'Bread',
(20, 103): 'Curry',
(20, 104): 'Lolly',
(20, 105): 'LemonTart',
(20, 106): 'StrawberryCocktail',
(20, 107): 'Bait',
(20, 108): 'Bait',
(20, 109): 'Bait',
(20, 110): 'Bait',
(20, 111): 'Bait',
(20, 112): 'Bait',
(20, 113): 'Bait',
(20, 114): 'Bait',
(20, 115): 'Bait',
(20, 116): 'Bait',
(20, 117): 'Bait',
(20, 118): 'Bait',
(20, 119): 'Bait',
(20, 120): 'Bait',
(20, 121): 'Bait',
(20, 122): 'Bait',
(20, 123): 'Bait',
(20, 124): 'Bait',
(20, 125): 'Bait',
(20, 126): 'Bait',
(20, 127): 'Bait',
(20, 128): 'Bait',
(20, 129): 'Bait',
(20, 130): 'Bait',
(20, 131): 'Bait',
(20, 132): 'Bait',
(20, 133): 'Bait',
(20, 134): 'Bait',
(20, 135): 'Bait',
(20, 136): 'Bait',
(20, 137): 'Bait',
(20, 138): 'Bait',
(20, 139): 'Bait',
(20, 140): 'Bait',
(20, 141): 'Bait',
(20, 142): 'Bait',
(20, 143): 'Bait',
(20, 144): 'Bait',
(20, 145): 'Bait',
(20, 146): 'Bait',
(20, 147): 'Bait',
(20, 148): 'Bait',
(20, 149): 'Bait',
(20, 150): 'Bait',
(20, 151): 'BiscuitRole',
(20, 152): 'Bait',
(20, 153): 'Bait',
(20, 154): 'Bait',
(20, 155): 'Bait',
(21, 0): 'Amulet1',
(21, 1): 'Amulet2',
(21, 2): 'JewelCase',
(21, 3): 'Key',
(21, 4): 'Medicine',
(21, 5): 'Antivenom',
(21, 6): 'BandAid',
(21, 7): 'Crutch',
(21, 8): 'Bandage',
(21, 9): 'Salve',
(23, 0): 'HangGlider',
(23, 1): 'Boat',
(24, 0): 'Lamp',
(25, 0): 'ManaCube'
}
ITEM_IDS = {v: k for k, v in ITEM_NAMES.items()}
STATIC_NAMES = {
0: 'Statue',
1: 'Door',
2: 'BigDoor',
3: 'Window',
4: 'CastleWindow',
5: 'Gate',
6: 'FireTrap',
7: 'SpikeTrap',
8: 'StompTrap',
9: 'Lever',
10: 'Chest',
11: 'ChestTop02',
12: 'Table',
13: 'Table',
14: 'Table',
15: 'Stool',
16: 'Stool',
17: 'Stool',
18: 'Bench',
19: 'Bed',
20: 'BedTable',
21: 'MarketStand1',
22: 'MarketStand2',
23: 'MarketStand3',
24: 'Barrel',
25: 'Crate',
26: 'OpenCrate',
27: 'Sack',
28: 'Shelter',
29: 'Cupboard',
30: 'Desktop',
31: 'Counter',
32: 'Shelf1',
33: 'Shelf2',
34: 'Shelf3',
35: 'CastleShelf1',
36: 'CastleShelf2',
37: 'CastleShelf3',
38: 'StoneShelf1',
39: 'StoneShelf2',
40: 'StoneShelf3',
41: 'SandstoneShelf1',
42: 'SandstoneShelf2',
43: 'SandstoneShelf3',
44: 'Corpse',
45: 'RuneStone',
46: 'Artifact',
47: 'FlowerBox1',
48: 'FlowerBox2',
49: 'FlowerBox3',
50: 'StreetLight',
51: 'FireStreetLight',
52: 'Fence1',
53: 'Fence2',
54: 'Fence3',
55: 'Fence4',
56: 'Vase1',
57: 'Vase2',
58: 'Vase3',
59: 'Vase4',
60: 'Vase5',
61: 'Vase6',
62: 'Vase7',
63: 'Vase8',
64: 'Vase9',
65: 'Campfire',
66: 'Tent',
67: 'BeachUmbrella',
68: 'BeachTowel',
69: 'SleepingMat',
71: 'Furnace',
72: 'Anvil',
73: 'SpinningWheel',
74: 'Loom',
75: 'SawBench',
76: 'Workbench',
77: 'CustomizationBench'
}
STATIC_IDS = {v: k for k, v in STATIC_NAMES.items()}
STATIC_MODELS = {
0: 2481,
1: 2482,
2: 2483,
3: 2484,
4: 2485,
5: 2486,
6: None,
7: 2487,
8: 2488,
9: 2489,
10: 2490,
11: 2491,
12: 2492,
13: 2493,
14: None,
15: 2495,
16: 2497,
17: 2496,
18: 2498,
19: 2499,
20: 2500,
21: 2502,
22: 2503,
23: 2504,
24: 2505,
25: 2506,
26: 2507,
27: 2508,
28: 2509,
29: 2501,
30: 2510,
31: 2511,
32: 2512,
33: 2513,
34: 2514,
35: 2515,
36: 2516,
37: 2517,
38: 2518,
39: 2519,
40: 2520,
41: 2521,
42: 2522,
43: 2523,
44: 2524,
45: 2525,
46: 2079,
47: 2526,
48: 2527,
49: 2528,
50: 2529,
51: 2530,
52: 2531,
53: 2532,
54: 2533,
55: 2534,
56: 2322,
57: 2323,
58: 2324,
59: 2325,
60: 2326,
61: 2327,
62: 2328,
63: 2329,
64: 2330,
65: 2554,
66: 2555,
67: 2556,
68: 2557,
69: 2558,
70: None,
71: 2535,
72: 2536,
73: 2540,
74: 2541,
75: 2537,
76: 2538,
77: 2539
}
ENTITY_NAMES = {
0: 'ElfMale',
1: 'ElfFemale',
2: 'HumanMale',
3: 'HumanFemale',
4: 'GoblinMale',
5: 'GoblinFemale',
6: 'Bullterrier',
7: 'LizardmanMale',
8: 'LizardmanFemale',
9: 'DwarfMale',
10: 'DwarfFemale',
11: 'OrcMale',
12: 'OrcFemale',
13: 'FrogmanMale',
14: 'FrogmanFemale',
15: 'UndeadMale',
16: 'UndeadFemale',
17: 'Skeleton',
18: 'OldMan',
19: 'Collie',
20: 'ShepherdDog',
21: 'SkullBull',
22: 'Alpaca',
23: 'BrownAlpaca',
24: 'Egg',
25: 'Turtle',
26: 'Terrier',
27: 'ScottishTerrier',
28: 'Wolf',
29: 'Panther',
30: 'Cat',
31: 'BrownCat',
32: 'WhiteCat',
33: 'Pig',
34: 'Sheep',
35: 'Bunny',
36: 'Porcupine',
37: 'GreenSlime',
38: 'PinkSlime',
39: 'YellowSlime',
40: 'BlueSlime',
41: 'Frightener',
42: 'SandHorror',
43: 'Wizard',
44: 'Bandit',
45: 'Witch',
46: 'Ogre',
47: 'Rockling',
48: 'Gnoll',
49: 'PolarGnoll',
50: 'Monkey',
51: 'Gnobold',
52: 'Insectoid',
53: 'Hornet',
54: 'InsectGuard',
55: 'Crow',
56: 'Chicken',
57: 'Seagull',
58: 'Parrot',
59: 'Bat',
60: 'Fly',
61: 'Midge',
62: 'Mosquito',
63: 'PlainRunner',
64: 'LeafRunner',
65: 'SnowRunner',
66: 'DesertRunner',
67: 'Peacock',
68: 'Frog',
69: 'PlantCreature',
70: 'RadishCreature',
71: 'Onionling',
72: 'DesertOnionling',
73: 'Devourer',
74: 'Duckbill',
75: 'Crocodile',
76: 'SpikeCreature',
77: 'Anubis',
78: 'Horus',
79: 'Jester',
80: 'Spectrino',
81: 'Djinn',
82: 'Minotaur',
83: 'NomadMale',
84: 'NomadFemale',
85: 'Imp',
86: 'Spitter',
87: 'Mole',
88: 'Biter',
89: 'Koala',
90: 'Squirrel',
91: 'Raccoon',
92: 'Owl',
93: 'Penguin',
94: 'Werewolf',
96: 'Zombie',
97: 'Vampire',
98: 'Horse',
99: 'Camel',
100: 'Cow',
101: 'Dragon',
102: 'BarkBeetle',
103: 'FireBeetle',
104: 'SnoutBeetle',
105: 'LemonBeetle',
106: 'Crab',
107: 'SeaCrab',
108: 'Troll',
109: 'DarkTroll',
110: 'HellDemon',
111: 'Golem',
112: 'EmberGolem',
113: 'SnowGolem',
114: 'Yeti',
115: 'Cyclops',
116: 'Mammoth',
117: 'Lich',
118: 'RuneGiant',
119: 'Saurian',
120: 'Bush',
121: 'SnowBush',
122: 'SnowBerryBush',
123: 'CottonPlant',
124: 'Scrub',
125: 'CobwebScrub',
126: 'FireScrub',
127: 'Ginseng',
128: 'Cactus',
130: 'ThornTree',
131: 'GoldDeposit',
132: 'IronDeposit',
133: 'SilverDeposit',
134: 'SandstoneDeposit',
135: 'EmeraldDeposit',
136: 'SapphireDeposit',
137: 'RubyDeposit',
138: 'DiamondDeposit',
139: 'IceCrystalDeposit',
140: 'Scarecrow',
141: 'Aim',
142: 'Dummy',
143: 'Vase',
144: 'Bomb',
145: 'SapphireFish',
146: 'LemonFish',
147: 'Seahorse',
148: 'Mermaid',
149: 'Merman',
150: 'Shark',
151: 'Bumblebee',
152: 'LanternFish',
153: 'MawFish',
154: 'Piranha',
155: 'Blowfish'
}
ENTITY_IDS = {v: k for k, v in ENTITY_NAMES.items()}
LOCATION_NAMES = {
(1, 0): 'Village',
(1, 1): 'Village',
(1, 2): 'Village',
(1, 3): 'Village',
(1, 4): 'Village',
(1, 5): 'Village',
(1, 6): 'Village',
(1, 7): 'Village',
(1, 8): 'Village',
(1, 9): 'Village',
(2, 0): 'Mountain',
(2, 1): 'Mountain',
(2, 2): 'Mountain',
(2, 3): 'Mountain',
(2, 4): 'Mountain',
(2, 5): 'Mountain',
(2, 6): 'Mountain',
(2, 7): 'Mountain',
(2, 8): 'Mountain',
(2, 9): 'Mountain',
(3, 0): 'Forest',
(3, 1): 'Forest',
(3, 2): 'Forest',
(3, 3): 'Forest',
(3, 4): 'Forest',
(3, 5): 'Forest',
(3, 6): 'Forest',
(3, 7): 'Forest',
(3, 8): 'Forest',
(3, 9): 'Forest',
(4, 0): 'Lake',
(4, 1): 'Lake',
(4, 2): 'Lake',
(4, 3): 'Lake',
(4, 4): 'Lake',
(4, 5): 'Lake',
(4, 6): 'Lake',
(4, 7): 'Lake',
(4, 8): 'Lake',
(4, 9): 'Lake',
(5, 0): 'Ruins',
(5, 1): 'Ruins',
(5, 2): 'Ruins',
(5, 3): 'Ruins',
(5, 4): 'Gravesite',
(6, 0): 'Canyon',
(6, 1): 'Canyon',
(6, 2): 'Canyon',
(6, 3): 'Canyon',
(6, 4): 'Canyon',
(6, 5): 'Canyon',
(6, 6): 'Canyon',
(6, 7): 'Canyon',
(6, 8): 'Canyon',
(6, 9): 'Canyon',
(7, 0): 'Valley',
(7, 1): 'Valley',
(7, 2): 'Valley',
(7, 3): 'Valley',
(7, 4): 'Valley',
(7, 5): 'Valley',
(7, 6): 'Valley',
(7, 7): 'Valley',
(7, 8): 'Valley',
(7, 9): 'Valley',
(8, 0): 'Crater',
(8, 1): 'Crater',
(8, 2): 'Crater',
(8, 3): 'Crater',
(8, 4): 'Crater',
(8, 5): 'Crater',
(8, 6): 'Crater',
(8, 7): 'Crater',
(8, 8): 'Crater',
(8, 9): 'Crater',
(9, 0): 'Cave',
(10, 0): 'Portal',
(11, 0): 'Rock',
(11, 1): 'Rock',
(11, 2): 'Rock',
(11, 3): 'Rock',
(11, 4): 'Rock',
(11, 5): 'Rock',
(11, 6): 'Rock',
(11, 7): 'Rock',
(11, 8): 'Rock',
(11, 9): 'Rock',
(12, 0): 'Tree',
(12, 1): 'Tree',
(12, 2): 'Tree',
(12, 3): 'Tree',
(12, 4): 'Tree',
(12, 5): 'Tree',
(12, 6): 'Tree',
(12, 7): 'Tree',
(12, 8): 'Tree',
(12, 9): 'Tree',
(13, 0): 'Peak',
(13, 1): 'Peak',
(13, 2): 'Peak',
(13, 3): 'Peak',
(13, 4): 'Peak',
(13, 5): 'Peak',
(13, 6): 'Peak',
(13, 7): 'Peak',
(13, 8): 'Peak',
(13, 9): 'Peak',
(14, 0): 'Castle',
(14, 1): 'Ruins',
(14, 2): 'Catacombs',
(14, 3): 'Palace',
(14, 4): 'Temple',
(14, 5): 'Pyramid',
(15, 0): 'Island',
(15, 1): 'Island',
(15, 2): 'Island',
(15, 3): 'Island',
(15, 4): 'Island',
(15, 5): 'Island',
(15, 6): 'Island',
(15, 7): 'Island',
(15, 8): 'Island',
(15, 9): 'Island'
}
LOCATION_IDS = {v: k for k, v in LOCATION_NAMES.items()}
QUARTER_NAMES = {
(1, 1): 'Trade Quarter',
(1, 2): 'Crafting Quarter',
(1, 3): 'Class Quarter',
(1, 4): 'Pet Quarter',
(4, 0): 'Portal',
(5, 0): 'Palace'
}
QUARTER_IDS = {v: k for k, v in QUARTER_NAMES.items()}
SKILL_NAMES = {
0: 'PetTaming',
1: 'PetRiding',
2: 'Climbing',
3: 'HangGliding',
4: 'Swimming',
5: 'BoatDriving',
6: 'Ability1',
7: 'Ability2',
8: 'Ability3',
9: 'Ability4',
10: 'Ability5'
}
SKILL_IDS = {v: k for k, v in SKILL_NAMES.items()}
ABILITY_NAMES = {
21: 'RangerKick',
34: 'HealingStream',
48: 'Intercept',
49: 'Teleport',
50: 'Retreat',
54: 'Smash',
79: 'Sneak',
86: 'Cyclone',
88: 'FireExplosion',
96: 'Shuriken',
97: 'Camouflage',
99: 'Aim',
100: 'Swiftness',
101: 'Bulwark',
102: 'WarFrenzy',
103: 'ManaShield'
}
ABILITY_IDS = {v: k for k, v in ABILITY_NAMES.items()}
|
dracos/django
|
refs/heads/master
|
tests/migrations/test_add_many_to_many_field_initial/0001_initial.py
|
133
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='project',
name='tasks',
field=models.ManyToManyField(to='Task'),
),
]
|
syci/ingadhoc-odoo-addons
|
refs/heads/8.0
|
portal_account_distributor/product.py
|
2
|
# -*- coding: utf-8 -*-
from openerp import models, fields
class product_template(models.Model):
_inherit = 'product.template'
standard_price = fields.Float(
groups='base.group_user,base.group_portal_distributor')
|
pigmej/uwsgi_no_pp
|
refs/heads/master
|
plugins/graylog2/uwsgiplugin.py
|
21
|
NAME = 'graylog2'
CFLAGS = []
LDFLAGS = []
LIBS = ['-lz']
GCC_LIST = ['graylog2_plugin']
|
redhat-openstack/python-ironicclient
|
refs/heads/master-patches
|
ironicclient/openstack/common/apiclient/fake_client.py
|
3
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake server that "responds" to API methods with pre-canned responses.
All of these responses come from the spec, so if for some reason the spec's
wrong the tests might raise AssertionError. I've indicated in comments the
places where actual behavior differs from the spec.
"""
# W0102: Dangerous default value %s as argument
# pylint: disable=W0102
import json
import requests
import six
from six.moves.urllib import parse
from ironicclient.openstack.common.apiclient import client
def assert_has_keys(dct, required=[], optional=[]):
for k in required:
try:
assert k in dct
except AssertionError:
extra_keys = set(dct.keys()).difference(set(required + optional))
raise AssertionError("found unexpected keys: %s" %
list(extra_keys))
class TestResponse(requests.Response):
"""Wrap requests.Response and provide a convenient initialization.
"""
def __init__(self, data):
super(TestResponse, self).__init__()
self._content_consumed = True
if isinstance(data, dict):
self.status_code = data.get('status_code', 200)
# Fake the text attribute to streamline Response creation
text = data.get('text', "")
if isinstance(text, (dict, list)):
self._content = json.dumps(text)
default_headers = {
"Content-Type": "application/json",
}
else:
self._content = text
default_headers = {}
if six.PY3 and isinstance(self._content, six.string_types):
self._content = self._content.encode('utf-8', 'strict')
self.headers = data.get('headers') or default_headers
else:
self.status_code = data
def __eq__(self, other):
return (self.status_code == other.status_code and
self.headers == other.headers and
self._content == other._content)
class FakeHTTPClient(client.HTTPClient):
def __init__(self, *args, **kwargs):
self.callstack = []
self.fixtures = kwargs.pop("fixtures", None) or {}
if not args and "auth_plugin" not in kwargs:
args = (None, )
super(FakeHTTPClient, self).__init__(*args, **kwargs)
def assert_called(self, method, url, body=None, pos=-1):
"""Assert than an API method was just called.
"""
expected = (method, url)
assert self.callstack, \
"Expected %s %s but no calls were made." % expected
called = self.callstack[pos][0:2]
assert expected == called, 'Expected %s %s; got %s %s' % \
(expected + called)
if body is not None:
if self.callstack[pos][3] != body:
raise AssertionError('%r != %r' %
(self.callstack[pos][3], body))
def assert_called_anytime(self, method, url, body=None):
"""Assert than an API method was called anytime in the test.
"""
expected = (method, url)
assert self.callstack, \
"Expected %s %s but no calls were made." % expected
found = False
entry = None
for entry in self.callstack:
if expected == entry[0:2]:
found = True
break
assert found, 'Expected %s %s; got %s' % \
(method, url, self.callstack)
if body is not None:
assert entry[3] == body, "%s != %s" % (entry[3], body)
self.callstack = []
def clear_callstack(self):
self.callstack = []
def authenticate(self):
pass
def client_request(self, client, method, url, **kwargs):
# Check that certain things are called correctly
if method in ["GET", "DELETE"]:
assert "json" not in kwargs
# Note the call
self.callstack.append(
(method,
url,
kwargs.get("headers") or {},
kwargs.get("json") or kwargs.get("data")))
try:
fixture = self.fixtures[url][method]
except KeyError:
pass
else:
return TestResponse({"headers": fixture[0],
"text": fixture[1]})
# Call the method
args = parse.parse_qsl(parse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
resp = getattr(self, callback)(**kwargs)
if len(resp) == 3:
status, headers, body = resp
else:
status, body = resp
headers = {}
return TestResponse({
"status_code": status,
"text": body,
"headers": headers,
})
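# Hedged usage sketch (not part of the original module). It illustrates the
# fixtures mechanism described in the module docstring; the URL and payload
# below are hypothetical examples, not values taken from any real spec:
#
#     fixtures = {'/v1/nodes': {'GET': ({}, {'nodes': []})}}
#     fake = FakeHTTPClient(fixtures=fixtures)
#     fake.client_request(None, 'GET', '/v1/nodes')   # returns the canned body
#     fake.assert_called('GET', '/v1/nodes')          # verifies the call stack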
|
bobcyw/django
|
refs/heads/master
|
django/core/mail/backends/dummy.py
|
835
|
"""
Dummy email backend that does nothing.
"""
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
|
edeposit/edeposit.amqp
|
refs/heads/master
|
edeposit/amqp/settings.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module containing all necessary global variables for the package.
The module can also read user-defined data from two paths:
$HOME/:attr:`SETTINGS_PATH` and /etc/:attr:`SETTINGS_PATH`.
Note:
If the first path is found, the other is ignored.
Example of the configuration file (``$HOME/edeposit/amqp.json``)::
{
"RABBITMQ_HOST": "localhost",
"RABBITMQ_PORT": "2222"
}
Attributes
----------
"""
import json
import os
import os.path
# module configuration ========================================================
RABBITMQ_HOST = '127.0.0.1' #:
RABBITMQ_PORT = '5672' #:
RABBITMQ_USER_NAME = 'guest' #:
RABBITMQ_USER_PASSWORD = 'guest' #:
# aleph's settings
RABBITMQ_ALEPH_VIRTUALHOST = "aleph" #:
RABBITMQ_ALEPH_INPUT_QUEUE = "daemon" #:
RABBITMQ_ALEPH_OUTPUT_QUEUE = "plone" #:
RABBITMQ_ALEPH_EXCHANGE = "search" #:
RABBITMQ_ALEPH_INPUT_KEY = "request" #:
RABBITMQ_ALEPH_OUTPUT_KEY = "result" #:
RABBITMQ_ALEPH_EXCEPTION_KEY = "exception" #:
# calibre's settings
RABBITMQ_CALIBRE_VIRTUALHOST = "calibre" #:
RABBITMQ_CALIBRE_INPUT_QUEUE = "daemon" #:
RABBITMQ_CALIBRE_OUTPUT_QUEUE = "plone" #:
RABBITMQ_CALIBRE_EXCHANGE = "convert" #:
RABBITMQ_CALIBRE_INPUT_KEY = "request" #:
RABBITMQ_CALIBRE_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.ftp daemon
RABBITMQ_FTP_VIRTUALHOST = "ftp" #: Virtualhost for FTP module
RABBITMQ_FTP_INPUT_QUEUE = "daemon" #: Input Queue for FTP AMQP daemon
RABBITMQ_FTP_OUTPUT_QUEUE = "plone" #: Queue to put responses from daemon
RABBITMQ_FTP_EXCHANGE = "ftp"
RABBITMQ_FTP_INPUT_KEY = "request" #:
RABBITMQ_FTP_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.antivirus daemon
RABBITMQ_ANTIVIRUS_VIRTUALHOST = "antivirus" #: Virtualhost for AV module
RABBITMQ_ANTIVIRUS_INPUT_QUEUE = "daemon" #: Input Queue for AV AMQP daemon
RABBITMQ_ANTIVIRUS_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_ANTIVIRUS_EXCHANGE = "antivirus"
RABBITMQ_ANTIVIRUS_INPUT_KEY = "request" #:
RABBITMQ_ANTIVIRUS_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.harvester
RABBITMQ_HARVESTER_VIRTUALHOST = "harvester" #: Virtualhost for harvester
RABBITMQ_HARVESTER_INPUT_QUEUE = "daemon" #: Input Queue for harvester
RABBITMQ_HARVESTER_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_HARVESTER_EXCHANGE = "harvester"
RABBITMQ_HARVESTER_INPUT_KEY = "request" #:
RABBITMQ_HARVESTER_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.ltp
RABBITMQ_LTP_VIRTUALHOST = "ltp" #: Virtualhost for ltp
RABBITMQ_LTP_INPUT_QUEUE = "daemon" #: Input Queue for ltp
RABBITMQ_LTP_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_LTP_EXCHANGE = "ltp"
RABBITMQ_LTP_INPUT_KEY = "request" #:
RABBITMQ_LTP_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.pdfgen
RABBITMQ_PDFGEN_VIRTUALHOST = "pdfgen" #: Virtualhost for pdfgen
RABBITMQ_PDFGEN_INPUT_QUEUE = "daemon" #: Input Queue for pdfgen
RABBITMQ_PDFGEN_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_PDFGEN_EXCHANGE = "pdfgen"
RABBITMQ_PDFGEN_INPUT_KEY = "request" #:
RABBITMQ_PDFGEN_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.downloader
RABBITMQ_DOWNER_VIRTUALHOST = "downloader" #: Virtualhost for downloader
RABBITMQ_DOWNER_INPUT_QUEUE = "daemon" #: Input Queue for downloader
RABBITMQ_DOWNER_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_DOWNER_EXCHANGE = "downloader"
RABBITMQ_DOWNER_INPUT_KEY = "request" #:
RABBITMQ_DOWNER_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.storage
RABBITMQ_STORAGE_VIRTUALHOST = "storage" #: Virtualhost for storage
RABBITMQ_STORAGE_INPUT_QUEUE = "daemon" #: Input Queue for storage
RABBITMQ_STORAGE_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_STORAGE_EXCHANGE = "export"
RABBITMQ_STORAGE_INPUT_KEY = "request" #:
RABBITMQ_STORAGE_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.marcxml2mods
RABBITMQ_MX2MODS_VIRTUALHOST = "marcxml2mods" #: Virtualhost for marcxml2mods
RABBITMQ_MX2MODS_INPUT_QUEUE = "daemon" #: Input Queue for marcxml2mods
RABBITMQ_MX2MODS_OUTPUT_QUEUE = "plone" #: Queue to put responses
RABBITMQ_MX2MODS_EXCHANGE = "export"
RABBITMQ_MX2MODS_INPUT_KEY = "request" #:
RABBITMQ_MX2MODS_OUTPUT_KEY = "result" #:
# settings for edeposit.amqp.aleph_link_export
RABBITMQ_ALEPH_LINK_EXPORT_VIRTUALHOST = "aleph" #: Virtualhost
RABBITMQ_ALEPH_LINK_EXPORT_INPUT_QUEUE = "updater" #: Input Queue
RABBITMQ_ALEPH_LINK_EXPORT_OUTPUT_QUEUE = "plone" #: Output Queue
RABBITMQ_ALEPH_LINK_EXPORT_EXCHANGE = "update-links"
RABBITMQ_ALEPH_LINK_EXPORT_INPUT_KEY = "request" #:
RABBITMQ_ALEPH_LINK_EXPORT_OUTPUT_KEY = "result" #:
# Settings parser =============================================================
def get_amqp_settings():
"""
Return all settings as a dict in the following format::
{
"submodule_name": {
"vhost": VIRTUALHOST,
"exchange": EXCHANGE,
"queues": {
QUEUE_NAME: ROUTING_KEY,
QUEUE_NAME: ROUTING_KEY
},
"in_key": INPUT_KEY,
"out_key": OUTPUT_KEY
},
...
}
"""
amqp_settings = {}
for vhost in filter(lambda x: x.endswith("VIRTUALHOST"), globals().keys()):
vhost = "RABBITMQ_" + vhost.split("_")[1]
queues = {
globals()[vhost + "_INPUT_QUEUE"]: globals()[vhost + "_INPUT_KEY"],
globals()[vhost + "_OUTPUT_QUEUE"]: globals()[vhost + "_OUTPUT_KEY"]
}
amqp_settings[vhost.split("_")[-1].lower()] = {
"vhost": globals()[vhost + "_VIRTUALHOST"],
"exchange": globals()[vhost + "_EXCHANGE"],
"queues": queues,
"in_key": globals()[vhost + "_INPUT_KEY"],
"out_key": globals()[vhost + "_OUTPUT_KEY"]
}
return amqp_settings
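# Illustrative note (not part of the original module): with the defaults above,
# get_amqp_settings()["ftp"] evaluates to
#     {"vhost": "ftp",
#      "exchange": "ftp",
#      "queues": {"daemon": "request", "plone": "result"},
#      "in_key": "request",
#      "out_key": "result"}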
# user configuration reader ===================================================
_ALLOWED = [unicode, str, int, float]
SETTINGS_PATH = "/edeposit/amqp.json"
"""
Path which is appended to default search paths (``$HOME`` and ``/etc``).
Note:
It has to start with ``/``. The variable is **appended** to the default search
paths, so this doesn't mean that the path is absolute!
"""
def get_all_constants():
"""
Get a list of all uppercase, non-private globals (names that don't start with ``_``).
Returns:
list: Uppercase names defined in `globals()` (variables from this \
module).
"""
return filter(
lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,
filter( # filter _PRIVATE variables
lambda x: not x.startswith("_"),
globals().keys()
)
)
def substitute_globals(config_dict):
"""
Set global variables to values defined in `config_dict`.
Args:
config_dict (dict): dictionary with data, which are used to set \
`globals`.
Note:
`config_dict` has to be a dictionary, or it is ignored. Also, all
variables that are not already in globals, are not of a type defined in
:attr:`_ALLOWED` (str, int, float), or start with ``_`` are silently
ignored.
"""
constants = get_all_constants()
if type(config_dict) != dict:
return
for key in config_dict.keys():
if key in constants and type(config_dict[key]) in _ALLOWED:
globals()[key] = config_dict[key]
# try to read data from configuration paths ($HOME/SETTINGS_PATH,
# /etc/SETTINGS_PATH)
if "HOME" in os.environ and os.path.exists(os.environ["HOME"] + SETTINGS_PATH):
with open(os.environ["HOME"] + SETTINGS_PATH) as f:
substitute_globals(json.loads(f.read()))
elif os.path.exists("/etc" + SETTINGS_PATH):
with open("/etc" + SETTINGS_PATH) as f:
substitute_globals(json.loads(f.read()))
|
skg-net/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vcenter_license.py
|
27
|
#!/usr/bin/python
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: vcenter_license
short_description: Manage VMware vCenter license keys
description:
- Add and delete vCenter license keys.
version_added: '2.4'
author:
- Dag Wieers (@dagwieers)
requirements:
- pyVmomi
options:
labels:
description:
- The optional labels of the license key to manage in vSphere vCenter.
- This is a dictionary with key/value pairs.
default: {
'source': 'ansible'
}
license:
description:
- The license key to manage in vSphere vCenter.
required: yes
state:
description:
- Whether to add (C(present)) or remove (C(absent)) the license key.
choices: [absent, present]
default: present
notes:
- This module will also auto-assign the current vCenter to the license key
if the product matches the license key, and vCenter is currently assigned
an evaluation license only.
- The evaluation license (00000-00000-00000-00000-00000) is not listed
when unused.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Add a new vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: present
delegate_to: localhost
- name: Remove an (unused) vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: absent
delegate_to: localhost
'''
RETURN = r'''
licenses:
description: list of license keys after the module has executed
returned: always
type: list
sample:
- f600d-21ae3-5592b-249e0-cc341
- 143cc-0e942-b2955-3ea12-d006f
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import connect_to_api, vmware_argument_spec
def find_key(licenses, license):
for item in licenses:
if item.licenseKey == license:
return item
return None
def list_keys(licenses):
keys = []
for item in licenses:
# Filter out evaluation license key
if item.used is None:
continue
keys.append(item.licenseKey)
return keys
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
labels=dict(type='dict', default=dict(source='ansible')),
license=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
license = module.params['license']
state = module.params['state']
# FIXME: This does not seem to work on vCenter v6.0
labels = []
for k in module.params['labels']:
kv = vim.KeyValue()
kv.key = k
kv.value = module.params['labels'][k]
labels.append(kv)
result = dict(
changed=False,
diff=dict(),
)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
lm = content.licenseManager
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['before'] = '\n'.join(result['licenses']) + '\n'
if state == 'present' and license not in result['licenses']:
result['changed'] = True
if module.check_mode:
result['licenses'].append(license)
else:
lm.AddLicense(license, labels)
# Automatically assign to current vCenter, if needed
key = find_key(lm.licenses, license)
if content.about.name in key.name:
try:
lam = lm.licenseAssignmentManager
lam.UpdateAssignedLicense(entity=content.about.instanceUuid, licenseKey=license)
except:
module.warn('Could not assign "%s" (%s) to vCenter.' % (license, key.name))
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
elif state == 'absent' and license in result['licenses']:
# Check if key is in use
key = find_key(lm.licenses, license)
if key.used > 0:
module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used))
result['changed'] = True
if module.check_mode:
result['licenses'].remove(license)
else:
lm.RemoveLicense(license)
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
module.exit_json(**result)
if __name__ == '__main__':
main()
|
misakwa/thriftpy
|
refs/heads/develop
|
tests/test_parser.py
|
1
|
# -*- coding: utf-8 -*-
import pytest
from thriftpy.thrift import TType
from thriftpy.parser import load
from thriftpy.parser.exc import ThriftParserError, ThriftGrammerError
def test_comments():
load('parser-cases/comments.thrift')
def test_constants():
thrift = load('parser-cases/constants.thrift')
assert thrift.int16 == 3
assert thrift.int32 == 800
assert thrift.int64 == 123456789
assert thrift.tstr == 'hello world'
assert thrift.integer32 == 900
assert thrift.tdouble == 1.3
assert thrift.tlist == [1, 2, 3]
assert thrift.tset == set([1, 2, 3])
assert thrift.tmap1 == {'key': 'val'}
assert thrift.tmap2 == {'key': 32}
assert thrift.my_country == 4
assert thrift.tom == thrift.Person(name='tom')
assert thrift.country_map == {1: 'US', 2: 'UK', 3: 'CA', 4: 'CN'}
def test_include():
thrift = load('parser-cases/include.thrift', include_dirs=[
'./parser-cases'])
assert thrift.datetime == 1422009523
def test_tutorial():
thrift = load('parser-cases/tutorial.thrift', include_dirs=[
'./parser-cases'])
assert thrift.INT32CONSTANT == 9853
assert thrift.MAPCONSTANT == {'hello': 'world', 'goodnight': 'moon'}
assert thrift.Operation.ADD == 1 and thrift.Operation.SUBTRACT == 2 \
and thrift.Operation.MULTIPLY == 3 and thrift.Operation.DIVIDE == 4
work = thrift.Work()
assert work.num1 == 0 and work.num2 is None and work.op is None \
and work.comment is None
assert set(thrift.Calculator.thrift_services) == set([
'ping', 'add', 'calculate', 'zip', 'getStruct'])
def test_e_type_error():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_0.thrift')
assert 'Type error' in str(excinfo.value)
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_1.thrift')
assert 'Type error' in str(excinfo.value)
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_2.thrift')
assert 'Type error' in str(excinfo.value)
def test_value_ref():
thrift = load('parser-cases/value_ref.thrift')
assert thrift.container == {'key': [1, 2, 3]}
assert thrift.lst == [39, 899, 123]
def test_type_ref():
thrift = load('parser-cases/type_ref.thrift')
assert thrift.jerry == thrift.type_ref_shared.Writer(
name='jerry', age=26, country=thrift.type_ref_shared.Country.US)
assert thrift.book == thrift.type_ref_shared.Book(name='Hello World',
writer=thrift.jerry)
def test_e_value_ref():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_0.thrift')
assert excinfo.value
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_1.thrift')
assert str(excinfo.value) == ('Couldn\'t find a named value in enum Lang '
'for value 3')
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_2.thrift')
assert str(excinfo.value) == \
'No enum value or constant found named \'Cookbook\''
def test_enums():
thrift = load('parser-cases/enums.thrift')
assert thrift.Lang.C == 0
assert thrift.Lang.Go == 1
assert thrift.Lang.Java == 2
assert thrift.Lang.Javascript == 3
assert thrift.Lang.PHP == 4
assert thrift.Lang.Python == 5
assert thrift.Lang.Ruby == 6
assert thrift.Country.US == 1
assert thrift.Country.UK == 2
assert thrift.Country.CN == 3
assert thrift.OS.OSX == 0
assert thrift.OS.Win == 3
assert thrift.OS.Linux == 4
def test_structs():
thrift = load('parser-cases/structs.thrift')
assert thrift.Person.thrift_spec == {
1: (TType.STRING, 'name', False),
2: (TType.STRING, 'address', False)
}
assert thrift.Person.default_spec == [
('name', None), ('address', None)
]
assert thrift.Email.thrift_spec == {
1: (TType.STRING, 'subject', False),
2: (TType.STRING, 'content', False),
3: (TType.STRUCT, 'sender', thrift.Person, False),
4: (TType.STRUCT, 'recver', thrift.Person, True),
}
assert thrift.Email.default_spec == [
('subject', 'Subject'), ('content', None),
('sender', None), ('recver', None)
]
assert thrift.email == thrift.Email(
subject='Hello',
content='Long time no see',
sender=thrift.Person(name='jack', address='jack@gmail.com'),
recver=thrift.Person(name='chao', address='chao@gmail.com')
)
def test_e_structs():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_structs_0.thrift')
assert str(excinfo.value) == \
'Field \'name\' was required to create constant for type \'User\''
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_structs_1.thrift')
assert str(excinfo.value) == \
'No field named \'avatar\' was found in struct of type \'User\''
def test_service():
thrift = load('parser-cases/service.thrift')
assert thrift.EmailService.thrift_services == ['ping', 'send']
assert thrift.EmailService.ping_args.thrift_spec == {}
assert thrift.EmailService.ping_args.default_spec == []
assert thrift.EmailService.ping_result.thrift_spec == {
1: (TType.STRUCT, 'network_error', thrift.NetworkError, False)
}
assert thrift.EmailService.ping_result.default_spec == [
('network_error', None)
]
assert thrift.EmailService.send_args.thrift_spec == {
1: (TType.STRUCT, 'recver', thrift.User, False),
2: (TType.STRUCT, 'sender', thrift.User, False),
3: (TType.STRUCT, 'email', thrift.Email, False),
}
assert thrift.EmailService.send_args.default_spec == [
('recver', None), ('sender', None), ('email', None)
]
assert thrift.EmailService.send_result.thrift_spec == {
0: (TType.BOOL, 'success', False),
1: (TType.STRUCT, 'network_error', thrift.NetworkError, False)
}
assert thrift.EmailService.send_result.default_spec == [
('success', None), ('network_error', None)
]
def test_service_extends():
thrift = load('parser-cases/service_extends.thrift')
assert thrift.PingService.thrift_services == ['ping', 'getStruct']
def test_e_service_extends():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_service_extends_0.thrift')
assert 'Can\'t find service' in str(excinfo.value)
def test_e_dead_include():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_dead_include_0.thrift')
assert 'Dead including' in str(excinfo.value)
def test_e_grammer_error_at_eof():
with pytest.raises(ThriftGrammerError) as excinfo:
load('parser-cases/e_grammer_error_at_eof.thrift')
assert str(excinfo.value) == 'Grammer error at EOF'
def test_e_use_thrift_reserved_keywords():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_use_thrift_reserved_keywords.thrift')
assert 'Cannot use reserved language keyword' in str(excinfo.value)
|
raajitr/django_hangman
|
refs/heads/master
|
env/lib/python2.7/site-packages/setuptools/depends.py
|
336
|
import sys
import imp
import marshal
from distutils.version import StrictVersion
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from .py33compat import Bytecode
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified or if the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
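# Hedged usage sketch (not part of the original module); the distribution name
# and version below are hypothetical:
#
#     req = Require('Json', '1.0.3', 'json')
#     req.full_name()    # -> 'Json-1.0.3'
#     req.is_present()   # True if the 'json' module can be found
#     req.is_current()   # True if its __version__ compares >= 1.0.3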
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
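# Hedged usage sketch (not part of the original module): extract_constant()
# only reports literal assignments, e.g.
#
#     code = compile("__version__ = '1.2'", '<example>', 'exec')
#     extract_constant(code, '__version__', default=-1)   # -> '1.2'
#     extract_constant(code, 'missing', default=-1)       # -> None (name absent)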
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
|
Lilykos/invenio
|
refs/heads/master
|
invenio/modules/upgrader/upgrades/invenio_2013_12_05_new_index_doi.py
|
15
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_2013_09_25_virtual_indexes']
def info():
return "New DOI index"
def do_upgrade():
pass
def do_upgrade_atlantis():
doi_index = 27
run_sql("""
CREATE TABLE IF NOT EXISTS idxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""
CREATE TABLE IF NOT EXISTS idxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""
CREATE TABLE IF NOT EXISTS idxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""
CREATE TABLE IF NOT EXISTS idxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""
CREATE TABLE IF NOT EXISTS idxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""
CREATE TABLE IF NOT EXISTS idxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;
""" % doi_index)
run_sql("""INSERT INTO idxINDEX VALUES (%02d,'doi','This index contains words/phrases from DOI fields','0000-00-00 00:00:00', '', 'native','','No','No','No', 'BibIndexDOITokenizer')""" % doi_index)
run_sql("""INSERT INTO idxINDEX_idxINDEX (id_virtual, id_normal) VALUES (1, %02d)""" % doi_index)
run_sql("""INSERT INTO field VALUES (18,'doi','doi')""")
run_sql("""INSERT INTO idxINDEX_field (id_idxINDEX, id_field) VALUES (%02d, 18)""" % doi_index)
def estimate():
return 1
def pre_upgrade():
pass
def post_upgrade():
pass
|
bregman-arie/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/aix_inittab.py
|
46
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joris Weijters <joris.weijters@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Joris Weijters (@molekuul)
module: aix_inittab
short_description: Manages the inittab on AIX
description:
- Manages the inittab on AIX.
version_added: "2.3"
options:
name:
description:
- Name of the inittab entry.
required: yes
aliases: ['service']
runlevel:
description:
- Runlevel of the entry.
required: yes
action:
description:
- Action what the init has to do with this entry.
required: yes
choices:
- boot
- bootwait
- hold
- initdefault
- off
- once
- ondemand
- powerfail
- powerwait
- respawn
- sysinit
- wait
command:
description:
- What command has to run.
required: yes
insertafter:
description:
- The inittab entry after which the new entry should be inserted.
state:
description:
- Whether the entry should be present or absent in the inittab file.
choices: [ absent, present ]
default: present
notes:
- The changes are persistent across reboots. You need root rights to read or adjust the inittab with the C(lsitab), C(chitab),
C(mkitab) or C(rmitab) commands.
- Tested on AIX 7.1.
requirements:
- itertools
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: echo hello
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: present
become: yes
- name: Remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: echo hello
state: absent
become: yes
'''
RETURN = '''
name:
description: name of the adjusted inittab entry
returned: always
type: string
sample: startmyservice
msg:
description: action done with the inittab entry
returned: changed
type: string
sample: changed inittab entry startmyservice
changed:
description: whether the inittab changed or not
returned: always
type: boolean
sample: true
'''
# Import necessary libraries
import itertools
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
# Check if the entry exists: if not, return 'exist': False in the result dict;
# if it does, return 'exist': True together with the entry fields
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
# strip non-readable characters such as \n
values = map(lambda s: s.strip(), values)
existsdict = dict(itertools.izip(keys, values))
existsdict.update({'exist': True})
return existsdict
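# Worked example (not part of the original module): for an lsitab output line
# such as "startmyservice:4:once:echo hello", the parsing above yields
#     {'name': 'startmyservice', 'runlevel': '4', 'action': 'once',
#      'command': 'echo hello', 'exist': True}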
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['service']),
runlevel=dict(type='str', required=True),
action=dict(type='str', choices=[
'boot',
'bootwait',
'hold',
'initdefault',
'off',
'once',
'ondemand',
'powerfail',
'powerwait',
'respawn',
'sysinit',
'wait',
]),
command=dict(type='str', required=True),
insertafter=dict(type='str'),
state=dict(type='str', required=True, choices=['absent', 'present']),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
# if action is install or change,
if module.params['state'] == 'present':
# create new entry string
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
# If the current entry does not exist, or its fields differ, the entry
# will be created or changed accordingly
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
# If the entry does not exist create the entry
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
msg="could not remove entry grom inittab)", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
aam-at/tensorflow
|
refs/heads/master
|
tensorflow/lite/tools/convert_image_to_csv_test.py
|
17
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests image file conversion utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.lite.tools import convert_image_to_csv
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
PREFIX_PATH = resource_loader.get_path_to_datafile("../../core/lib/")
class ConvertImageToCsvTest(test_util.TensorFlowTestCase):
def testGetImageRaisesMissingFile(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "no_such.jpg")
with self.assertRaises(NotFoundError):
_ = convert_image_to_csv.get_image(64, 96, False, image_path)
def testGetImageSizeIsCorrect(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(64, 96, False, image_path)
self.assertEqual((96, 64, 3), image_data.shape)
def testGetImageConvertsToGrayscale(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "medium.jpg")
image_data = convert_image_to_csv.get_image(40, 20, True, image_path)
self.assertEqual((20, 40, 1), image_data.shape)
def testGetImageCanLoadPng(self):
image_path = os.path.join(PREFIX_PATH, "png", "testdata", "lena_rgba.png")
image_data = convert_image_to_csv.get_image(10, 10, False, image_path)
self.assertEqual((10, 10, 3), image_data.shape)
def testGetImageConvertsGrayscaleToColor(self):
image_path = os.path.join(PREFIX_PATH, "png", "testdata", "lena_gray.png")
image_data = convert_image_to_csv.get_image(23, 19, False, image_path)
self.assertEqual((19, 23, 3), image_data.shape)
def testGetImageColorValuesInRange(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(47, 31, False, image_path)
self.assertLessEqual(0, np.min(image_data))
self.assertGreaterEqual(255, np.max(image_data))
def testGetImageGrayscaleValuesInRange(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(27, 33, True, image_path)
self.assertLessEqual(0, np.min(image_data))
self.assertGreaterEqual(255, np.max(image_data))
def testArrayToIntCsv(self):
csv_string = convert_image_to_csv.array_to_int_csv(
np.array([[1, 2], [3, 4]]))
self.assertEqual("1,2,3,4", csv_string)
def testArrayToIntCsvRounding(self):
csv_string = convert_image_to_csv.array_to_int_csv(
np.array([[1.0, 2.0], [3.0, 4.0]]))
self.assertEqual("1,2,3,4", csv_string)
if __name__ == "__main__":
test.main()
|
huhongbo/dd-agent
|
refs/heads/master
|
aggregator.py
|
34
|
# stdlib
import logging
from time import time
# project
from checks.metric_types import MetricTypes
log = logging.getLogger(__name__)
# This is used to ensure that metrics with a timestamp older than
# RECENT_POINT_THRESHOLD_DEFAULT seconds (or the value passed in to
# the MetricsAggregator constructor) get discarded rather than being
# input into the incorrect bucket. Currently, the MetricsAggregator
# does not support submitting values for the past, and all values get
# submitted for the timestamp passed into the flush() function.
# The MetricsBucketAggregator uses times that are aligned to "buckets"
# that are the length of the interval that is passed into the
# MetricsBucketAggregator constructor.
RECENT_POINT_THRESHOLD_DEFAULT = 3600
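# Illustrative note (an assumption about the bucketing described above, not
# code from this file): aligning a timestamp to a bucket boundary amounts to
#     bucket_start = timestamp - (timestamp % interval)
# e.g. with interval=15, a point sampled at t=1003 lands in the bucket that
# starts at 990.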
class Infinity(Exception):
pass
class UnknownValue(Exception):
pass
class Metric(object):
"""
A base metric class that accepts points, slices them into time intervals
and performs roll-ups within those intervals.
"""
def sample(self, value, sample_rate, timestamp=None):
""" Add a point to the given metric. """
raise NotImplementedError()
def flush(self, timestamp, interval):
""" Flush all metrics up to the given timestamp. """
raise NotImplementedError()
class Gauge(Metric):
""" A metric that tracks a value at particular points in time. """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.value = None
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.last_sample_time = None
self.timestamp = time()
def sample(self, value, sample_rate, timestamp=None):
self.value = value
self.last_sample_time = time()
self.timestamp = timestamp
def flush(self, timestamp, interval):
if self.value is not None:
res = [self.formatter(
metric=self.name,
timestamp=self.timestamp or timestamp,
value=self.value,
tags=self.tags,
hostname=self.hostname,
device_name=self.device_name,
metric_type=MetricTypes.GAUGE,
interval=interval,
)]
self.value = None
return res
return []
class BucketGauge(Gauge):
""" A metric that tracks a value at particular points in time.
    The difference between this class and Gauge is that this class will
    report the gauge sample time as the time that the metric is flushed, as
    opposed to the time that the sample was collected.
"""
def flush(self, timestamp, interval):
if self.value is not None:
res = [self.formatter(
metric=self.name,
timestamp=timestamp,
value=self.value,
tags=self.tags,
hostname=self.hostname,
device_name=self.device_name,
metric_type=MetricTypes.GAUGE,
interval=interval,
)]
self.value = None
return res
return []
class Count(Metric):
""" A metric that tracks a count. """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.value = None
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
self.value = (self.value or 0) + value
self.last_sample_time = time()
def flush(self, timestamp, interval):
if self.value is None:
return []
try:
return [self.formatter(
metric=self.name,
value=self.value,
timestamp=timestamp,
tags=self.tags,
hostname=self.hostname,
device_name=self.device_name,
metric_type=MetricTypes.COUNT,
interval=interval,
)]
finally:
self.value = None
class MonotonicCount(Metric):
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.prev_counter = None
self.curr_counter = None
self.count = None
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
if self.curr_counter is None:
self.curr_counter = value
else:
self.prev_counter = self.curr_counter
self.curr_counter = value
prev = self.prev_counter
curr = self.curr_counter
if prev is not None and curr is not None:
self.count = (self.count or 0) + max(0, curr - prev)
self.last_sample_time = time()
def flush(self, timestamp, interval):
if self.count is None:
return []
try:
return [self.formatter(
hostname=self.hostname,
device_name=self.device_name,
tags=self.tags,
metric=self.name,
value=self.count,
timestamp=timestamp,
metric_type=MetricTypes.COUNT,
interval=interval
)]
finally:
self.prev_counter = self.curr_counter
self.curr_counter = None
self.count = None
class Counter(Metric):
""" A metric that tracks a counter value. """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.value = 0
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
self.value += value * int(1 / sample_rate)
self.last_sample_time = time()
def flush(self, timestamp, interval):
try:
value = self.value / interval
return [self.formatter(
metric=self.name,
value=value,
timestamp=timestamp,
tags=self.tags,
hostname=self.hostname,
device_name=self.device_name,
metric_type=MetricTypes.RATE,
interval=interval,
)]
finally:
self.value = 0
DEFAULT_HISTOGRAM_AGGREGATES = ['max', 'median', 'avg', 'count']
DEFAULT_HISTOGRAM_PERCENTILES = [0.95]
class Histogram(Metric):
""" A metric to track the distribution of a set of values. """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.count = 0
self.samples = []
self.aggregates = extra_config['aggregates'] if\
extra_config is not None and extra_config.get('aggregates') is not None\
else DEFAULT_HISTOGRAM_AGGREGATES
self.percentiles = extra_config['percentiles'] if\
extra_config is not None and extra_config.get('percentiles') is not None\
else DEFAULT_HISTOGRAM_PERCENTILES
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
self.count += int(1 / sample_rate)
self.samples.append(value)
self.last_sample_time = time()
def flush(self, ts, interval):
if not self.count:
return []
self.samples.sort()
length = len(self.samples)
min_ = self.samples[0]
max_ = self.samples[-1]
med = self.samples[int(round(length/2 - 1))]
avg = sum(self.samples) / float(length)
aggregators = [
('min', min_, MetricTypes.GAUGE),
('max', max_, MetricTypes.GAUGE),
('median', med, MetricTypes.GAUGE),
('avg', avg, MetricTypes.GAUGE),
('count', self.count/interval, MetricTypes.RATE),
]
metric_aggrs = [
(agg_name, agg_func, m_type)
for agg_name, agg_func, m_type in aggregators
if agg_name in self.aggregates
]
metrics = [self.formatter(
hostname=self.hostname,
device_name=self.device_name,
tags=self.tags,
metric='%s.%s' % (self.name, suffix),
value=value,
timestamp=ts,
metric_type=metric_type,
interval=interval) for suffix, value, metric_type in metric_aggrs
]
for p in self.percentiles:
val = self.samples[int(round(p * length - 1))]
name = '%s.%spercentile' % (self.name, int(p * 100))
metrics.append(self.formatter(
hostname=self.hostname,
tags=self.tags,
metric=name,
value=val,
timestamp=ts,
metric_type=MetricTypes.GAUGE,
interval=interval,
))
# Reset our state.
self.samples = []
self.count = 0
return metrics
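# Worked example (added for illustration; not part of the original module): for
# a Histogram with samples 1..100 and the default 0.95 percentile, flush() picks
# samples[int(round(0.95 * 100 - 1))] == samples[94], i.e. the value 95, and
# reports it as "<name>.95percentile" alongside the configured aggregates.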
class Set(Metric):
""" A metric to track the number of unique elements in a set. """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.values = set()
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
self.values.add(value)
self.last_sample_time = time()
def flush(self, timestamp, interval):
if not self.values:
return []
try:
return [self.formatter(
hostname=self.hostname,
device_name=self.device_name,
tags=self.tags,
metric=self.name,
value=len(self.values),
timestamp=timestamp,
metric_type=MetricTypes.GAUGE,
interval=interval,
)]
finally:
self.values = set()
class Rate(Metric):
""" Track the rate of metrics over each flush interval """
def __init__(self, formatter, name, tags, hostname, device_name, extra_config=None):
self.formatter = formatter
self.name = name
self.tags = tags
self.hostname = hostname
self.device_name = device_name
self.samples = []
self.last_sample_time = None
def sample(self, value, sample_rate, timestamp=None):
ts = time()
self.samples.append((int(ts), value))
self.last_sample_time = ts
def _rate(self, sample1, sample2):
interval = sample2[0] - sample1[0]
if interval == 0:
log.warn('Metric %s has an interval of 0. Not flushing.' % self.name)
raise Infinity()
delta = sample2[1] - sample1[1]
if delta < 0:
            log.info('Metric %s has a rate < 0. Counter may have been reset.' % self.name)
raise UnknownValue()
return (delta / float(interval))
def flush(self, timestamp, interval):
if len(self.samples) < 2:
return []
try:
try:
val = self._rate(self.samples[-2], self.samples[-1])
except Exception:
return []
return [self.formatter(
hostname=self.hostname,
device_name=self.device_name,
tags=self.tags,
metric=self.name,
value=val,
timestamp=timestamp,
metric_type=MetricTypes.GAUGE,
interval=interval
)]
finally:
self.samples = self.samples[-1:]
class Aggregator(object):
"""
Abstract metric aggregator class.
"""
# Types of metrics that allow strings
ALLOW_STRINGS = ['s', ]
def __init__(self, hostname, interval=1.0, expiry_seconds=300,
formatter=None, recent_point_threshold=None,
histogram_aggregates=None, histogram_percentiles=None,
utf8_decoding=False):
self.events = []
self.service_checks = []
self.total_count = 0
self.count = 0
self.event_count = 0
self.service_check_count = 0
self.hostname = hostname
self.expiry_seconds = expiry_seconds
self.formatter = formatter or api_formatter
self.interval = float(interval)
recent_point_threshold = recent_point_threshold or RECENT_POINT_THRESHOLD_DEFAULT
self.recent_point_threshold = int(recent_point_threshold)
self.num_discarded_old_points = 0
# Additional config passed when instantiating metric configs
self.metric_config = {
Histogram: {
'aggregates': histogram_aggregates,
'percentiles': histogram_percentiles
}
}
self.utf8_decoding = utf8_decoding
def packets_per_second(self, interval):
if interval == 0:
return 0
return round(float(self.count)/interval, 2)
def parse_metric_packet(self, packet):
"""
Schema of a dogstatsd packet:
<name>:<value>|<metric_type>|@<sample_rate>|#<tag1_name>:<tag1_value>,<tag2_name>:<tag2_value>:<value>|<metric_type>...
"""
parsed_packets = []
name_and_metadata = packet.split(':', 1)
if len(name_and_metadata) != 2:
raise Exception('Unparseable metric packet: %s' % packet)
name = name_and_metadata[0]
broken_split = name_and_metadata[1].split(':')
data = []
partial_datum = None
for token in broken_split:
# We need to fix the tag groups that got broken by the : split
if partial_datum is None:
partial_datum = token
elif "|" not in token:
partial_datum += ":" + token
else:
data.append(partial_datum)
partial_datum = token
data.append(partial_datum)
for datum in data:
value_and_metadata = datum.split('|')
if len(value_and_metadata) < 2:
raise Exception('Unparseable metric packet: %s' % packet)
# Submit the metric
raw_value = value_and_metadata[0]
metric_type = value_and_metadata[1]
if metric_type in self.ALLOW_STRINGS:
value = raw_value
else:
# Try to cast as an int first to avoid precision issues, then as a
# float.
try:
value = int(raw_value)
except ValueError:
try:
value = float(raw_value)
except ValueError:
# Otherwise, raise an error saying it must be a number
raise Exception('Metric value must be a number: %s, %s' % (name, raw_value))
# Parse the optional values - sample rate & tags.
sample_rate = 1
tags = None
for m in value_and_metadata[2:]:
# Parse the sample rate
if m[0] == '@':
sample_rate = float(m[1:])
assert 0 <= sample_rate <= 1
elif m[0] == '#':
tags = tuple(sorted(m[1:].split(',')))
            parsed_packets.append((name, value, metric_type, tags, sample_rate))
return parsed_packets
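    # Example (added for illustration; the packet below is hypothetical): the
    # dogstatsd line 'page.views:1|c|@0.5|#env:prod' parses to a single tuple
    # ('page.views', 1, 'c', ('env:prod',), 0.5), i.e. name, value, metric type,
    # sorted tag tuple and sample rate.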
def _unescape_sc_content(self, string):
return string.replace('\\n', '\n').replace('m\:', 'm:')
def _unescape_event_text(self, string):
return string.replace('\\n', '\n')
def parse_event_packet(self, packet):
try:
name_and_metadata = packet.split(':', 1)
if len(name_and_metadata) != 2:
raise Exception(u'Unparseable event packet: %s' % packet)
# Event syntax:
# _e{5,4}:title|body|meta
name = name_and_metadata[0]
metadata = name_and_metadata[1]
title_length, text_length = name.split(',')
title_length = int(title_length[3:])
text_length = int(text_length[:-1])
event = {
'title': metadata[:title_length],
'text': self._unescape_event_text(metadata[title_length+1:title_length+text_length+1])
}
meta = metadata[title_length+text_length+1:]
for m in meta.split('|')[1:]:
if m[0] == u't':
event['alert_type'] = m[2:]
elif m[0] == u'k':
event['aggregation_key'] = m[2:]
elif m[0] == u's':
event['source_type_name'] = m[2:]
elif m[0] == u'd':
event['date_happened'] = int(m[2:])
elif m[0] == u'p':
event['priority'] = m[2:]
elif m[0] == u'h':
event['hostname'] = m[2:]
elif m[0] == u'#':
event['tags'] = sorted(m[1:].split(u','))
return event
except (IndexError, ValueError):
raise Exception(u'Unparseable event packet: %s' % packet)
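    # Example (illustrative only): '_e{5,4}:hello|text|#env:prod' yields
    # {'title': 'hello', 'text': 'text', 'tags': ['env:prod']} -- the {5,4}
    # prefix encodes the title and text lengths used to slice the metadata.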
def parse_sc_packet(self, packet):
try:
_, data_and_metadata = packet.split('|', 1)
# Service check syntax:
# _sc|check_name|status|meta
if data_and_metadata.count('|') == 1:
# Case with no metadata
check_name, status = data_and_metadata.split('|')
metadata = ''
else:
check_name, status, metadata = data_and_metadata.split('|', 2)
service_check = {
'check_name': check_name,
'status': int(status)
}
message_delimiter = '|m:' if '|m:' in metadata else 'm:'
if message_delimiter in metadata:
meta, message = metadata.rsplit(message_delimiter, 1)
service_check['message'] = self._unescape_sc_content(message)
else:
meta = metadata
if not meta:
return service_check
meta = unicode(meta)
for m in meta.split('|'):
if m[0] == u'd':
service_check['timestamp'] = float(m[2:])
elif m[0] == u'h':
service_check['hostname'] = m[2:]
elif m[0] == u'#':
service_check['tags'] = sorted(m[1:].split(u','))
return service_check
except (IndexError, ValueError):
raise Exception(u'Unparseable service check packet: %s' % packet)
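    # Example (illustrative only): '_sc|redis.can_connect|0|#env:prod|m:OK'
    # yields {'check_name': 'redis.can_connect', 'status': 0,
    # 'tags': ['env:prod'], 'message': 'OK'}.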
def submit_packets(self, packets):
        # We should probably consider that packets are always encoded
        # in utf8, but decoding all packets has a perf overhead of 7%.
        # So we let the user decide whether they want utf8 decoding by default.
        # Keep a very conservative approach anyhow:
        # clients MUST always send UTF-8 encoded content
if self.utf8_decoding:
packets = unicode(packets, 'utf-8', errors='replace')
for packet in packets.splitlines():
if not packet.strip():
continue
if packet.startswith('_e'):
self.event_count += 1
event = self.parse_event_packet(packet)
self.event(**event)
elif packet.startswith('_sc'):
self.service_check_count += 1
service_check = self.parse_sc_packet(packet)
self.service_check(**service_check)
else:
self.count += 1
parsed_packets = self.parse_metric_packet(packet)
for name, value, mtype, tags, sample_rate in parsed_packets:
hostname, device_name, tags = self._extract_magic_tags(tags)
self.submit_metric(name, value, mtype, tags=tags, hostname=hostname,
device_name=device_name, sample_rate=sample_rate)
def _extract_magic_tags(self, tags):
"""Magic tags (host, device) override metric hostname and device_name attributes"""
hostname = None
device_name = None
        # This implementation avoids list operations for the common case
if tags:
tags_to_remove = []
for tag in tags:
if tag.startswith('host:'):
hostname = tag[5:]
tags_to_remove.append(tag)
elif tag.startswith('device:'):
device_name = tag[7:]
tags_to_remove.append(tag)
if tags_to_remove:
# tags is a tuple already sorted, we convert it into a list to pop elements
tags = list(tags)
for tag in tags_to_remove:
tags.remove(tag)
tags = tuple(tags) or None
return hostname, device_name, tags
def submit_metric(self, name, value, mtype, tags=None, hostname=None,
device_name=None, timestamp=None, sample_rate=1):
""" Add a metric to be aggregated """
raise NotImplementedError()
def event(self, title, text, date_happened=None, alert_type=None, aggregation_key=None, source_type_name=None, priority=None, tags=None, hostname=None):
event = {
'msg_title': title,
'msg_text': text,
}
if date_happened is not None:
event['timestamp'] = date_happened
else:
event['timestamp'] = int(time())
if alert_type is not None:
event['alert_type'] = alert_type
if aggregation_key is not None:
event['aggregation_key'] = aggregation_key
if source_type_name is not None:
event['source_type_name'] = source_type_name
if priority is not None:
event['priority'] = priority
if tags is not None:
event['tags'] = sorted(tags)
if hostname is not None:
event['host'] = hostname
else:
event['host'] = self.hostname
self.events.append(event)
def service_check(self, check_name, status, tags=None, timestamp=None,
hostname=None, message=None):
service_check = {
'check': check_name,
'status': status,
'timestamp': timestamp or int(time())
}
if tags is not None:
service_check['tags'] = sorted(tags)
if hostname is not None:
service_check['host_name'] = hostname
else:
service_check['host_name'] = self.hostname
if message is not None:
service_check['message'] = message
self.service_checks.append(service_check)
def flush(self):
""" Flush aggregated metrics """
raise NotImplementedError()
def flush_events(self):
events = self.events
self.events = []
self.total_count += self.event_count
self.event_count = 0
log.debug("Received %d events since last flush" % len(events))
return events
def flush_service_checks(self):
service_checks = self.service_checks
self.service_checks = []
self.total_count += self.service_check_count
self.service_check_count = 0
log.debug("Received {0} service check runs since last flush".format(len(service_checks)))
return service_checks
def send_packet_count(self, metric_name):
self.submit_metric(metric_name, self.count, 'g')
class MetricsBucketAggregator(Aggregator):
"""
A metric aggregator class.
"""
def __init__(self, hostname, interval=1.0, expiry_seconds=300,
formatter=None, recent_point_threshold=None,
histogram_aggregates=None, histogram_percentiles=None,
utf8_decoding=False):
super(MetricsBucketAggregator, self).__init__(
hostname,
interval,
expiry_seconds,
formatter,
recent_point_threshold,
histogram_aggregates,
histogram_percentiles,
utf8_decoding
)
self.metric_by_bucket = {}
self.last_sample_time_by_context = {}
self.current_bucket = None
self.current_mbc = {}
self.last_flush_cutoff_time = 0
self.metric_type_to_class = {
'g': BucketGauge,
'c': Counter,
'h': Histogram,
'ms': Histogram,
's': Set,
}
def calculate_bucket_start(self, timestamp):
return timestamp - (timestamp % self.interval)
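    # Illustration (added; not in the original source): with interval=10.0, a
    # sample stamped 1002 maps to bucket start 1002 - (1002 % 10.0) == 1000.0,
    # so every sample in the same 10-second window is rolled up together.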
def submit_metric(self, name, value, mtype, tags=None, hostname=None,
device_name=None, timestamp=None, sample_rate=1):
# Avoid calling extra functions to dedupe tags if there are none
# Note: if you change the way that context is created, please also change create_empty_metrics,
# which counts on this order
# Keep hostname with empty string to unset it
hostname = hostname if hostname is not None else self.hostname
if tags is None:
context = (name, tuple(), hostname, device_name)
else:
context = (name, tuple(sorted(set(tags))), hostname, device_name)
cur_time = time()
# Check to make sure that the timestamp that is passed in (if any) is not older than
# recent_point_threshold. If so, discard the point.
if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
self.num_discarded_old_points += 1
else:
timestamp = timestamp or cur_time
# Keep track of the buckets using the timestamp at the start time of the bucket
bucket_start_timestamp = self.calculate_bucket_start(timestamp)
if bucket_start_timestamp == self.current_bucket:
metric_by_context = self.current_mbc
else:
if bucket_start_timestamp not in self.metric_by_bucket:
self.metric_by_bucket[bucket_start_timestamp] = {}
metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
self.current_bucket = bucket_start_timestamp
self.current_mbc = metric_by_context
if context not in metric_by_context:
metric_class = self.metric_type_to_class[mtype]
metric_by_context[context] = metric_class(self.formatter, name, tags,
hostname, device_name, self.metric_config.get(metric_class))
metric_by_context[context].sample(value, sample_rate, timestamp)
def create_empty_metrics(self, sample_time_by_context, expiry_timestamp, flush_timestamp, metrics):
# Even if no data is submitted, Counters keep reporting "0" for expiry_seconds. The other Metrics
# (Set, Gauge, Histogram) do not report if no data is submitted
for context, last_sample_time in sample_time_by_context.items():
if last_sample_time < expiry_timestamp:
log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
self.last_sample_time_by_context.pop(context, None)
else:
# The expiration currently only applies to Counters
# This counts on the ordering of the context created in submit_metric not changing
metric = Counter(self.formatter, context[0], context[1], context[2], context[3])
metrics += metric.flush(flush_timestamp, self.interval)
def flush(self):
cur_time = time()
flush_cutoff_time = self.calculate_bucket_start(cur_time)
expiry_timestamp = cur_time - self.expiry_seconds
metrics = []
if self.metric_by_bucket:
            # We want to process these in order so that we can check for expired
            # metrics and re-create non-expired metrics. We also mutate self.metric_by_bucket.
for bucket_start_timestamp in sorted(self.metric_by_bucket.keys()):
metric_by_context = self.metric_by_bucket[bucket_start_timestamp]
if bucket_start_timestamp < flush_cutoff_time:
not_sampled_in_this_bucket = self.last_sample_time_by_context.copy()
# We mutate this dictionary while iterating so don't use an iterator.
for context, metric in metric_by_context.items():
if metric.last_sample_time < expiry_timestamp:
# This should never happen
log.warning("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
not_sampled_in_this_bucket.pop(context, None)
self.last_sample_time_by_context.pop(context, None)
else:
metrics += metric.flush(bucket_start_timestamp, self.interval)
if isinstance(metric, Counter):
self.last_sample_time_by_context[context] = metric.last_sample_time
not_sampled_in_this_bucket.pop(context, None)
# We need to account for Metrics that have not expired and were not flushed for this bucket
self.create_empty_metrics(not_sampled_in_this_bucket, expiry_timestamp, bucket_start_timestamp, metrics)
del self.metric_by_bucket[bucket_start_timestamp]
else:
# Even if there are no metrics in this flush, there may be some non-expired counters
# We should only create these non-expired metrics if we've passed an interval since the last flush
if flush_cutoff_time >= self.last_flush_cutoff_time + self.interval:
self.create_empty_metrics(self.last_sample_time_by_context.copy(), expiry_timestamp,
flush_cutoff_time-self.interval, metrics)
# Log a warning regarding metrics with old timestamps being submitted
if self.num_discarded_old_points > 0:
log.warn('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
self.num_discarded_old_points = 0
# Save some stats.
log.debug("received %s payloads since last flush" % self.count)
self.total_count += self.count
self.count = 0
self.current_bucket = None
self.current_mbc = {}
self.last_flush_cutoff_time = flush_cutoff_time
return metrics
class MetricsAggregator(Aggregator):
"""
A metric aggregator class.
"""
def __init__(self, hostname, interval=1.0, expiry_seconds=300,
formatter=None, recent_point_threshold=None,
histogram_aggregates=None, histogram_percentiles=None,
utf8_decoding=False):
super(MetricsAggregator, self).__init__(
hostname,
interval,
expiry_seconds,
formatter,
recent_point_threshold,
histogram_aggregates,
histogram_percentiles,
utf8_decoding
)
self.metrics = {}
self.metric_type_to_class = {
'g': Gauge,
'ct': Count,
'ct-c': MonotonicCount,
'c': Counter,
'h': Histogram,
'ms': Histogram,
's': Set,
'_dd-r': Rate,
}
def submit_metric(self, name, value, mtype, tags=None, hostname=None,
device_name=None, timestamp=None, sample_rate=1):
# Avoid calling extra functions to dedupe tags if there are none
# Keep hostname with empty string to unset it
hostname = hostname if hostname is not None else self.hostname
if tags is None:
context = (name, tuple(), hostname, device_name)
else:
context = (name, tuple(sorted(set(tags))), hostname, device_name)
if context not in self.metrics:
metric_class = self.metric_type_to_class[mtype]
self.metrics[context] = metric_class(self.formatter, name, tags,
hostname, device_name, self.metric_config.get(metric_class))
cur_time = time()
if timestamp is not None and cur_time - int(timestamp) > self.recent_point_threshold:
log.debug("Discarding %s - ts = %s , current ts = %s " % (name, timestamp, cur_time))
self.num_discarded_old_points += 1
else:
self.metrics[context].sample(value, sample_rate, timestamp)
def gauge(self, name, value, tags=None, hostname=None, device_name=None, timestamp=None):
self.submit_metric(name, value, 'g', tags, hostname, device_name, timestamp)
def increment(self, name, value=1, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, 'c', tags, hostname, device_name)
def decrement(self, name, value=-1, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, 'c', tags, hostname, device_name)
def rate(self, name, value, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, '_dd-r', tags, hostname, device_name)
def submit_count(self, name, value, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, 'ct', tags, hostname, device_name)
def count_from_counter(self, name, value, tags=None,
hostname=None, device_name=None):
self.submit_metric(name, value, 'ct-c', tags,
hostname, device_name)
def histogram(self, name, value, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, 'h', tags, hostname, device_name)
def set(self, name, value, tags=None, hostname=None, device_name=None):
self.submit_metric(name, value, 's', tags, hostname, device_name)
def flush(self):
timestamp = time()
expiry_timestamp = timestamp - self.expiry_seconds
# Flush points and remove expired metrics. We mutate this dictionary
# while iterating so don't use an iterator.
metrics = []
for context, metric in self.metrics.items():
if metric.last_sample_time < expiry_timestamp:
log.debug("%s hasn't been submitted in %ss. Expiring." % (context, self.expiry_seconds))
del self.metrics[context]
else:
metrics += metric.flush(timestamp, self.interval)
# Log a warning regarding metrics with old timestamps being submitted
if self.num_discarded_old_points > 0:
log.warn('%s points were discarded as a result of having an old timestamp' % self.num_discarded_old_points)
self.num_discarded_old_points = 0
# Save some stats.
log.debug("received %s payloads since last flush" % self.count)
self.total_count += self.count
self.count = 0
return metrics
def get_formatter(config):
formatter = api_formatter
if config['statsd_metric_namespace']:
def metric_namespace_formatter_wrapper(metric, value, timestamp, tags,
hostname=None, device_name=None,
metric_type=None, interval=None):
metric_prefix = config['statsd_metric_namespace']
if metric_prefix[-1] != '.':
metric_prefix += '.'
return api_formatter(metric_prefix + metric, value, timestamp, tags, hostname,
device_name, metric_type, interval)
formatter = metric_namespace_formatter_wrapper
return formatter
def api_formatter(metric, value, timestamp, tags, hostname=None, device_name=None,
metric_type=None, interval=None):
return {
'metric': metric,
'points': [(timestamp, value)],
'tags': tags,
'host': hostname,
'device_name': device_name,
'type': metric_type or MetricTypes.GAUGE,
        'interval': interval,
}
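# --- Hedged usage sketch (added for illustration; not part of the original
# module). The host name and metric names below are made up. It shows the
# typical flow: samples go in through MetricsAggregator and come out as
# api_formatter payloads when flush() is called.
if __name__ == '__main__':
    aggregator = MetricsAggregator('example-host', interval=10.0)
    # Counters are normalised to a per-second rate over the flush interval.
    aggregator.increment('page.views')
    aggregator.increment('page.views')
    # Gauges keep only the most recent sampled value.
    aggregator.gauge('queue.depth', 42)
    for payload in aggregator.flush():
        print(payload['metric'], payload['points'])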
|
coderhaoxin/tornado
|
refs/heads/master
|
tornado/queues.py
|
46
|
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
import collections
import heapq
from tornado import gen, ioloop
from tornado.concurrent import Future
from tornado.locks import Event
class QueueEmpty(Exception):
"""Raised by `.Queue.get_nowait` when the queue has no items."""
pass
class QueueFull(Exception):
"""Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
pass
def _set_timeout(future, timeout):
if timeout:
def on_timeout():
future.set_exception(gen.TimeoutError())
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
future.add_done_callback(
lambda _: io_loop.remove_timeout(timeout_handle))
class Queue(object):
"""Coordinate producer and consumer coroutines.
If maxsize is 0 (the default) the queue size is unbounded.
.. testcode::
q = queues.Queue(maxsize=2)
@gen.coroutine
def consumer():
while True:
item = yield q.get()
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
@gen.coroutine
def producer():
for item in range(5):
yield q.put(item)
print('Put %s' % item)
@gen.coroutine
def main():
consumer() # Start consumer.
yield producer() # Wait for producer to put all tasks.
yield q.join() # Wait for consumer to finish all tasks.
print('Done')
io_loop.run_sync(main)
.. testoutput::
Put 0
Put 1
Put 2
Doing work on 0
Doing work on 1
Put 3
Doing work on 2
Put 4
Doing work on 3
Doing work on 4
Done
"""
def __init__(self, maxsize=0):
if maxsize is None:
raise TypeError("maxsize can't be None")
if maxsize < 0:
raise ValueError("maxsize can't be negative")
self._maxsize = maxsize
self._init()
self._getters = collections.deque([]) # Futures.
self._putters = collections.deque([]) # Pairs of (item, Future).
self._unfinished_tasks = 0
self._finished = Event()
self._finished.set()
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
def empty(self):
return not self._queue
def full(self):
if self.maxsize == 0:
return False
else:
return self.qsize() >= self.maxsize
def put(self, item, timeout=None):
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
try:
self.put_nowait(item)
except QueueFull:
future = Future()
self._putters.append((item, future))
_set_timeout(future, timeout)
return future
else:
return gen._null_future
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise `QueueFull`.
"""
self._consume_expired()
if self._getters:
assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft()
self.__put_internal(item)
getter.set_result(self._get())
elif self.full():
raise QueueFull
else:
self.__put_internal(item)
def get(self, timeout=None):
"""Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout.
"""
future = Future()
try:
future.set_result(self.get_nowait())
except QueueEmpty:
self._getters.append(future)
_set_timeout(future, timeout)
return future
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Return an item if one is immediately available, else raise
`QueueEmpty`.
"""
self._consume_expired()
if self._putters:
assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft()
self.__put_internal(item)
putter.set_result(None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each `.get` used to fetch a task, a
subsequent call to `.task_done` tells the queue that the processing
on the task is complete.
If a `.join` is blocking, it resumes when all items have been
processed; that is, when every `.put` is matched by a `.task_done`.
Raises `ValueError` if called more times than `.put`.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
def join(self, timeout=None):
"""Block until all items in the queue are processed.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
return self._finished.wait(timeout)
# These three are overridable in subclasses.
def _init(self):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def __put_internal(self, item):
self._unfinished_tasks += 1
self._finished.clear()
self._put(item)
def _consume_expired(self):
# Remove timed-out waiters.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
while self._getters and self._getters[0].done():
self._getters.popleft()
def __repr__(self):
return '<%s at %s %s>' % (
type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._format())
def _format(self):
result = 'maxsize=%r' % (self.maxsize, )
if getattr(self, '_queue', None):
result += ' queue=%r' % self._queue
if self._getters:
result += ' getters[%s]' % len(self._getters)
if self._putters:
result += ' putters[%s]' % len(self._putters)
if self._unfinished_tasks:
result += ' tasks=%s' % self._unfinished_tasks
return result
class PriorityQueue(Queue):
"""A `.Queue` that retrieves entries in priority order, lowest first.
Entries are typically tuples like ``(priority number, data)``.
.. testcode::
q = queues.PriorityQueue()
q.put((1, 'medium-priority item'))
q.put((0, 'high-priority item'))
q.put((10, 'low-priority item'))
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
(0, 'high-priority item')
(1, 'medium-priority item')
(10, 'low-priority item')
"""
def _init(self):
self._queue = []
def _put(self, item):
heapq.heappush(self._queue, item)
def _get(self):
return heapq.heappop(self._queue)
class LifoQueue(Queue):
"""A `.Queue` that retrieves the most recently put items first.
.. testcode::
q = queues.LifoQueue()
q.put(3)
q.put(2)
q.put(1)
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
1
2
3
"""
def _init(self):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
|
lalanza808/lalanza808.github.io
|
refs/heads/master
|
vendor/bundle/ruby/2.3.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/styles/paraiso_dark.py
|
126
|
# -*- coding: utf-8 -*-
"""
pygments.styles.paraiso_dark
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paraíso (Dark) by Jan T. Sott
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson
(https://github.com/chriskempson/base16-builder).
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#2f1e2e"
CURRENT_LINE = "#41323f"
SELECTION = "#4f424c"
FOREGROUND = "#e7e9db"
COMMENT = "#776e71"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
class ParaisoDarkStyle(Style):
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
Other: "", # class 'x'
Comment: COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: FOREGROUND, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: RED, # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
|
hazrpg/calibre
|
refs/heads/master
|
src/calibre/library/field_metadata.py
|
3
|
'''
Created on 25 May 2010
@author: charles
'''
import traceback
from collections import OrderedDict
from calibre.utils.config_base import tweaks
category_icon_map = {
'authors' : 'user_profile.png',
'series' : 'series.png',
'formats' : 'book.png',
'publisher' : 'publisher.png',
'rating' : 'rating.png',
'news' : 'news.png',
'tags' : 'tags.png',
'custom:' : 'column.png',
'user:' : 'tb_folder.png',
'search' : 'search.png',
'identifiers': 'identifiers.png',
'gst' : 'catalog.png',
'languages' : 'languages.png',
}
# Builtin metadata {{{
def _builtin_field_metadata():
# This is a function so that changing the UI language allows newly created
# field metadata objects to have correctly translated labels for builtin
# fields.
return [
('authors', {'table':'authors',
'column':'name',
'link_column':'author',
'category_sort':'sort',
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': '&',
'list_to_ui': ' & '},
'kind':'field',
'name':_('Authors'),
'search_terms':['authors', 'author'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('languages', {'table':'languages',
'column':'lang_code',
'link_column':'lang_code',
'category_sort':'lang_code',
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Languages'),
'search_terms':['languages', 'language'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('series', {'table':'series',
'column':'name',
'link_column':'series',
'category_sort':'(title_sort(name))',
'datatype':'series',
'is_multiple':{},
'kind':'field',
'name':ngettext('Series', 'Series', 1),
'search_terms':['series'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('formats', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Formats'),
'search_terms':['formats', 'format'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('publisher', {'table':'publishers',
'column':'name',
'link_column':'publisher',
'category_sort':'name',
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Publisher'),
'search_terms':['publisher'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('rating', {'table':'ratings',
'column':'rating',
'link_column':'rating',
'category_sort':'rating',
'datatype':'rating',
'is_multiple':{},
'kind':'field',
'name':_('Rating'),
'search_terms':['rating'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('news', {'table':'news',
'column':'name',
'category_sort':'name',
'datatype':None,
'is_multiple':{},
'kind':'category',
'name':_('News'),
'search_terms':[],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('tags', {'table':'tags',
'column':'name',
'link_column': 'tag',
'category_sort':'name',
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Tags'),
'search_terms':['tags', 'tag'],
'is_custom':False,
'is_category':True,
'is_csp': False}),
('identifiers', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '},
'kind':'field',
'name':_('Identifiers'),
'search_terms':['identifiers', 'identifier', 'isbn'],
'is_custom':False,
'is_category':True,
'is_csp': True}),
('author_sort',{'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Author Sort'),
'search_terms':['author_sort'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('au_map', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{'cache_to_list': ',',
'ui_to_list': None,
'list_to_ui': None},
'kind':'field',
'name':None,
'search_terms':[],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('comments', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Comments'),
'search_terms':['comments', 'comment'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('cover', {'table':None,
'column':None,
'datatype':'int',
'is_multiple':{},
'kind':'field',
'name':_('Cover'),
'search_terms':['cover'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('id', {'table':None,
'column':None,
'datatype':'int',
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':['id'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('last_modified', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':{},
'kind':'field',
'name':_('Modified'),
'search_terms':['last_modified'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('ondevice', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('On Device'),
'search_terms':['ondevice'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('path', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Path'),
'search_terms':[],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('pubdate', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':{},
'kind':'field',
'name':_('Published'),
'search_terms':['pubdate'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('marked', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name': None,
'search_terms':['marked'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('series_index',{'table':None,
'column':None,
'datatype':'float',
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':['series_index'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('series_sort', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Series Sort'),
'search_terms':['series_sort'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('sort', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Title Sort'),
'search_terms':['title_sort'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('size', {'table':None,
'column':None,
'datatype':'float',
'is_multiple':{},
'kind':'field',
'name':_('Size'),
'search_terms':['size'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('timestamp', {'table':None,
'column':None,
'datatype':'datetime',
'is_multiple':{},
'kind':'field',
'name':_('Date'),
'search_terms':['date'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('title', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':_('Title'),
'search_terms':['title'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
('uuid', {'table':None,
'column':None,
'datatype':'text',
'is_multiple':{},
'kind':'field',
'name':None,
'search_terms':['uuid'],
'is_custom':False,
'is_category':False,
'is_csp': False}),
]
# }}}
class FieldMetadata(dict):
'''
key: the key to the dictionary is:
- for standard fields, the metadata field name.
- for custom fields, the metadata field name prefixed by '#'
This is done to create two 'namespaces' so the names don't clash
label: the actual column label. No prefixing.
datatype: the type of information in the field. Valid values are listed in
VALID_DATA_TYPES below.
is_multiple: valid for the text datatype. If {}, the field is to be
treated as a single term. If not None, it contains a dict of the form
{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '}
where the cache_to_list contains the character used to split the value in
the meta2 table, ui_to_list contains the character used to create a list
from a value shown in the ui (each resulting value must be strip()ed and
empty values removed), and list_to_ui contains the string used in join()
to create a displayable string from the list.
kind == field: is a db field.
kind == category: standard tag category that isn't a field. see news.
kind == user: user-defined tag category.
kind == search: saved-searches category.
is_category: is a tag browser category. If true, then:
table: name of the db table used to construct item list
column: name of the column in the normalized table to join on
link_column: name of the column in the connection table to join on. This
key should not be present if there is no link table
category_sort: the field in the normalized table to sort on. This
key must be present if is_category is True
If these are None, then the category constructor must know how
to build the item list (e.g., formats, news).
The order below is the order that the categories will
appear in the tags pane.
name: the text that is to be used when displaying the field. Column headings
in the GUI, etc.
search_terms: the terms that can be used to identify the field when
searching. They can be thought of as aliases for metadata keys, but are only
valid when passed to search().
is_custom: the field has been added by the user.
rec_index: the index of the field in the db metadata record.
is_csp: field contains colon-separated pairs. Must also be text, is_multiple
'''
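    # Illustrative example (added for clarity; values come from
    # _builtin_field_metadata above): after construction, fm['authors'] is a
    # dict with 'datatype' == 'text', 'search_terms' == ['authors', 'author'],
    # an 'is_multiple' mapping of separators, and 'label' == 'authors'; a custom
    # column with label 'mytag' would instead be stored under the key '#mytag'.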
VALID_DATA_TYPES = frozenset([None, 'rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
# search labels that are not db columns
search_items = ['all', 'search']
def __init__(self):
self._field_metadata = _builtin_field_metadata()
self._tb_cats = OrderedDict()
self._tb_custom_fields = {}
self._search_term_map = {}
self.custom_label_to_key_map = {}
for k,v in self._field_metadata:
if v['kind'] == 'field' and v['datatype'] not in self.VALID_DATA_TYPES:
raise ValueError('Unknown datatype %s for field %s'%(v['datatype'], k))
self._tb_cats[k] = v
self._tb_cats[k]['label'] = k
self._tb_cats[k]['display'] = {}
self._tb_cats[k]['is_editable'] = True
self._add_search_terms_to_map(k, v['search_terms'])
self._tb_cats['timestamp']['display'] = {
'date_format': tweaks['gui_timestamp_display_format']}
self._tb_cats['pubdate']['display'] = {
'date_format': tweaks['gui_pubdate_display_format']}
self._tb_cats['last_modified']['display'] = {
'date_format': tweaks['gui_last_modified_display_format']}
self.custom_field_prefix = '#'
self.get = self._tb_cats.get
def __getitem__(self, key):
if key == 'title_sort':
return self._tb_cats['sort']
return self._tb_cats[key]
def __setitem__(self, key, val):
raise AttributeError('Assigning to this object is forbidden')
def __delitem__(self, key):
del self._tb_cats[key]
def __iter__(self):
for key in self._tb_cats:
yield key
def __contains__(self, key):
return key in self._tb_cats or key == 'title_sort'
def has_key(self, key):
return key in self
def keys(self):
return self._tb_cats.keys()
def sortable_field_keys(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
self._tb_cats[k]['datatype'] is not None]
def ui_sortable_field_keys(self):
ans = {k:self._tb_cats[k]['name'] for k in set(self.sortable_field_keys()) - {
'sort', 'author_sort', 'au_map', 'series_sort', 'marked',
'series_index', 'path', 'formats', 'identifiers', 'uuid',
'comments',
} if self._tb_cats[k]['name']}
ans['cover'] = _('Has cover')
return ans
def displayable_field_keys(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
self._tb_cats[k]['datatype'] is not None and
k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort') and
not self.is_series_index(k)]
def standard_field_keys(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
not self._tb_cats[k]['is_custom']]
def custom_field_keys(self, include_composites=True):
res = []
for k in self._tb_cats.keys():
fm = self._tb_cats[k]
if fm['kind']=='field' and fm['is_custom'] and \
(fm['datatype'] != 'composite' or include_composites):
res.append(k)
return res
def all_field_keys(self):
return [k for k in self._tb_cats.keys() if self._tb_cats[k]['kind']=='field']
def iterkeys(self):
for key in self._tb_cats:
yield key
def itervalues(self):
return self._tb_cats.itervalues()
def values(self):
return self._tb_cats.values()
def iteritems(self):
for key in self._tb_cats:
yield (key, self._tb_cats[key])
def custom_iteritems(self):
for key, meta in self._tb_custom_fields.iteritems():
yield (key, meta)
def items(self):
return list(self.iteritems())
def is_custom_field(self, key):
return key.startswith(self.custom_field_prefix)
def is_ignorable_field(self, key):
'Custom fields and user categories are ignorable'
return self.is_custom_field(key) or key.startswith('@')
def ignorable_field_keys(self):
return [k for k in self._tb_cats.iterkeys() if self.is_ignorable_field(k)]
def is_series_index(self, key):
try:
m = self._tb_cats[key]
return (m['datatype'] == 'float' and key.endswith('_index') and
key[:-6] in self._tb_cats)
except (KeyError, ValueError, TypeError, AttributeError):
return False
def key_to_label(self, key):
if 'label' not in self._tb_cats[key]:
return key
return self._tb_cats[key]['label']
def label_to_key(self, label, prefer_custom=False):
if prefer_custom:
if label in self.custom_label_to_key_map:
return self.custom_label_to_key_map[label]
        if label in self._tb_cats:
return label
if not prefer_custom:
if label in self.custom_label_to_key_map:
return self.custom_label_to_key_map[label]
raise ValueError('Unknown key [%s]'%(label))
def all_metadata(self):
l = {}
for k in self._tb_cats:
l[k] = self._tb_cats[k]
return l
def custom_field_metadata(self, include_composites=True):
if include_composites:
return self._tb_custom_fields
l = {}
for k in self.custom_field_keys(include_composites):
l[k] = self._tb_cats[k]
return l
def add_custom_field(self, label, table, column, datatype, colnum, name,
display, is_editable, is_multiple, is_category,
is_csp=False):
key = self.custom_field_prefix + label
if key in self._tb_cats:
raise ValueError('Duplicate custom field [%s]'%(label))
if datatype not in self.VALID_DATA_TYPES:
raise ValueError('Unknown datatype %s for field %s'%(datatype, key))
self._tb_cats[key] = {'table':table, 'column':column,
'datatype':datatype, 'is_multiple':is_multiple,
'kind':'field', 'name':name,
'search_terms':[key], 'label':label,
'colnum':colnum, 'display':display,
'is_custom':True, 'is_category':is_category,
'link_column':'value','category_sort':'value',
'is_csp' : is_csp, 'is_editable': is_editable,}
self._tb_custom_fields[key] = self._tb_cats[key]
self._add_search_terms_to_map(key, [key])
self.custom_label_to_key_map[label] = key
if datatype == 'series':
key += '_index'
self._tb_cats[key] = {'table':None, 'column':None,
'datatype':'float', 'is_multiple':{},
'kind':'field', 'name':'',
'search_terms':[key], 'label':label+'_index',
'colnum':None, 'display':{},
'is_custom':False, 'is_category':False,
'link_column':None, 'category_sort':None,
'is_editable': False, 'is_csp': False}
self._add_search_terms_to_map(key, [key])
self.custom_label_to_key_map[label+'_index'] = key
def remove_dynamic_categories(self):
for key in list(self._tb_cats.keys()):
val = self._tb_cats[key]
if val['is_category'] and val['kind'] in ('user', 'search'):
for k in self._tb_cats[key]['search_terms']:
if k in self._search_term_map:
del self._search_term_map[k]
del self._tb_cats[key]
def remove_user_categories(self):
for key in list(self._tb_cats.keys()):
val = self._tb_cats[key]
if val['is_category'] and val['kind'] == 'user':
for k in self._tb_cats[key]['search_terms']:
if k in self._search_term_map:
del self._search_term_map[k]
del self._tb_cats[key]
def _remove_grouped_search_terms(self):
to_remove = [v for v in self._search_term_map
if isinstance(self._search_term_map[v], list)]
for v in to_remove:
del self._search_term_map[v]
def add_grouped_search_terms(self, gst):
self._remove_grouped_search_terms()
for t in gst:
try:
self._add_search_terms_to_map(gst[t], [t])
except ValueError:
traceback.print_exc()
def cc_series_index_column_for(self, key):
return self._tb_cats[key]['rec_index'] + 1
def add_user_category(self, label, name):
if label in self._tb_cats:
raise ValueError('Duplicate user field [%s]'%(label))
st = [label]
if icu_lower(label) != label:
st.append(icu_lower(label))
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':{},
'kind':'user', 'name':name,
'search_terms':st, 'is_custom':False,
'is_category':True, 'is_csp': False}
self._add_search_terms_to_map(label, st)
def add_search_category(self, label, name):
if label in self._tb_cats:
raise ValueError('Duplicate user field [%s]'%(label))
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':{},
'kind':'search', 'name':name,
'search_terms':[], 'is_custom':False,
'is_category':True, 'is_csp': False}
def set_field_record_index(self, label, index, prefer_custom=False):
if prefer_custom:
key = self.custom_field_prefix+label
if key not in self._tb_cats:
key = label
else:
if label in self._tb_cats:
key = label
else:
key = self.custom_field_prefix+label
self._tb_cats[key]['rec_index'] = index # let the exception fly ...
def get_search_terms(self):
s_keys = sorted(self._search_term_map.keys())
for v in self.search_items:
s_keys.append(v)
return s_keys
def _add_search_terms_to_map(self, key, terms):
if terms is not None:
for t in terms:
if t in self._search_term_map:
raise ValueError('Attempt to add duplicate search term "%s"'%t)
self._search_term_map[t] = key
def search_term_to_field_key(self, term):
return self._search_term_map.get(term, term)
def searchable_fields(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
len(self._tb_cats[k]['search_terms']) > 0]
|
000paradox000/django-dead-users
|
refs/heads/master
|
dead_users/forms/signup.py
|
1
|
from django.contrib.auth import get_user_model
from django import forms
class SignupForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
class Meta:
model = get_user_model()
fields = ['first_name', 'last_name',]
def signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
|
dav1x/ansible
|
refs/heads/devel
|
contrib/inventory/packet_net.py
|
16
|
#!/usr/bin/env python
'''
Packet.net external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
Packet.net using the Packet library.
NOTE: This script assumes Ansible is being executed where the environment
variable needed for the Packet API Token has already been set:
export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
This script also assumes there is a packet_net.ini file alongside it. To specify a
different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
'''
# (c) 2016, Peter Sankauskas
# (c) 2017, Tomas Karasek
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import six
from six.moves import configparser
try:
import packet
except ImportError as e:
sys.exit("failed=True msg='`packet-python` library required for this script'")
import traceback
try:
import json
except ImportError:
import simplejson as json
ini_section = 'packet'
class PacketInventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
        # Inventory grouped by device IDs, tags, projects, facilities,
        # plans and operating systems
self.inventory = self._empty_inventory()
# Index of hostname (address) to device ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of devices for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if they are still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the packet_net.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
_ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
if _ini_path_raw:
packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
else:
packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
config.read(packet_ini_path)
# items per page
self.items_per_page = 999
if config.has_option(ini_section, 'items_per_page'):
            self.items_per_page = config.getint(ini_section, 'items_per_page')
        # Device states to be gathered in inventory. Default is all of them.
packet_valid_device_states = [
'active',
'inactive',
'queued',
'provisioning'
]
self.packet_device_states = []
if config.has_option(ini_section, 'device_states'):
for device_state in config.get(ini_section, 'device_states').split(','):
device_state = device_state.strip()
if device_state not in packet_valid_device_states:
continue
self.packet_device_states.append(device_state)
else:
self.packet_device_states = packet_valid_device_states
# Cache related
cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-packet.cache"
self.cache_path_index = cache_dir + "/ansible-packet.index"
self.cache_max_age = config.getint(ini_section, 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option(ini_section, 'nested_groups'):
self.nested_groups = config.getboolean(ini_section, 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option(ini_section, 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_device_id',
'group_by_facility',
'group_by_project',
'group_by_operating_system',
'group_by_plan_type',
'group_by_tags',
'group_by_tag_none',
]
for option in group_by_options:
if config.has_option(ini_section, option):
setattr(self, option, config.getboolean(ini_section, option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get(ini_section, 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get(ini_section, 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Projects
self.projects = []
configProjects = config.get(ini_section, 'projects')
configProjects_exclude = config.get(ini_section, 'projects_exclude')
if (configProjects == 'all'):
for projectInfo in self.get_projects():
                if projectInfo.name not in [p.strip() for p in configProjects_exclude.split(',')]:
self.projects.append(projectInfo.name)
else:
self.projects = configProjects.split(",")
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
parser.add_argument('--list', action='store_true', default=True,
help='List Devices (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific device')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
        ''' Do API calls for each project, and save data in cache files '''
for projectInfo in self.get_projects():
if projectInfo.name in self.projects:
self.get_devices_by_project(projectInfo)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self):
''' create connection to api server'''
token=os.environ.get('PACKET_API_TOKEN')
if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token)
return manager
def get_projects(self):
'''Makes a Packet API call to get the list of projects'''
try:
manager = self.connect()
projects = manager.list_projects()
return projects
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet projects')
def get_devices_by_project(self, project):
        ''' Makes a Packet API call to get the list of devices in a particular
project '''
params = {
'per_page': self.items_per_page
}
try:
manager = self.connect()
devices = manager.list_devices(project_id=project.id, params = params)
for device in devices:
self.add_device(device, project)
except Exception as e:
traceback.print_exc()
self.fail_with_error(e, 'getting Packet devices')
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_device(self, device_id):
manager = self.connect()
device = manager.get_device(device_id)
return device
def add_device(self, device, project):
''' Adds a device to the inventory and index, as long as it is
addressable '''
# Only return devices with desired device states
if device.state not in self.packet_device_states:
return
# Select the best destination address
dest = None
for ip_address in device.ip_addresses:
if ip_address['public'] is True and ip_address['address_family'] == 4:
dest = ip_address['address']
if not dest:
            # Skip devices we cannot address (e.g. no public IPv4 address)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(device.hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
return
# Add to index
self.index[dest] = [project.id, device.id]
# Inventory: Group by device ID (always a group of 1)
if self.group_by_device_id:
self.inventory[device.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'devices', device.id)
# Inventory: Group by project
if self.group_by_project:
self.push(self.inventory, project.name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'projects', project.name)
# Inventory: Group by facility
if self.group_by_facility:
self.push(self.inventory, device.facility['code'], dest)
            if self.nested_groups:
                self.push_group(self.inventory, project.name, device.facility['code'])
# Inventory: Group by OS
if self.group_by_operating_system:
self.push(self.inventory, device.operating_system.slug, dest)
if self.nested_groups:
self.push_group(self.inventory, 'operating_systems', device.operating_system.slug)
# Inventory: Group by plan type
if self.group_by_plan_type:
self.push(self.inventory, device.plan['slug'], dest)
if self.nested_groups:
self.push_group(self.inventory, 'plans', device.plan['slug'])
# Inventory: Group by tag keys
if self.group_by_tags:
for k in device.tags:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
# Global Tag: devices without tags
if self.group_by_tag_none and len(device.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all Packet devices
self.push(self.inventory, 'packet', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device):
device_vars = {}
for key in vars(device):
value = getattr(device, key)
key = self.to_safe('packet_' + key)
# Handle complex types
if key == 'packet_state':
device_vars[key] = device.state or ''
elif key == 'packet_hostname':
device_vars[key] = value
elif isinstance(value, (int, bool)):
device_vars[key] = value
elif isinstance(value, six.string_types):
device_vars[key] = value.strip()
elif value is None:
device_vars[key] = ''
elif key == 'packet_facility':
device_vars[key] = value['code']
elif key == 'packet_operating_system':
device_vars[key] = value.slug
elif key == 'packet_plan':
device_vars[key] = value['slug']
elif key == 'packet_tags':
for k in value:
key = self.to_safe('packet_tag_' + k)
device_vars[key] = k
else:
pass
#print key
#print type(value)
#print value
return device_vars
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
        if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
            if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(project_id, device_id) = self.index[self.args.host]
device = self.get_device(device_id)
return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
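    # Illustrative example (editorial): starting from an empty dict `inv`,
    #   self.push(inv, 'projects', '10.0.0.1')          # inv == {'projects': ['10.0.0.1']}
    #   self.push_group(inv, 'projects', 'my-project')  # inv['projects'] becomes
    #                                                   # {'hosts': ['10.0.0.1'], 'children': ['my-project']}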
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
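    # Example (editorial): to_safe("tag_web-server") returns "tag_web_server" when
    # replace_dash_in_groups is True, and "tag_web-server" otherwise.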
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
PacketInventory()
|
mearns/slider
|
refs/heads/master
|
slider/__main__.py
|
1
|
#! /usr/bin/env python
# vim: set fileencoding=utf-8: set encoding=utf-8:
from slider import PresentationGenerator
if __name__ == '__main__':
import codecs
import sys
with open('test.html', 'w') as ofile:
PresentationGenerator().markdown_to_html(codecs.open("test.md", mode='r', encoding='utf-8'), ofile)
|
karthik339/Agni
|
refs/heads/master
|
MainDemo/flask/lib/python2.7/site-packages/openid/consumer/html_parse.py
|
167
|
"""
This module implements a VERY limited parser that finds <link> tags in
the head of HTML or XHTML documents and parses out their attributes
according to the OpenID spec. It is a liberal parser, but it requires
these things from the data in order to work:
- There must be an open <html> tag
- There must be an open <head> tag inside of the <html> tag
- Only <link>s that are found inside of the <head> tag are parsed
(this is by design)
- The parser follows the OpenID specification in resolving the
attributes of the link tags. This means that the attributes DO NOT
get resolved as they would by an XML or HTML parser. In particular,
only certain entities get replaced, and href attributes do not get
resolved relative to a base URL.
From http://openid.net/specs.bml#linkrel:
- The openid.server URL MUST be an absolute URL. OpenID consumers
MUST NOT attempt to resolve relative URLs.
- The openid.server URL MUST NOT include entities other than &,
<, >, and ".
The parser ignores SGML comments and <![CDATA[blocks]]>. Both kinds of
quoting are allowed for attributes.
The parser deals with invalid markup in these ways:
- Tag names are not case-sensitive
- The <html> tag is accepted even when it is not at the top level
- The <head> tag is accepted even when it is not a direct child of
the <html> tag, but a <html> tag must be an ancestor of the <head>
tag
- <link> tags are accepted even when they are not direct children of
the <head> tag, but a <head> tag must be an ancestor of the <link>
tag
- If there is no closing tag for an open <html> or <head> tag, the
remainder of the document is viewed as being inside of the tag. If
there is no closing tag for a <link> tag, the link tag is treated
as a short tag. Exceptions to this rule are that <html> closes
<html> and <body> or <head> closes <head>
- Attributes of the <link> tag are not required to be quoted.
- In the case of duplicated attribute names, the attribute coming
last in the tag will be the value returned.
- Any text that does not parse as an attribute within a link tag will
be ignored. (e.g. <link pumpkin rel='openid.server' /> will ignore
pumpkin)
  - If there is more than one <html> or <head> tag, the parser only
    looks inside of the first one.
  - The contents of closed <script> blocks are ignored entirely. An
    unclosed <script> tag is itself ignored, but the markup that follows
    it is still parsed.
- Any other invalid markup is ignored, including unclosed SGML
comments and unclosed <![CDATA[blocks.
"""
__all__ = ['parseLinkAttrs']
import re
flags = ( re.DOTALL # Match newlines with '.'
| re.IGNORECASE
| re.VERBOSE # Allow comments and whitespace in patterns
| re.UNICODE # Make \b respect Unicode word boundaries
)
# Stuff to remove before we start looking for tags
removed_re = re.compile(r'''
# Comments
<!--.*?-->
# CDATA blocks
| <!\[CDATA\[.*?\]\]>
# script blocks
| <script\b
# make sure script is not an XML namespace
(?!:)
[^>]*>.*?</script>
''', flags)
tag_expr = r'''
# Starts with the tag name at a word boundary, where the tag name is
# not a namespace
<%(tag_name)s\b(?!:)
# All of the stuff up to a ">", hopefully attributes.
(?P<attrs>[^>]*?)
(?: # Match a short tag
/>
| # Match a full tag
>
(?P<contents>.*?)
# Closed by
(?: # One of the specified close tags
</?%(closers)s\s*>
# End of the string
| \Z
)
)
'''
def tagMatcher(tag_name, *close_tags):
if close_tags:
options = '|'.join((tag_name,) + close_tags)
closers = '(?:%s)' % (options,)
else:
closers = tag_name
expr = tag_expr % locals()
return re.compile(expr, flags)
# Must contain at least an open html and an open head tag
html_find = tagMatcher('html')
head_find = tagMatcher('head', 'body')
link_find = re.compile(r'<link\b(?!:)', flags)
attr_find = re.compile(r'''
# Must start with a sequence of word-characters, followed by an equals sign
(?P<attr_name>\w+)=
# Then either a quoted or unquoted attribute
(?:
# Match everything that\'s between matching quote marks
(?P<qopen>["\'])(?P<q_val>.*?)(?P=qopen)
|
# If the value is not quoted, match up to whitespace
(?P<unq_val>(?:[^\s<>/]|/(?!>))+)
)
|
(?P<end_link>[<>])
''', flags)
# Entity replacement:
replacements = {
'amp':'&',
'lt':'<',
'gt':'>',
'quot':'"',
}
ent_replace = re.compile(r'&(%s);' % '|'.join(replacements.keys()))
def replaceEnt(mo):
"Replace the entities that are specified by OpenID"
return replacements.get(mo.group(1), mo.group())
def parseLinkAttrs(html):
"""Find all link tags in a string representing a HTML document and
return a list of their attributes.
@param html: the text to parse
@type html: str or unicode
@return: A list of dictionaries of attributes, one for each link tag
    @rtype: [{type(html): type(html)}]
"""
stripped = removed_re.sub('', html)
html_mo = html_find.search(stripped)
if html_mo is None or html_mo.start('contents') == -1:
return []
start, end = html_mo.span('contents')
head_mo = head_find.search(stripped, start, end)
if head_mo is None or head_mo.start('contents') == -1:
return []
start, end = head_mo.span('contents')
link_mos = link_find.finditer(stripped, head_mo.start(), head_mo.end())
matches = []
for link_mo in link_mos:
start = link_mo.start() + 5
link_attrs = {}
for attr_mo in attr_find.finditer(stripped, start):
if attr_mo.lastgroup == 'end_link':
break
# Either q_val or unq_val must be present, but not both
# unq_val is a True (non-empty) value if it is present
attr_name, q_val, unq_val = attr_mo.group(
'attr_name', 'q_val', 'unq_val')
attr_val = ent_replace.sub(replaceEnt, unq_val or q_val)
link_attrs[attr_name] = attr_val
matches.append(link_attrs)
return matches
def relMatches(rel_attr, target_rel):
"""Does this target_rel appear in the rel_str?"""
# XXX: TESTME
rels = rel_attr.strip().split()
for rel in rels:
rel = rel.lower()
if rel == target_rel:
return 1
return 0
def linkHasRel(link_attrs, target_rel):
"""Does this link have target_rel as a relationship?"""
# XXX: TESTME
rel_attr = link_attrs.get('rel')
return rel_attr and relMatches(rel_attr, target_rel)
def findLinksRel(link_attrs_list, target_rel):
"""Filter the list of link attributes on whether it has target_rel
as a relationship."""
# XXX: TESTME
matchesTarget = lambda attrs: linkHasRel(attrs, target_rel)
return filter(matchesTarget, link_attrs_list)
def findFirstHref(link_attrs_list, target_rel):
"""Return the value of the href attribute for the first link tag
in the list that has target_rel as a relationship."""
# XXX: TESTME
matches = findLinksRel(link_attrs_list, target_rel)
if not matches:
return None
first = matches[0]
return first.get('href')
|
loisaidasam/gensim
|
refs/heads/develop
|
gensim/models/atmodel.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Olavur Mortensen <olavurmortensen@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Author-topic model in Python.
This module trains the author-topic model on documents and corresponding author-document
dictionaries. The training is online and is constant in memory w.r.t. the number of
documents. The model is *not* constant in memory w.r.t. the number of authors.
The model can be updated with additional documents after training has been completed. It is
also possible to continue training on the existing data.
The model is closely related to Latent Dirichlet Allocation. The AuthorTopicModel class
inherits the LdaModel class, and its usage is thus similar.
Distributed computation and multiprocessing are not implemented at the moment, but may be
coming in the future.
The model was introduced by Rosen-Zvi and co-authors in 2004 (https://mimno.infosci.cornell.edu/info6150/readings/398.pdf).
A tutorial can be found at https://github.com/RaRe-Technologies/gensim/tree/develop/docs/notebooks/atmodel_tutorial.ipynb.
"""
# TODO: this class inherits LdaModel and overwrites some methods. There is some code
# duplication still, and a refactor could be made to avoid this. Comments with "TODOs"
# are included in the code where this is the case, for example in the log_perplexity
# and do_estep methods.
import logging
import numpy as np # for arrays, array broadcasting etc.
from copy import deepcopy
from shutil import copyfile
from os.path import isfile
from os import remove
from gensim import utils
from gensim.models import LdaModel
from gensim.models.ldamodel import LdaState
from gensim.matutils import dirichlet_expectation
from gensim.corpora import MmCorpus
from itertools import chain
from scipy.special import gammaln # gamma function utils
from six.moves import xrange
import six
logger = logging.getLogger('gensim.models.atmodel')
class AuthorTopicState(LdaState):
"""
NOTE: distributed mode not available yet in the author-topic model. This AuthorTopicState
    object is kept so that when the time comes to implement it, it will be easier.
Encapsulate information for distributed computation of AuthorTopicModel objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, lambda_shape, gamma_shape):
self.eta = eta
self.sstats = np.zeros(lambda_shape)
self.gamma = np.zeros(gamma_shape)
self.numdocs = 0
def construct_doc2author(corpus, author2doc):
"""Make a mapping from document IDs to author IDs."""
doc2author = {}
for d, _ in enumerate(corpus):
author_ids = []
for a, a_doc_ids in author2doc.items():
if d in a_doc_ids:
author_ids.append(a)
doc2author[d] = author_ids
return doc2author
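# Illustrative example (editorial): with a two-document corpus and
# author2doc = {'alice': [0, 1], 'bob': [1]}, construct_doc2author returns
# {0: ['alice'], 1: ['alice', 'bob']}.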
def construct_author2doc(corpus, doc2author):
"""Make a mapping from author IDs to document IDs."""
# First get a set of all authors.
authors_ids = set()
for d, a_doc_ids in doc2author.items():
for a in a_doc_ids:
authors_ids.add(a)
# Now construct the dictionary.
author2doc = {}
for a in authors_ids:
author2doc[a] = []
for d, a_ids in doc2author.items():
if a in a_ids:
author2doc[a].append(d)
return author2doc
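# construct_author2doc is the inverse mapping: given the doc2author dict from
# the example above, it returns {'alice': [0, 1], 'bob': [1]} (editorial
# illustration; each list follows document order).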
class AuthorTopicModel(LdaModel):
"""
The constructor estimates the author-topic model parameters based
on a training corpus:
>>> model = AuthorTopicModel(corpus, num_topics=10, author2doc=author2doc, id2word=id2word)
The model can be updated (trained) with new documents via
>>> model.update(other_corpus, other_author2doc)
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None,
chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
gamma_threshold=0.001, serialized=False, serialization_path=None,
minimum_probability=0.01, random_state=None):
"""
If the iterable corpus and one of author2doc/doc2author dictionaries are given,
start training straight away. If not given, the model is left untrained
(presumably because you want to call the `update` method manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`author2doc` is a dictionary where the keys are the names of authors, and the
values are lists of documents that the author contributes to.
`doc2author` is a dictionary where the keys are document IDs (indexes to corpus)
and the values are lists of author names. I.e. this is the reverse mapping of
        `author2doc`. Only one of the two, `author2doc` and `doc2author`, has to be
supplied.
        `passes` is the number of times the model makes a pass over the entire training
data.
`iterations` is the maximum number of times the model loops over each document
        (E-step). The iterations stop when convergence is reached.
`chunksize` controls the size of the mini-batches.
`alpha` and `eta` are hyperparameters that affect sparsity of the author-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
        supports special values of 'asymmetric' and 'auto': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a vector of shape num_words, which can be used to
impose (user defined) asymmetric priors over the word distribution.
It also supports the special value 'auto', which learns an asymmetric
prior over words directly from your data. `eta` can also be a matrix
of shape num_topics x num_words, which can be used to impose
asymmetric priors over the word distribution on a per-topic basis
(can not be learned from data).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates. Set to None to disable perplexity estimation.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively. `decay` controls how quickly old documents are
forgotten, while `offset` down-weights early iterations.
`minimum_probability` controls filtering the topics returned for a document (bow).
`random_state` can be an integer or a numpy.random.RandomState object. Set the
state of the random number generator inside the author-topic model, to ensure
reproducibility of your experiments, for example.
`serialized` indicates whether the input corpora to the model are simple
in-memory lists (`serialized = False`) or saved to the hard-drive
(`serialized = True`). Note that this behaviour is quite different from
other Gensim models. If your data is too large to fit in to memory, use
this functionality. Note that calling `AuthorTopicModel.update` with new
data may be cumbersome as it requires all the existing data to be
re-serialized.
`serialization_path` must be set to a filepath, if `serialized = True` is
used. Use, for example, `serialization_path = /tmp/serialized_model.mm` or use your
working directory by setting `serialization_path = serialized_model.mm`. An existing
file *cannot* be overwritten; either delete the old file or choose a different
name.
Example:
>>> model = AuthorTopicModel(corpus, num_topics=100, author2doc=author2doc, id2word=id2word) # train model
>>> model.update(corpus2) # update the author-topic model with additional documents
>>> model = AuthorTopicModel(corpus, num_topics=50, author2doc=author2doc, id2word=id2word, alpha='auto', eval_every=5) # train asymmetric alpha from data
"""
# NOTE: as distributed version of this model is not implemented, "distributed" is set to false. Some of the
# infrastructure to implement a distributed author-topic model is already in place, such as the AuthorTopicState.
distributed = False
self.dispatcher = None
self.numworkers = 1
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute the author-topic model over an empty collection (no terms)")
logger.info('Vocabulary consists of %d words.', self.num_terms)
self.author2doc = {}
self.doc2author = {}
self.distributed = distributed
self.num_topics = num_topics
self.num_authors = 0
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.total_docs = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.author2id = {}
self.id2author = {}
self.serialized = serialized
if serialized and not serialization_path:
raise ValueError("If serialized corpora are used, a the path to a folder where the corpus should be saved must be provided (serialized_path).")
if serialized and serialization_path:
assert not isfile(serialization_path), "A file already exists at the serialization_path path; choose a different serialization_path, or delete the file."
self.serialization_path = serialization_path
# Initialize an empty self.corpus.
self.init_empty_corpus()
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), "Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
if isinstance(eta, six.string_types):
if eta == 'asymmetric':
raise ValueError("The 'asymmetric' option cannot be used for eta")
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
self.random_state = utils.get_random_state(random_state)
assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# Initialize the variational distributions q(beta|lambda) and q(theta|gamma)
self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# if a training corpus was provided, start estimating the model right away
if corpus is not None and (author2doc is not None or doc2author is not None):
use_numpy = self.dispatcher is not None
self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
def __str__(self):
return "AuthorTopicModel(num_terms=%s, num_topics=%s, num_authors=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.num_authors, self.decay, self.chunksize)
def init_empty_corpus(self):
"""
Initialize an empty corpus. If the corpora are to be treated as lists, simply
initialize an empty list. If serialization is used, initialize an empty corpus
of the class `gensim.corpora.MmCorpus`.
"""
if self.serialized:
            # Initialize the corpus as a serialized empty list.
# This corpus will be extended in self.update.
MmCorpus.serialize(self.serialization_path, []) # Serialize empty corpus.
self.corpus = MmCorpus(self.serialization_path) # Store serialized corpus object in self.corpus.
else:
# All input corpora are assumed to just be lists.
self.corpus = []
def extend_corpus(self, corpus):
"""
Add new documents in `corpus` to `self.corpus`. If serialization is used,
then the entire corpus (`self.corpus`) is re-serialized and the new documents
are added in the process. If serialization is not used, the corpus, as a list
of documents, is simply extended.
"""
if self.serialized:
# Re-serialize the entire corpus while appending the new documents.
if isinstance(corpus, MmCorpus):
# Check that we are not attempting to overwrite the serialized corpus.
assert self.corpus.input != corpus.input, 'Input corpus cannot have the same file path as the model corpus (serialization_path).'
corpus_chain = chain(self.corpus, corpus) # A generator with the old and new documents.
copyfile(self.serialization_path, self.serialization_path + '.tmp') # Make a temporary copy of the file where the corpus is serialized.
self.corpus.input = self.serialization_path + '.tmp' # Point the old corpus at this temporary file.
MmCorpus.serialize(self.serialization_path, corpus_chain) # Re-serialize the old corpus, and extend it with the new corpus.
self.corpus = MmCorpus(self.serialization_path) # Store the new serialized corpus object in self.corpus.
remove(self.serialization_path + '.tmp') # Remove the temporary file again.
else:
# self.corpus and corpus are just lists, just extend the list.
# First check that corpus is actually a list.
assert isinstance(corpus, list), "If serialized == False, all input corpora must be lists."
self.corpus.extend(corpus)
def compute_phinorm(self, ids, authors_d, expElogthetad, expElogbetad):
"""Efficiently computes the normalizing factor in phi."""
        expElogtheta_sum = expElogthetad.sum(axis=0)
        phinorm = expElogtheta_sum.dot(expElogbetad) + 1e-100
return phinorm
def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
"""
Given a chunk of sparse document vectors, update gamma (parameters
controlling the topic weights) for each author corresponding to the
documents in the chunk.
        The whole input chunk of documents is assumed to fit in RAM; chunking of
a large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model's topic-word distributions, and return a 2-tuple
`(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`.
        `gamma_chunk` is of shape `len(chunk_authors) x self.num_topics`, where
`chunk_authors` is the number of authors in the documents in the
current chunk.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
"""
try:
_ = len(chunk)
        except TypeError:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
# Stack all the computed gammas into this output array.
gamma_chunk = np.zeros((0, self.num_topics))
# Now, for each document d update gamma and phi w.r.t. all authors in those documents.
for d, doc in enumerate(chunk):
if chunk_doc_idx is not None:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get the IDs and counts of all the words in the current document.
# TODO: this is duplication of code in LdaModel. Refactor.
if doc and not isinstance(doc[0][0], six.integer_types + (np.integer,)):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(id) for id, _ in doc]
else:
ids = [id for id, _ in doc]
cts = np.array([cnt for _, cnt in doc])
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
gammad = self.state.gamma[authors_d, :] # gamma of document d before update.
tilde_gamma = gammad.copy() # gamma that will be updated.
# Compute the expectation of the log of the Dirichlet parameters theta and beta.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
expElogbetad = self.expElogbeta[:, ids]
# Compute the normalizing constant of phi for the current document.
phinorm = self.compute_phinorm(ids, authors_d, expElogthetad, expElogbetad)
# Iterate between gamma and phi until convergence
for iteration in xrange(self.iterations):
lastgamma = tilde_gamma.copy()
# Update gamma.
# phi is computed implicitly below,
for ai, a in enumerate(authors_d):
tilde_gamma[ai, :] = self.alpha + len(self.author2doc[self.id2author[a]]) * expElogthetad[ai, :] * np.dot(cts / phinorm, expElogbetad.T)
# Update gamma.
# Interpolation between document d's "local" gamma (tilde_gamma),
# and "global" gamma (gammad).
tilde_gamma = (1 - rhot) * gammad + rhot * tilde_gamma
# Update Elogtheta and Elogbeta, since gamma and lambda have been updated.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
# Update the normalizing constant in phi.
phinorm = self.compute_phinorm(ids, authors_d, expElogthetad, expElogbetad)
# Check for convergence.
# Criterion is mean change in "local" gamma.
meanchange_gamma = np.mean(abs(tilde_gamma - lastgamma))
gamma_condition = meanchange_gamma < self.gamma_threshold
if gamma_condition:
converged += 1
break
# End of iterations loop.
# Store the updated gammas in the model state.
self.state.gamma[authors_d, :] = tilde_gamma
# Stack the new gammas into the output array.
gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
expElogtheta_sum_a = expElogthetad.sum(axis=0)
sstats[:, ids] += np.outer(expElogtheta_sum_a.T, cts / phinorm)
if len(chunk) > 1:
logger.debug("%i/%i documents converged within %i iterations",
converged, len(chunk), self.iterations)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * \sum_a phi_{dwak}
# = \sum_d n_{dw} * exp{Elogtheta_{ak} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
return gamma_chunk, sstats
def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
"""
Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).
"""
# TODO: this method is somewhat similar to the one in LdaModel. Refactor if possible.
if state is None:
state = self.state
gamma, sstats = self.inference(chunk, author2doc, doc2author, rhot, collect_sstats=True, chunk_doc_idx=chunk_doc_idx)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
"""
Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics. incl.
perplexity=2^(-bound), to log at INFO level.
"""
# TODO: This method is very similar to the one in LdaModel. Refactor.
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
logger.info("%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words" %
(perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words))
return perwordbound
def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
        is reached). `corpus` must be an iterable (repeatable stream of documents).
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
        converge for any `decay` in (0.5, 1.0]. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
If update is called with authors that already exist in the model, it will
resume training on not only new documents for that author, but also the
previously seen documents. This is necessary for those authors' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are
        appended to all the previously seen documents, and author2doc is
combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
`update()`.
It is not possible to add new authors to existing documents, as all
documents in `corpus` are assumed to be new documents.
Args:
corpus (gensim corpus): The corpus with which the author-topic model should be updated.
author2doc (dictionary): author to document mapping corresponding to indexes in input
corpus.
doc2author (dictionary): document to author mapping corresponding to indexes in input
corpus.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
                array or not. np can in some settings turn the term IDs
into floats, these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`AuthorTopicModel` constructor.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# TODO: if deepcopy is not used here, something goes wrong. When unit tests are run (specifically "testPasses"),
# the process simply gets killed.
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
# TODO: it is not possible to add new authors to an existing document (all input documents are treated
# as completely new documents). Perhaps this functionality could be implemented.
# If it's absolutely necessary, the user can delete the documents that have new authors, and call update
# on them with the new and old authors.
if corpus is None:
# Just keep training on the already available data.
# Assumes self.update() has been called before with input documents and corresponding authors.
assert self.total_docs > 0, 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in xrange(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if doc2author is None and author2doc is None:
raise ValueError('at least one of author2doc/doc2author must be specified, to establish input space dimensionality')
# If either doc2author or author2doc is missing, construct them from the other.
if doc2author is None:
doc2author = construct_doc2author(corpus, author2doc)
elif author2doc is None:
author2doc = construct_author2doc(corpus, doc2author)
# Number of authors that need to be updated.
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
            except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
logger.warning("AuthorTopicModel.update() called with an empty corpus")
return
self.total_docs += len_input_corpus
# Add new documents in corpus to self.corpus.
self.extend_corpus(corpus)
# Obtain a list of new authors.
new_authors = []
# Sorting the author names makes the model more reproducible.
for a in sorted(author2doc.keys()):
if not self.author2doc.get(a):
new_authors.append(a)
num_new_authors = len(new_authors)
# Add new authors do author2id/id2author dictionaries.
for a_id, a_name in enumerate(new_authors):
self.author2id[a_name] = a_id + self.num_authors
self.id2author[a_id + self.num_authors] = a_name
# Increment the number of total authors seen.
self.num_authors += num_new_authors
# Initialize the variational distributions q(theta|gamma)
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Combine author2doc with self.author2doc.
# First, increment the document IDs by the number of previously seen documents.
for a, doc_ids in author2doc.items():
                author2doc[a] = [d + self.total_docs - len_input_corpus for d in doc_ids]
# For all authors in the input corpus, add the new documents.
for a, doc_ids in author2doc.items():
if self.author2doc.get(a):
# This is not a new author, append new documents.
self.author2doc[a].extend(doc_ids)
else:
# This is a new author, create index.
self.author2doc[a] = doc_ids
# Add all new documents to self.doc2author.
for d, a_list in doc2author.items():
self.doc2author[d] = a_list
# Train on all documents of authors in input_corpus.
train_corpus_idx = []
            for a in author2doc.keys():  # For all authors in input corpus.
                train_corpus_idx.extend(self.author2doc[a])  # All documents seen so far for this author.
# Make the list of training documents unique.
train_corpus_idx = list(set(train_corpus_idx))
# train_corpus_idx is only a list of indexes, so "len" is valid.
lencorpus = len(train_corpus_idx)
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info("running %s author-topic training, %s topics, %s authors, %i passes over "
"the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, num_input_authors, passes, lencorpus,
updateafter, evalafter, iterations,
gamma_threshold)
if updates_per_pass * passes < 10:
logger.warning("too few updates, training might not converge; consider "
"increasing the number of passes or iterations to improve accuracy")
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset(self.state)
else:
# gamma is not needed in "other", thus its shape is (0, 0).
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for chunk_no, chunk_doc_idx in enumerate(utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
# log_perplexity requires the indexes of the documents being evaluated, to know what authors
# correspond to the documents.
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i',
pass_, chunk_no * chunksize + len(chunk), lencorpus)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info('PROGRESS: pass %i, at document #%i/%i',
pass_, chunk_no * chunksize + len(chunk), lencorpus)
# do_estep requires the indexes of the documents being trained on, to know what authors
# correspond to the documents.
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
dirty = False
# endfor entire corpus update
def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
"""
Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
There are basically two use cases of this method:
1. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
indicating the indexes of the documents in the training corpus.
2. `chunk` is a test set (held-out data), and author2doc and doc2author
        corresponding to this test set are provided. There must not be any new authors
passed to this method. `chunk_doc_idx` is not needed in this case.
To obtain the per-word bound, compute:
>>> corpus_words = sum(cnt for document in corpus for _, cnt in document)
>>> model.bound(corpus, author2doc=author2doc, doc2author=doc2author) / corpus_words
"""
# TODO: enable evaluation of documents with new authors. One could, for example, make it
# possible to pass a list of documents to self.inference with no author dictionaries,
# assuming all the documents correspond to one (unseen) author, learn the author's
# gamma, and return gamma (without adding it to self.state.gamma). Of course,
# collect_sstats should be set to false, so that the model is not updated w.r.t. these
# new documents.
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
expElogbeta = np.exp(Elogbeta)
gamma = self.state.gamma
if author2doc is None and doc2author is None:
# Evaluating on training documents (chunk of self.corpus).
author2doc = self.author2doc
doc2author = self.doc2author
if not chunk_doc_idx:
# If author2doc and doc2author are not provided, chunk is assumed to be a subset of
# self.corpus, and chunk_doc_idx is thus required.
raise ValueError('Either author dictionaries or chunk_doc_idx must be provided. Consult documentation of bound method.')
elif author2doc is not None and doc2author is not None:
# Training on held-out documents (documents not seen during training).
# All authors in dictionaries must still be seen during training.
for a in author2doc.keys():
if not self.author2doc.get(a):
raise ValueError('bound cannot be called with authors not seen during training.')
if chunk_doc_idx:
raise ValueError('Either author dictionaries or chunk_doc_idx must be provided, not both. Consult documentation of bound method.')
else:
raise ValueError('Either both author2doc and doc2author should be provided, or neither. Consult documentation of bound method.')
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
word_score = 0.0
theta_score = 0.0
for d, doc in enumerate(chunk):
if chunk_doc_idx:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
ids = np.array([id for id, _ in doc]) # Word IDs in doc.
cts = np.array([cnt for _, cnt in doc]) # Word counts.
if d % self.chunksize == 0:
logger.debug("bound: at document #%i in chunk", d)
# Computing the bound requires summing over expElogtheta[a, k] * expElogbeta[k, v], which
# is the same computation as in normalizing phi.
phinorm = self.compute_phinorm(ids, authors_d, expElogtheta[authors_d, :], expElogbeta[:, ids])
word_score += np.log(1.0 / len(authors_d)) * sum(cts) + cts.dot(np.log(phinorm))
# Compensate likelihood for when `chunk` above is only a sample of the whole corpus. This ensures
        # that the likelihood is always roughly on the same scale.
word_score *= subsample_ratio
# E[log p(theta | alpha) - log q(theta | gamma)]
for a in author2doc.keys():
a = self.author2id[a]
theta_score += np.sum((self.alpha - gamma[a, :]) * Elogtheta[a, :])
theta_score += np.sum(gammaln(gamma[a, :]) - gammaln(self.alpha))
theta_score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :]))
# theta_score is rescaled in a similar fashion.
# TODO: treat this in a more general way, similar to how it is done with word_score.
theta_score *= self.num_authors / len(author2doc)
# E[log p(beta | eta) - log q (beta | lambda)]
beta_score = 0.0
beta_score += np.sum((self.eta - _lambda) * Elogbeta)
beta_score += np.sum(gammaln(_lambda) - gammaln(self.eta))
sum_eta = np.sum(self.eta)
beta_score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
total_score = word_score + theta_score + beta_score
return total_score
def get_document_topics(self, word_id, minimum_probability=None):
'''
This method overwrites `LdaModel.get_document_topics` and simply raises an
exception. `get_document_topics` is not valid for the author-topic model,
use `get_author_topics` instead.
'''
raise NotImplementedError('Method "get_document_topics" is not valid for the author-topic model. Use the "get_author_topics" method.')
def get_author_topics(self, author_name, minimum_probability=None):
"""
        Return topic distribution for the given author, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `minimum_probability`).
Obtaining topic probabilities of each word, as in LDA (via `per_word_topics`),
is not supported.
"""
author_id = self.author2id[author_name]
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
topic_dist = self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :])
author_topics = [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability]
return author_topics
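    # Illustrative usage (editorial; the author name and probabilities are hypothetical):
    #   model.get_author_topics('jane')  # -> e.g. [(0, 0.83), (4, 0.12)]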
def __getitem__(self, author_names, eps=None):
'''
Return topic distribution for input author as a list of
        (topic_id, topic_probability) 2-tuples.
        Ignores topics with probability less than `eps`.
Do not call this method directly, instead use `model[author_names]`.
'''
if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
# endclass AuthorTopicModel
|
alfredodeza/boto
|
refs/heads/develop
|
boto/file/connection.py
|
153
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# File representation of connection, for use with "file://" URIs.
from boto.file.bucket import Bucket
class FileConnection(object):
def __init__(self, file_storage_uri):
# FileConnections are per-file storage URI.
self.file_storage_uri = file_storage_uri
def get_bucket(self, bucket_name, validate=True, headers=None):
return Bucket(bucket_name, self.file_storage_uri.object_name)
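# Illustrative sketch only, not part of boto: this class is normally reached
# through a file storage URI object rather than instantiated by hand.  The
# lines below are commented out, and the path, URI helper and call chain are
# assumptions for illustration, not a confirmed boto API walkthrough.
#
#   # uri = boto.storage_uri('file:///tmp/example.txt', 'file')
#   # conn = FileConnection(uri)
#   # bucket = conn.get_bucket('tmp')   # returns a file-backed Bucket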
|
bjacquet/cardndice
|
refs/heads/master
|
polls/urls.py
|
1
|
from django.conf.urls.defaults import *
from cardndice.polls.models import Poll
info_dict = {
'queryset': Poll.objects.all(),
}
urlpatterns = patterns('',
(r'^$', 'django.views.generic.list_detail.object_list', info_dict),
(r'^(?P<object_id>\d+)/$', 'django.views.generic.list_detail.object_detail', info_dict),
(r'^(?P<object_id>\d+)/results/$', 'django.views.generic.list_detail.object_detail', dict(info_dict, template_name='polls/results.html')),
(r'^(?P<object_id>\d+)/vote/$', 'cardndice.polls.views.vote'),
)
|
hyiltiz/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/sina.py
|
107
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
)
class SinaIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(.*?\.)?video\.sina\.com\.cn/
(
(.+?/(((?P<pseudo_id>\d+).html)|(.*?(\#|(vid=)|b/)(?P<id>\d+?)($|&|\-))))
|
# This is used by external sites like Weibo
(api/sinawebApi/outplay.php/(?P<token>.+?)\.swf)
)
'''
_TESTS = [
{
'url': 'http://video.sina.com.cn/news/vlist/zt/chczlj2013/?opsubject_id=top12#110028898',
'md5': 'd65dd22ddcf44e38ce2bf58a10c3e71f',
'info_dict': {
'id': '110028898',
'ext': 'flv',
'title': '《中国新闻》 朝鲜要求巴拿马立即释放被扣船员',
}
},
{
'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html',
'info_dict': {
'id': '101314253',
'ext': 'flv',
'title': '军方提高对朝情报监视级别',
},
},
]
def _extract_video(self, video_id):
data = compat_urllib_parse.urlencode({'vid': video_id})
url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
video_id, 'Downloading video url')
image_page = self._download_webpage(
'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,
video_id, 'Downloading thumbnail info')
return {'id': video_id,
'url': url_doc.find('./durl/url').text,
'ext': 'flv',
'title': url_doc.find('./vname').text,
'thumbnail': image_page.split('=')[1],
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
if mobj.group('token') is not None:
# The video id is in the redirected url
self.to_screen('Getting video id')
request = compat_urllib_request.Request(url)
request.get_method = lambda: 'HEAD'
(_, urlh) = self._download_webpage_handle(request, 'NA', False)
return self._real_extract(urlh.geturl())
elif video_id is None:
pseudo_id = mobj.group('pseudo_id')
webpage = self._download_webpage(url, pseudo_id)
video_id = self._search_regex(r'vid:\'(\d+?)\'', webpage, 'video id')
return self._extract_video(video_id)
|
matbra/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/source_examples/styling_glyph_properties.py
|
24
|
from bokeh.plotting import figure, output_file, show
output_file("axes.html")
p = figure(plot_width=400, plot_height=400)
p.circle([1,2,3,4,5], [2,5,8,2,7], name="mycircle")
glyph = p.select(name="mycircle")[0].glyph
glyph.size = 60
glyph.fill_alpha = 0.2
glyph.line_color = "firebrick"
glyph.line_dash = [6, 3]
glyph.line_width = 2
show(p)
|
betoesquivel/fil2014
|
refs/heads/master
|
filenv/lib/python2.7/site-packages/django/contrib/gis/gdal/prototypes/geom.py
|
219
|
from ctypes import c_char_p, c_double, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import (const_string_output,
double_output, geom_output, int_output, srs_output, string_output, void_output)
### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
f.errcheck = check_bool
return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii')
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii')
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii')
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii')
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
|
vollov/net-audit
|
refs/heads/master
|
src/pymongo/errors.py
|
2
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by PyMongo."""
from bson.errors import *
class PyMongoError(Exception):
"""Base class for all PyMongo exceptions.
.. versionadded:: 1.4
"""
class ConnectionFailure(PyMongoError):
"""Raised when a connection to the database cannot be made or is lost.
"""
class AutoReconnect(ConnectionFailure):
"""Raised when a connection to the database is lost and an attempt to
auto-reconnect will be made.
In order to auto-reconnect you must handle this exception, recognizing that
the operation which caused it has not necessarily succeeded. Future
operations will attempt to open a new connection to the database (and
will continue to raise this exception until the first successful
connection is made).
"""
def __init__(self, message='', errors=None):
self.errors = errors or []
ConnectionFailure.__init__(self, message)
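# A hedged illustration, not part of PyMongo: since the docstring above notes
# that the interrupted operation may or may not have completed, callers usually
# retry a bounded number of times.  `collection`, the document and the back-off
# scheme below are hypothetical; the lines are comments only.
#
#   # for attempt in range(3):
#   #     try:
#   #         collection.insert({'counter': 1})
#   #         break
#   #     except AutoReconnect:
#   #         time.sleep(2 ** attempt)   # back off before retrying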
class ConfigurationError(PyMongoError):
"""Raised when something is incorrectly configured.
"""
class OperationFailure(PyMongoError):
"""Raised when a database operation fails.
.. versionadded:: 1.8
The :attr:`code` attribute.
"""
def __init__(self, error, code=None):
self.code = code
PyMongoError.__init__(self, error)
class TimeoutError(OperationFailure):
"""Raised when a database operation times out.
.. versionadded:: 1.8
"""
class DuplicateKeyError(OperationFailure):
"""Raised when a safe insert or update fails due to a duplicate key error.
.. note:: Requires server version **>= 1.3.0**
.. versionadded:: 1.4
"""
class InvalidOperation(PyMongoError):
"""Raised when a client attempts to perform an invalid operation.
"""
class InvalidName(PyMongoError):
"""Raised when an invalid name is used.
"""
class CollectionInvalid(PyMongoError):
"""Raised when collection validation fails.
"""
class InvalidURI(ConfigurationError):
"""Raised when trying to parse an invalid mongodb URI.
.. versionadded:: 1.5
"""
class UnsupportedOption(ConfigurationError):
"""Exception for unsupported options.
.. versionadded:: 2.0
"""
|
Maximilian-Reuter/SickRage
|
refs/heads/master
|
lib/guessit/date.py
|
33
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import re
from dateutil import parser
_dsep = r'[-/ \.]'
_dsep_bis = r'[-/ \.x]'
date_regexps = [
re.compile('%s(\d{8})%s' % (_dsep, _dsep), re.IGNORECASE),
re.compile('%s(\d{6})%s' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{2})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{2})[^\d]' % (_dsep, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{4})%s(\d{1,2})%s(\d{1,2})[^\d]' % (_dsep_bis, _dsep), re.IGNORECASE),
re.compile('[^\d](\d{1,2})%s(\d{1,2})%s(\d{4})[^\d]' % (_dsep, _dsep_bis), re.IGNORECASE),
re.compile('[^\d](\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4})[^\d]' % (_dsep, _dsep), re.IGNORECASE)]
def valid_year(year, today=None):
"""Check if number is a valid year"""
if not today:
today = datetime.date.today()
return 1920 < year < today.year + 5
def search_year(string):
"""Looks for year patterns, and if found return the year and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
Note this only looks for valid production years, that is between 1920
and now + 5 years, so for instance 2000 would be returned as a valid
year but 1492 would not.
>>> search_year(' in the year 2000... ')
(2000, (13, 17))
>>> search_year(' they arrived in 1492. ')
(None, None)
"""
match = re.search(r'[^0-9]([0-9]{4})[^0-9]', string)
if match:
year = int(match.group(1))
if valid_year(year):
return year, match.span(1)
return None, None
def search_date(string, year_first=None, day_first=True):
"""Looks for date patterns, and if found return the date and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
The year may be given with only two digits; in that case the nearest possible
date to today is returned.
>>> search_date(' This happened on 2002-04-22. ')
(datetime.date(2002, 4, 22), (18, 28))
>>> search_date(' And this on 17-06-1998. ')
(datetime.date(1998, 6, 17), (13, 23))
>>> search_date(' no date in here ')
(None, None)
"""
start, end = None, None
match = None
for date_re in date_regexps:
s = date_re.search(string)
if s and (match is None or s.end() - s.start() > len(match)):
start, end = s.start(), s.end()
if date_re.groups:
match = '-'.join(s.groups())
else:
match = s.group()
if match is None:
return None, None
today = datetime.date.today()
# If day_first/year_first is undefined, parse is made using both possible values.
yearfirst_opts = [False, True]
if year_first is not None:
yearfirst_opts = [year_first]
dayfirst_opts = [True, False]
if day_first is not None:
dayfirst_opts = [day_first]
kwargs_list = ({'dayfirst': d, 'yearfirst': y} for d in dayfirst_opts for y in yearfirst_opts)
for kwargs in kwargs_list:
try:
date = parser.parse(match, **kwargs)
except (ValueError, TypeError):  # see https://bugs.launchpad.net/dateutil/+bug/1247643
date = None
# check date plausibility
if date and valid_year(date.year, today=today):
return date.date(), (start+1, end-1) #compensate for sentinels
return None, None
|
AOKPSaber/android_external_chromium
|
refs/heads/jb-mr1
|
testing/gmock/scripts/gmock_doctor.py
|
66
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
_EMAIL = 'googlemock@googlegroups.com'
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgPointee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
_CLANG_NON_GMOCK_FILE_LINE_RE = (
r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
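# Sketch only, not from the original script: a diagnoser is simply a callable
# that feeds (regex, diagnosis-template) pairs to _GenericDiagnoser and returns
# the resulting generator.  The regex, codes and advice below are hypothetical.
#
#   # def _ExampleDiagnoser(msg):
#   #     regex = _GCC_FILE_LINE_RE + r'error: example pattern \'(?P<symbol>.+)\''
#   #     diagnosis = "Example advice about '%(symbol)s'."
#   #     return _GenericDiagnoser('EX', 'Example Disease', [(regex, diagnosis)], msg)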
def _NeedToReturnReferenceDiagnoser(msg):
"""Diagnoses the NRR disease, given the error messages by the compiler."""
gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
+ _GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: creating array with negative size')
clang_regex = (r'error:.*array.*negative.*\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of function template specialization '
r'\'testing::internal::ReturnAction<(?P<type>).*>'
r'::operator Action<.*>\' requested here')
diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
return _GenericDiagnoser('NRR', 'Need to Return Reference',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'a type'})],
msg)
def _NeedToReturnSomethingDiagnoser(msg):
"""Diagnoses the NRS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
r'*gmock.*actions\.h.*error: void value not ignored)'
r'|(error: control reaches end of non-void function)')
clang_regex1 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'Result\' \(aka \'(?P<return_type>).*\'\) '
r'with an rvalue of type \'void\'')
clang_regex2 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'(?P<return_type>).*\' '
r'with an rvalue of type \'void\'')
diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
return _GenericDiagnoser(
'NRS',
'Need to Return Something',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _NeedToReturnNothingDiagnoser(msg):
"""Diagnoses the NRN disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: instantiation of '
r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
r'as type \'void\'')
clang_regex1 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
r'::operator Action<void \(.*\)>\' requested here')
clang_regex2 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::DoBothAction<.*>'
r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
r'requested here')
diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
return _GenericDiagnoser(
'NRN',
'Need to Return Nothing',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
"""Diagnoses the IBRA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to an incomplete type '
r'\'(?P<type>.*)( const)?\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of member function '
r'\'testing::internal2::TypeWithoutFormatter<.*>::'
r'PrintValue\' requested here')
diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
"""Diagnoses the OFM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly\(<unresolved overloaded function type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly')
diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionActionDiagnoser(msg):
"""Diagnoses the OFA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
r'\'Invoke\(<unresolved overloaded function type>')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
r'function for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+:\s+'
r'note: candidate template ignored:\s+'
r'couldn\'t infer template argument \'FunctionImpl\'')
diagnosis = """
The function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
return _GenericDiagnoser('OFA', 'Overloaded Function Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedMethodActionDiagnoser(msg):
"""Diagnoses the OMA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Invoke\(.+, <unresolved overloaded function '
r'type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
r'for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+: '
r'note: candidate function template not viable: '
r'requires 1 argument, but 2 were provided')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _MockObjectPointerDiagnoser(msg):
"""Diagnoses the MOP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
r'\'(?P<class_name>.*?) *\' is a pointer; '
r'maybe you meant to use \'->\'\?')
diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
return _GenericDiagnoser(
'MOP',
'Mock Object Pointer',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis % {'mock_object': 'mock_object',
'method': 'method',
'class_name': '%(class_name)s'})],
msg)
def _NeedToUseSymbolDiagnoser(msg):
"""Diagnoses the NUS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
r'(was not declared in this scope|has not been declared)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: use of undeclared identifier '
r'\'(?P<symbol>.+)\'')
diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
for m in (list(_FindAllMatches(gcc_regex, msg)) +
list(_FindAllMatches(clang_regex, msg))):
symbol = m.groupdict()['symbol']
if symbol in _COMMON_GMOCK_SYMBOLS:
yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
"""Diagnoses the NRNULL disease, given the error messages by the compiler."""
gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
'::operator testing::Action<Func>\(\) const.*\n' +
_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*error: no matching function for call to \'ImplicitCast_\('
r'long int&\)')
clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
r'call to \'ImplicitCast_\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<long>::operator '
r'Action<(?P<type>.*)\(\)>\' requested here')
diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
return _GenericDiagnoser(
'NRNULL', 'Need to use ReturnNull',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'the right type'})],
msg)
def _TypeInTemplatedBaseDiagnoser(msg):
"""Diagnoses the TTB disease, given the error messages by the compiler."""
# This version works when the type is used as the mock function's return
# type.
gcc_4_3_1_regex_type_in_retval = (
r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
r'error: a function call cannot appear in a constant-expression')
gcc_4_4_0_regex_type_in_retval = (
r'error: a function call cannot appear in a constant-expression'
+ _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
# This version works when the type is used as the mock function's sole
# parameter type.
gcc_regex_type_of_sole_param = (
_GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n')
# This version works when the type is used as a parameter of a mock
# function that has multiple parameters.
gcc_regex_type_of_a_param = (
r'error: expected `;\' before \'::\' token\n'
+ _GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n'
r'.*error: \'.+\' was not declared in this scope')
clang_regex_type_of_retval_or_sole_param = (
_CLANG_FILE_LINE_RE +
r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):\d+: error: '
r'non-friend class member \'Result\' cannot have a qualified name'
)
clang_regex_type_of_a_param = (
_CLANG_FILE_LINE_RE +
r'error: C\+\+ requires a type specifier for all declarations\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: '
r'C\+\+ requires a type specifier for all declarations'
)
diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
return _GenericDiagnoser(
'TTB', 'Type in Template Base',
[(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_regex_type_of_sole_param, diagnosis),
(gcc_regex_type_of_a_param, diagnosis),
(clang_regex_type_of_retval_or_sole_param, diagnosis),
(clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
msg)
def _WrongMockMethodMacroDiagnoser(msg):
"""Diagnoses the WMM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
r'.*\n'
r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error:.*array.*negative.*r?\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: too few arguments '
r'to function call, expected (?P<args>\d+), '
r'have (?P<wrong_args>\d+)')
diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _WrongParenPositionDiagnoser(msg):
"""Diagnoses the WPP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'error:.*testing::internal::MockSpec<.* has no member named \''
r'(?P<method>\w+)\'')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error: no member named \'(?P<method>\w+)\' in '
r'\'testing::internal::MockSpec<.*>\'')
diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
_DIAGNOSERS = [
_IncompleteByReferenceArgumentDiagnoser,
_MockObjectPointerDiagnoser,
_NeedToReturnNothingDiagnoser,
_NeedToReturnReferenceDiagnoser,
_NeedToReturnSomethingDiagnoser,
_NeedToUseReturnNullDiagnoser,
_NeedToUseSymbolDiagnoser,
_OverloadedFunctionActionDiagnoser,
_OverloadedFunctionMatcherDiagnoser,
_OverloadedMethodActionDiagnoser,
_TypeInTemplatedBaseDiagnoser,
_WrongMockMethodMacroDiagnoser,
_WrongParenPositionDiagnoser,
]
def Diagnose(msg):
"""Generates all possible diagnoses given the compiler error message."""
msg = re.sub(r'\x1b\[[^m]*m', '', msg) # Strips all color formatting.
diagnoses = []
for diagnoser in _DIAGNOSERS:
for diag in diagnoser(msg):
diagnosis = '[%s - %s]\n%s' % diag
if not diagnosis in diagnoses:
diagnoses.append(diagnosis)
return diagnoses
def main():
print ('Google Mock Doctor v%s - '
'diagnoses problems in code using Google Mock.' % _VERSION)
if sys.stdin.isatty():
print ('Please copy and paste the compiler errors here. Press Ctrl-D when '
'you are done:')
else:
print 'Waiting for compiler errors on stdin . . .'
msg = sys.stdin.read().strip()
diagnoses = Diagnose(msg)
count = len(diagnoses)
if not count:
print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
else:
print '------------------------------------------------------------'
print 'Your code appears to have the following',
if count > 1:
print '%s diseases:' % (count,)
else:
print 'disease:'
i = 0
for d in diagnoses:
i += 1
if count > 1:
print '\n#%s:' % (i,)
print d
print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
_EMAIL)
if __name__ == '__main__':
main()
|
Belxjander/Kirito
|
refs/heads/master
|
Python-3.5.0-Amiga/Tools/pybench/Arithmetic.py
|
92
|
from pybench import Test
class SimpleIntegerArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2.1
b = 3.3332
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleIntFloatArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
def test(self):
for i in range(self.rounds):
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2
b = 3
c = 3.14159
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleLongArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 60000
def test(self):
for i in range(self.rounds):
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2220001
b = 100001
c = 30005
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
class SimpleComplexArithmetic(Test):
version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 80000
def test(self):
for i in range(self.rounds):
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
a = 2 + 3j
b = 2.5 + 4.5j
c = 1.2 + 6.2j
c = a + b
c = b + c
c = c + a
c = a + b
c = b + c
c = c - a
c = a - b
c = b - c
c = c - a
c = b - c
c = a / b
c = b / a
c = c / b
c = a * b
c = b * a
c = c * b
c = a / b
c = b / a
c = c / b
def calibrate(self):
for i in range(self.rounds):
pass
|
matmutant/sl4a
|
refs/heads/master
|
python/gdata/tests/atom_tests/auth_test.py
|
128
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.auth
import atom.http_core
class BasicAuthTest(unittest.TestCase):
def test_modify_request(self):
http_request = atom.http_core.HttpRequest()
credentials = atom.auth.BasicAuth('Aladdin', 'open sesame')
self.assert_(credentials.basic_cookie == 'QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
credentials.modify_request(http_request)
self.assert_(http_request.headers[
'Authorization'] == 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
def suite():
return unittest.TestSuite((unittest.makeSuite(BasicAuthTest,'test'),))
if __name__ == '__main__':
unittest.main()
|
jolyonb/edx-platform
|
refs/heads/master
|
lms/djangoapps/badges/migrations/0001_initial.py
|
14
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import badges.models
from django.conf import settings
import django.utils.timezone
from model_utils import fields
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BadgeAssertion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', jsonfield.fields.JSONField()),
('backend', models.CharField(max_length=50)),
('image_url', models.URLField()),
('assertion_url', models.URLField()),
('modified', fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('created', fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False, db_index=True)),
],
),
migrations.CreateModel(
name='BadgeClass',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(max_length=255, validators=[badges.models.validate_lowercase])),
('issuing_component', models.SlugField(default=b'', blank=True, validators=[badges.models.validate_lowercase])),
('display_name', models.CharField(max_length=255)),
('course_id', CourseKeyField(default=None, max_length=255, blank=True)),
('description', models.TextField()),
('criteria', models.TextField()),
('mode', models.CharField(default=b'', max_length=100, blank=True)),
('image', models.ImageField(upload_to=b'badge_classes', validators=[badges.models.validate_badge_image])),
],
),
migrations.CreateModel(
name='CourseCompleteImageConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mode', models.CharField(help_text='The course mode for this badge image. For example, "verified" or "honor".', unique=True, max_length=125)),
('icon', models.ImageField(help_text='Badge images must be square PNG files. The file size should be under 250KB.', upload_to=b'course_complete_badges', validators=[badges.models.validate_badge_image])),
('default', models.BooleanField(default=False, help_text='Set this value to True if you want this image to be the default image for any course modes that do not have a specified badge image. You can have only one default image.')),
],
),
migrations.AlterUniqueTogether(
name='badgeclass',
unique_together=set([('slug', 'issuing_component', 'course_id')]),
),
migrations.AddField(
model_name='badgeassertion',
name='badge_class',
field=models.ForeignKey(to='badges.BadgeClass', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='badgeassertion',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
]
|
yk5/incubator-airflow
|
refs/heads/master
|
airflow/contrib/hooks/__init__.py
|
2
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Contrib hooks are not imported by default. They should be accessed
# directly: from airflow.contrib.hooks.hook_module import Hook
import sys
import os as _os
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinery below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
_hooks = {
'docker_hook': ['DockerHook'],
'ftp_hook': ['FTPHook'],
'ftps_hook': ['FTPSHook'],
'vertica_hook': ['VerticaHook'],
'ssh_hook': ['SSHHook'],
'winrm_hook': ['WinRMHook'],
'sftp_hook': ['SFTPHook'],
'bigquery_hook': ['BigQueryHook'],
'qubole_hook': ['QuboleHook'],
'gcs_hook': ['GoogleCloudStorageHook'],
'datastore_hook': ['DatastoreHook'],
'gcp_cloudml_hook': ['CloudMLHook'],
'redshift_hook': ['RedshiftHook'],
'gcp_dataproc_hook': ['DataProcHook'],
'gcp_dataflow_hook': ['DataFlowHook'],
'spark_submit_operator': ['SparkSubmitOperator'],
'cloudant_hook': ['CloudantHook'],
'fs_hook': ['FSHook'],
'wasb_hook': ['WasbHook'],
'gcp_pubsub_hook': ['PubSubHook'],
'jenkins_hook': ['JenkinsHook'],
'aws_dynamodb_hook': ['AwsDynamoDBHook'],
'azure_data_lake_hook': ['AzureDataLakeHook'],
}
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from airflow.utils.helpers import AirflowImporter
airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
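# Illustrative note, not part of the original file: with the legacy importer
# installed above, the flat import form keeps working, while the comment at the
# top of this module recommends importing from the concrete submodule.  FTPHook
# is taken from the _hooks mapping; both lines are shown as comments only.
#
#   # from airflow.contrib.hooks import FTPHook           # deprecated, lazy import
#   # from airflow.contrib.hooks.ftp_hook import FTPHook  # preferred, explicit path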
|
android-ia/platform_external_gtest
|
refs/heads/master
|
test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
pgmillon/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/mso_schema_template_anp_epg.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_anp_epg
short_description: Manage Endpoint Groups (EPGs) in schema templates
description:
- Manage EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
required: yes
epg:
description:
- The name of the EPG to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
# contracts:
# description:
# - A list of contracts associated to this ANP.
# type: list
bd:
description:
- The BD to associate with this EPG.
type: dict
suboptions:
name:
description:
- The name of the BD to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced BD.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced BD.
type: str
subnets:
description:
- The subnets to associate with this EPG.
type: list
suboptions:
ip:
description:
- The IP range in CIDR notation.
type: str
required: true
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
useg_epg:
description:
- Whether this is a USEG EPG.
type: bool
# useg_epg_attributes:
# description:
# - A dictionary consisting of USEG attributes.
# type: dict
intra_epg_isolation:
description:
- Whether intra EPG isolation is enforced.
- When not specified, this parameter defaults to C(unenforced).
type: str
choices: [ enforced, unenforced ]
intersite_multicaste_source:
description:
- Whether intersite multicast source is enabled.
- When not specified, this parameter defaults to C(no).
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_template_anp
- module: mso_schema_template_anp_epg_subnet
- module: mso_schema_template_bd
- module: mso_schema_template_contract_filter
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: present
delegate_to: localhost
- name: Remove an EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: absent
delegate_to: localhost
- name: Query a specific EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec, issubset
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
bd=dict(type='dict', options=mso_reference_spec()),
display_name=dict(type='str'),
useg_epg=dict(type='bool'),
intra_epg_isolation=dict(type='str', choices=['enforced', 'unenforced']),
intersite_multicaste_source=dict(type='bool'),
subnets=dict(type='list', options=mso_subnet_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['epg']],
['state', 'present', ['epg']],
],
)
schema = module.params['schema']
template = module.params['template']
anp = module.params['anp']
epg = module.params['epg']
display_name = module.params['display_name']
bd = module.params['bd']
useg_epg = module.params['useg_epg']
intra_epg_isolation = module.params['intra_epg_isolation']
intersite_multicaste_source = module.params['intersite_multicaste_source']
subnets = module.params['subnets']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if schema_obj:
schema_id = schema_obj['id']
else:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get ANP
anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']]
if anp not in anps:
mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
anp_idx = anps.index(anp)
# Get EPG
epgs = [e['name'] for e in schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']]
if epg is not None and epg in epgs:
epg_idx = epgs.index(epg)
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]
if state == 'query':
if epg is None:
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']
elif not mso.existing:
mso.fail_json(msg="EPG '{epg}' not found".format(epg=epg))
mso.exit_json()
epgs_path = '/templates/{0}/anps/{1}/epgs'.format(template, anp)
epg_path = '/templates/{0}/anps/{1}/epgs/{2}'.format(template, anp, epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=epg_path))
elif state == 'present':
bd_ref = mso.make_reference(bd, 'bd', schema_id, template)
subnets = mso.make_subnets(subnets)
if display_name is None and not mso.existing:
display_name = epg
payload = dict(
name=epg,
displayName=display_name,
uSegEpg=useg_epg,
intraEpg=intra_epg_isolation,
proxyArp=intersite_multicaste_source,
# FIXME: Missing functionality
# uSegAttrs=[],
contractRelationships=[],
subnets=subnets,
bdRef=bd_ref,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=epg_path, value=mso.sent))
else:
ops.append(dict(op='add', path=epgs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
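# A rough sketch (values illustrative) of the PATCH operation this module builds
# for state=present on a new EPG; the actual value sent is mso.sent, i.e. the
# payload above after mso.sanitize():
#
#   ops = [dict(
#       op='add',
#       path='/templates/Template 1/anps/ANP 1/epgs/-',
#       value=dict(name='EPG 1', displayName='EPG 1', uSegEpg=None,
#                  intraEpg=None, proxyArp=None, contractRelationships=[],
#                  subnets=None, bdRef=None),
#   )]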
|
suhussai/youtube-dl
|
refs/heads/master
|
test/helper.py
|
63
|
from __future__ import unicode_literals
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
def get_params(override=None):
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
if override:
parameters.update(override)
return parameters
def try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def report_warning(message):
    '''
    Print the message to stderr; it will be prefixed with 'WARNING:'.
    If stderr is a tty, the 'WARNING:' prefix will be colored.
    '''
if sys.stderr.isatty() and os.name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
class FakeYDL(YoutubeDL):
def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary:
        # some tests set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, s, tb=None):
raise Exception(s)
def download(self, x):
self.result.append(x)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message):
return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
def gettestcases(include_onlymatching=False):
for ie in youtube_dl.extractor.gen_extractors():
for tc in ie.get_testcases(include_onlymatching):
yield tc
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_info_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
if isinstance(expected, compat_str) and expected.startswith('re:'):
got = got_dict.get(info_field)
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
got = got_dict.get(info_field)
start_str = expected[len('startswith:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
got.startswith(start_str),
'field %s (value: %r) should start with %r' % (info_field, got, start_str))
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
got = got_dict.get(info_field)
contains_str = expected[len('contains:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
contains_str in got,
'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
got = got_dict.get(info_field)
self.assertTrue(
isinstance(got, (list, dict)),
'Expected field %s to be a list or a dict, but it is of type %s' % (
info_field, type(got).__name__))
expected_num = int(expected.partition(':')[2])
assertGreaterEqual(
self, len(got), expected_num,
'Expected %d items in field %s, but only got %d' % (
expected_num, info_field, len(got)
)
)
continue
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
else:
return repr(v)
info_dict_str = ''
if len(missing_keys) != len(expected_dict):
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items() if k not in missing_keys)
if info_dict_str:
info_dict_str += '\n'
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
for k in missing_keys)
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
', '.join(sorted(missing_keys))))
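# A minimal sketch (field names and values illustrative) of the special string
# prefixes expect_info_dict understands in a test case's 'info_dict':
_EXAMPLE_EXPECTED_INFO_DICT = {
    'id': '12345',                   # plain values are compared with assertEqual
    'title': 're:^Episode [0-9]+$',  # 're:'         -> regular expression match
    'uploader': 'startswith:Some',   # 'startswith:' -> prefix match
    'description': 'contains:demo',  # 'contains:'   -> substring match
    'tags': 'mincount:3',            # 'mincount:'   -> list/dict with >= 3 items
    'duration': int,                 # a type        -> isinstance check
}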
def assertRegexpMatches(self, text, regexp, msg=None):
if hasattr(self, 'assertRegexp'):
return self.assertRegexp(text, regexp, msg)
else:
m = re.match(regexp, text)
if not m:
note = 'Regexp didn\'t match: %r not found' % (regexp)
if len(text) < 1000:
note += ' in %r' % text
if msg is None:
msg = note
else:
msg = note + ', ' + msg
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning
|
henryprescott/portfolio
|
refs/heads/master
|
node_modules/sqlite3/deps/extract.py
|
775
|
import sys
import tarfile
import os
tarball = os.path.abspath(sys.argv[1])
dirname = os.path.abspath(sys.argv[2])
with tarfile.open(tarball, 'r:gz') as tfile:
    tfile.extractall(dirname)
sys.exit(0)
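# A minimal usage sketch (paths illustrative):
#   python extract.py sqlite-autoconf.tar.gz build/deps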
|
steedos/odoo7
|
refs/heads/master
|
openerp/tools/test_config.py
|
456
|
# -*- coding: utf-8 -*-
""" Tests for the configuration file/command-line arguments. """
# This test should be run from its directory.
# TODO A configmanager object cannot parse a config file and/or the command
# line more than once, which prevents 'reloading' a configuration.
import os
import config
config_file_00 = os.path.join(os.path.dirname(__file__),'test-config-values-00.conf')
# 1. No config file, no command-line arguments (a.k.a. default values)
conf = config.configmanager()
conf.parse_config()
assert conf['osv_memory_age_limit'] == 1.0
assert os.path.join(conf['root_path'], 'addons') == conf['addons_path']
# 2. No config file, some command-line arguments
conf = config.configmanager()
# mess with the optparse.Option definition to allow an invalid path
conf.casts['addons_path'].action = 'store'
conf.parse_config(['--addons-path=/xyz/dont-exist', '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
assert conf['addons_path'] == '/xyz/dont-exist'
# 3. Config file, no command-line arguments
conf = config.configmanager()
conf.parse_config(['-c', config_file_00])
assert conf['osv_memory_age_limit'] == 3.4
# 4. Config file, and command-line arguments
conf = config.configmanager()
conf.parse_config(['-c', config_file_00, '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sbinet-staging/pyrame
|
refs/heads/master
|
getapi/unit_test.py
|
1
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Frédéric Magniette, Miguel Rubio-Roy
# This file is part of Pyrame.
#
# Pyrame is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrame is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrame. If not, see <http://www.gnu.org/licenses/>
import apipools
import sys
test=apipools.api_pool()
test.add_api_from_file("cmd_ps","/opt/pyrame/cmd_ps.api")
api=test.get_api("cmd_ps","get_voltage_ps")
pattern="{'function': 'get_voltage_ps', 'model': 'cmd_ps', 'args': ['ps_id', 'channel'], 'id': 10}"
if str(api)==pattern:
sys.exit(0)
else:
print("%s\n not equal to \n%s"%(str(api),pattern))
sys.exit(1)
|
herow/planning_qgis
|
refs/heads/master
|
python/plugins/processing/gui/CreateNewScriptAction.py
|
6
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
CreateNewScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtGui import QIcon
from processing.gui.ToolboxAction import ToolboxAction
from processing.gui.ScriptEditorDialog import ScriptEditorDialog
class CreateNewScriptAction(ToolboxAction):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
def __init__(self, actionName, scriptType):
self.name = actionName
self.group = self.tr('Tools', 'CreateNewScriptAction')
self.scriptType = scriptType
def getIcon(self):
if self.scriptType == self.SCRIPT_PYTHON:
return QIcon(':/processing/images/script.png')
elif self.scriptType == self.SCRIPT_R:
return QIcon(':/processing/images/r.png')
def execute(self):
dlg = None
if self.scriptType == self.SCRIPT_PYTHON:
dlg = ScriptEditorDialog(ScriptEditorDialog.SCRIPT_PYTHON, None)
if self.scriptType == self.SCRIPT_R:
dlg = ScriptEditorDialog(ScriptEditorDialog.SCRIPT_R, None)
dlg.show()
dlg.exec_()
if dlg.update:
if self.scriptType == self.SCRIPT_PYTHON:
self.toolbox.updateProvider('script')
elif self.scriptType == self.SCRIPT_R:
self.toolbox.updateProvider('r')
|
M157q/django-localflavor
|
refs/heads/master
|
localflavor/sk/forms.py
|
7
|
"""
Slovak-specific form helpers
"""
from __future__ import absolute_import, unicode_literals
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
from .sk_districts import DISTRICT_CHOICES
from .sk_regions import REGION_CHOICES
class SKRegionSelect(Select):
"""
    A select widget with a list of Slovak regions as choices.
"""
def __init__(self, attrs=None):
super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
"""
    A select widget with a list of Slovak districts as choices.
"""
def __init__(self, attrs=None):
super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
"""
    A form field that validates its input as a Slovak postal code.
    Valid formats are XXXXX or XXX XX, where X is a digit.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXX or XXX XX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(SKPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(SKPostalCodeField, self).clean(value)
return v.replace(' ', '')
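# A minimal usage sketch (form and field names illustrative), assuming a normal
# Django forms setup; SKPostalCodeField normalizes "XXX XX" input to "XXXXX",
# and SKRegionSelect/SKDistrictSelect are plain Select widgets preloaded with
# the Slovak region/district choices:
#
#   from django import forms
#
#   class SKAddressForm(forms.Form):
#       postal_code = SKPostalCodeField()
#       region = forms.CharField(widget=SKRegionSelect())
#
#   form = SKAddressForm({'postal_code': '841 04', 'region': 'Bratislava'})
#   form.is_valid()                        # True
#   form.cleaned_data['postal_code']       # '84104'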
|
0Rick0/Fontys-DS-GCD
|
refs/heads/master
|
Original Assignments/week 5/example.py
|
1
|
#!/usr/bin/env python3
import mincemeat
import glob
import traceback
import sys
import operator
def mapfn(k, v: str):
import string
from stopwords import allStopWords
stopwords = allStopWords.keys()
trans = str.maketrans('', '', string.punctuation)
for w in v.split():
w = w.lower()
w = w.translate(trans)
if w in stopwords or len(w) <= 1:
continue
yield w, 1
def reducefn(k, vs):
return sum(vs)
# result = 0
# for v in vs:
# result += v
# return result
def read_file(filename):
try:
with open(filename, encoding='ISO-8859-1') as f:
return f.read()
except (IOError, UnicodeDecodeError):
sys.stderr.write(filename + '\n')
traceback.print_exc()
return None
def read_all_files(small=True) -> tuple:
if small:
files = glob.glob('Gutenberg/Gutenberg Small/*.*')
else:
files = glob.glob('Gutenberg/Gutenberg SF/**.txt')
files += glob.glob('Gutenberg/Gutenberg SF/**.htm')
for file in files:
yield file, read_file(file)
def main():
s = mincemeat.Server()
data = {f: d for f, d in read_all_files(False)}
# print(data.keys())
# The data source can be any dictionary-like object
s.datasource = data
s.mapfn = mapfn
s.reducefn = reducefn
results = s.run_server(password="changeme")
results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)
print(results)
with open('sorted.txt', 'w') as f:
f.write('\n'.join('%s\t%d' % result for result in results))
if __name__ == '__main__':
main()
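# A rough sketch of running this example; the worker invocation follows
# mincemeat's documented client usage (host is illustrative, the password must
# match run_server above):
#
#   $ python example.py                          # start the map/reduce server
#   $ python mincemeat.py -p changeme localhost  # start one or more workers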
|
clef/python-social-auth
|
refs/heads/master
|
setup.py
|
47
|
# -*- coding: utf-8 -*-
"""Setup file for easy installation"""
import sys
import os
from os.path import join, dirname, split
from setuptools import setup
PY3 = os.environ.get('BUILD_VERSION') == '3' or sys.version_info[0] == 3
version = __import__('social').__version__
LONG_DESCRIPTION = """
Python Social Auth is an easy-to-set-up social authentication/registration
mechanism with support for several frameworks and auth providers.
Crafted using base code from django-social-auth, it implements a common
interface for defining new authentication providers from third parties and
for bringing support to more frameworks and ORMs.
"""
def long_description():
"""Return long description from README.rst if it's present
because it doesn't get installed."""
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return LONG_DESCRIPTION
def path_tokens(path):
if not path:
return []
head, tail = split(path)
return path_tokens(head) + [tail]
def get_packages():
    exclude_packages = ('__pycache__',)
packages = []
for path_info in os.walk('social'):
tokens = path_tokens(path_info[0])
        if tokens[-1] not in exclude_packages:
packages.append('.'.join(tokens))
return packages
requirements_file, tests_requirements_file = {
False: ('requirements.txt', 'social/tests/requirements.txt'),
True: ('requirements-python3.txt', 'social/tests/requirements-python3.txt')
}[PY3]
with open(requirements_file, 'r') as f:
requirements = f.readlines()
with open(tests_requirements_file, 'r') as f:
tests_requirements = [line for line in f.readlines() if '@' not in line]
setup(
name='python-social-auth',
version=version,
author='Matias Aguirre',
author_email='matiasaguirre@gmail.com',
description='Python social authentication made simple.',
license='BSD',
keywords='django, flask, pyramid, webpy, openid, oauth, social auth',
url='https://github.com/omab/python-social-auth',
packages=get_packages(),
long_description=long_description(),
install_requires=requirements,
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Internet',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
package_data={
'social/tests': ['social/tests/*.txt']
},
include_package_data=True,
tests_require=tests_requirements,
test_suite='social.tests',
zip_safe=False
)
|
partofthething/home-assistant
|
refs/heads/dev
|
homeassistant/components/vlc/media_player.py
|
16
|
"""Provide functionality to interact with vlc devices on the network."""
import logging
import vlc
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import CONF_NAME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_ARGUMENTS = "arguments"
DEFAULT_NAME = "Vlc"
SUPPORT_VLC = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_STOP
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ARGUMENTS, default=""): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
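# A minimal configuration sketch (values illustrative) matching PLATFORM_SCHEMA above:
#
#   media_player:
#     - platform: vlc
#       name: Living Room VLC
#       arguments: "--no-video"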
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the vlc platform."""
add_entities(
[VlcDevice(config.get(CONF_NAME, DEFAULT_NAME), config.get(CONF_ARGUMENTS))]
)
class VlcDevice(MediaPlayerEntity):
"""Representation of a vlc player."""
def __init__(self, name, arguments):
"""Initialize the vlc device."""
self._instance = vlc.Instance(arguments)
self._vlc = self._instance.media_player_new()
self._name = name
self._volume = None
self._muted = None
self._state = None
self._media_position_updated_at = None
self._media_position = None
self._media_duration = None
def update(self):
"""Get the latest details from the device."""
status = self._vlc.get_state()
if status == vlc.State.Playing:
self._state = STATE_PLAYING
elif status == vlc.State.Paused:
self._state = STATE_PAUSED
else:
self._state = STATE_IDLE
self._media_duration = self._vlc.get_length() / 1000
position = self._vlc.get_position() * self._media_duration
if position != self._media_position:
self._media_position_updated_at = dt_util.utcnow()
self._media_position = position
self._volume = self._vlc.audio_get_volume() / 100
self._muted = self._vlc.audio_get_mute() == 1
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_VLC
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
def media_seek(self, position):
"""Seek the media to a specific location."""
track_length = self._vlc.get_length() / 1000
self._vlc.set_position(position / track_length)
def mute_volume(self, mute):
"""Mute the volume."""
self._vlc.audio_set_mute(mute)
self._muted = mute
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._vlc.audio_set_volume(int(volume * 100))
self._volume = volume
def media_play(self):
"""Send play command."""
self._vlc.play()
self._state = STATE_PLAYING
def media_pause(self):
"""Send pause command."""
self._vlc.pause()
self._state = STATE_PAUSED
def media_stop(self):
"""Send stop command."""
self._vlc.stop()
self._state = STATE_IDLE
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL or file."""
if not media_type == MEDIA_TYPE_MUSIC:
_LOGGER.error(
"Invalid media type %s. Only %s is supported",
media_type,
MEDIA_TYPE_MUSIC,
)
return
self._vlc.set_media(self._instance.media_new(media_id))
self._vlc.play()
self._state = STATE_PLAYING
|
dkuner/example-modules
|
refs/heads/master
|
modules/modeling/spark/hero_cos_matc_spark/main.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from specparser import HadoopRuntime
def main():
hr = HadoopRuntime()
settings = hr.settings
match_result_output_dir = hr.get_hdfs_working_dir("match_result")
settings.Output.match_result.val = match_result_output_dir
match_analysis_output_dir = hr.get_hdfs_working_dir("match_analysis")
settings.Output.match_analysis.val = match_analysis_output_dir
#SPARK_HOME=/home/run/spark-1.1.0-bin-cdh4
#/home/run/spark_word_segement.jar
# os.system("SPARK_HOME=/home/ansibler/work/spark/spark-1.1.0-bin-cdh4")
os.system('''SPARK_HOME=/home/run/spark-1.1.0-bin-cdh4 \
&& $SPARK_HOME/bin/spark-submit --class \"com.zetdata.hero.trial.SimpleApp\" \
--master %s \
--num-executors 3 --driver-memory 1024m --executor-memory 1024m --executor-cores 1 \
--conf "spark.executor.extraJavaOptions=-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:MaxPermSize=1024m" \
/home/run/spark_word_segement.jar \
%s %s %s %s %s ''' %(settings.Param.spark_host,
settings.Input.jd_dir.val,
settings.Input.rs_dir.val,
settings.Output.match_result.val,
settings.Output.match_analysis.val,
settings.Input.white_dict.val
))
print("Done")
if __name__ == "__main__":
main()
|
alvaroaleman/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts.py
|
7
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import shlex
import errno
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.six import PY3, iteritems
from ansible.module_utils.six.moves import configparser, StringIO, reduce
from ansible.module_utils._text import to_native, to_text
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
# Check if we have SSLContext support
from ssl import create_default_context, SSLContext
del create_default_context
del SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT=None
class TimeoutError(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
if seconds is None:
seconds = globals().get('GATHER_TIMEOUT') or 10
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
# If we were called as @timeout, then the first parameter will be the
# function we are to wrap instead of the number of seconds. Detect this
# and correct it by setting seconds to our default value and return the
# inner decorator function manually wrapped around the function
if callable(seconds):
func = seconds
seconds = 10
return decorator(func)
# If we were called as @timeout([...]) then python itself will take
# care of wrapping the inner decorator around the function
return decorator
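# A minimal usage sketch (names and limits illustrative): wrap a potentially
# slow fact-gathering step so it raises TimeoutError instead of hanging.
# When no limit is given, the decorator falls back to GATHER_TIMEOUT or 10s.
@timeout(seconds=5, error_message="example fact gathering timed out")
def _example_slow_fact():
    # stand-in for a slow operation; a real fact gatherer would do I/O here
    time.sleep(0.1)
    return 'ok'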
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/tools/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
]
def __init__(self, module, load_on_init=True, cached_facts=None):
self.module = module
if not cached_facts:
self.facts = {}
else:
self.facts = cached_facts
### TODO: Eventually, these should all get moved to populate(). But
# some of the values are currently being used by other subclasses (for
# instance, os_family and distribution). Have to sort out what to do
# about those first.
if load_on_init:
self.get_platform_facts()
self.facts.update(Distribution(module).populate())
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_caps_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
self.get_python_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
getconf_bin = self.module.get_bin_path('getconf')
if getconf_bin:
rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
data = out.splitlines()
self.facts['architecture'] = data[0]
else:
bootinfo_bin = self.module.get_bin_path('bootinfo')
rc, out, err = self.module.run_command([bootinfo_bin, '-p'])
data = out.splitlines()
self.facts['architecture'] = data[0]
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
if machine_id:
machine_id = machine_id.splitlines()[0]
self.facts["machine_id"] = machine_id
def get_local_facts(self):
fact_path = self.module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
try:
rc, out, err = self.module.run_command(fn)
except UnicodeError:
fact = 'error loading fact - output of running %s was not utf-8' % fn
local[fact_base] = fact
self.facts['local'] = local
return
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError:
# load raw ini
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except configparser.Error:
fact = "error loading fact - please check content"
else:
fact = {}
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
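    # A small sketch (paths and content illustrative) of what get_local_facts()
    # above picks up: every '<name>.fact' file under fact_path ends up under
    # self.facts['local'][<name>] (surfaced to playbooks as ansible_local).
    # Executable files are run and their output parsed; plain files are tried
    # as JSON first, then as INI via ConfigParser.
    #
    #   /etc/ansible/facts.d/preferences.fact:
    #       {"role": "webserver", "tier": 2}
    #
    #   -> self.facts['local'] == {'preferences': {'role': 'webserver', 'tier': 2}}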
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
# list of directories to check for ssh keys
# used in the order listed here, the first one with keys is used
keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
for keydir in keydirs:
for type_ in keytypes:
factname = 'ssh_host_key_%s_public' % type_
if factname in self.facts:
# a previous keydir was already successful, stop looking
# for keys
return
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
else:
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
def get_service_mgr_facts(self):
        # TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc.
        # OSs other than Linux might also need to check across several possible candidates
# Mapping of proc_1 values to more useful names
proc_1_map = {
'procd': 'openwrt_init',
}
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
            # If the output of the command starts with what looks like a PID, then the 'ps' command
            # probably didn't work the way we wanted, most likely because it's busybox
if re.match(r' *[0-9]+ ', proc_1):
proc_1 = None
# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None
if proc_1 is not None:
proc_1 = os.path.basename(proc_1)
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
            # many systems return init, so this cannot be trusted; if it ends in 'sh' it is probably a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
# Lookup proc_1 value in map and use proc_1 value itself if no match
self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1)
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd'
else:
self.facts['service_mgr'] = 'systemstarter'
elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
#FIXME: we might want to break out to individual BSDs
self.facts['service_mgr'] = 'bsdinit'
elif self.facts['system'] == 'AIX':
self.facts['service_mgr'] = 'src'
elif self.facts['system'] == 'SunOS':
#FIXME: smf?
self.facts['service_mgr'] = 'svcs'
elif self.facts['distribution'] == 'OpenWrt':
self.facts['service_mgr'] = 'openwrt_init'
elif self.facts['system'] == 'Linux':
if self.is_systemd_managed():
self.facts['service_mgr'] = 'systemd'
elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
self.facts['service_mgr'] = 'upstart'
elif os.path.realpath('/sbin/rc') == '/sbin/openrc':
self.facts['service_mgr'] = 'openrc'
elif os.path.exists('/etc/init.d/'):
self.facts['service_mgr'] = 'sysvinit'
if not self.facts.get('service_mgr', False):
# if we cannot detect, fallback to generic 'service'
self.facts['service_mgr'] = 'service'
def get_lsb_facts(self):
lsb_path = self.module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = self.module.run_command([lsb_path, "-a"], errors='surrogate_or_replace')
if rc == 0:
self.facts['lsb'] = {}
for line in out.splitlines():
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
for line in get_file_lines('/etc/lsb-release'):
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except (AttributeError,OSError):
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except (AttributeError,OSError):
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except (AttributeError,OSError):
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except (AttributeError,OSError):
self.facts['selinux']['type'] = 'unknown'
def get_caps_facts(self):
capsh_path = self.module.get_bin_path('capsh')
if capsh_path:
rc, out, err = self.module.run_command([capsh_path, "--print"], errors='surrogate_or_replace')
enforced_caps = []
enforced = 'NA'
for line in out.splitlines():
if len(line) < 1:
continue
if line.startswith('Current:'):
if line.split(':')[1].strip() == '=ep':
enforced = 'False'
else:
enforced = 'True'
enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
self.facts['system_capabilities_enforced'] = enforced
self.facts['system_capabilities'] = enforced_caps
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['weekday_number'] = now.strftime('%w')
self.facts['date_time']['weeknumber'] = now.strftime('%W')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
def is_systemd_managed(self):
# tools must be installed
if self.module.get_bin_path('systemctl'):
            # this should show whether systemd is the boot init system, in case checking init failed to mark it as systemd
            # these mirror systemd's own sd_booted test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
def get_env_facts(self):
self.facts['env'] = {}
for k,v in iteritems(os.environ):
self.facts['env'][k] = v
def get_dns_facts(self):
self.facts['dns'] = {}
for line in get_file_content('/etc/resolv.conf', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
                if 'nameservers' not in self.facts['dns']:
self.facts['dns']['nameservers'] = []
for nameserver in tokens[1:]:
self.facts['dns']['nameservers'].append(nameserver)
elif tokens[0] == 'domain':
if len(tokens) > 1:
self.facts['dns']['domain'] = tokens[1]
elif tokens[0] == 'search':
self.facts['dns']['search'] = []
for suffix in tokens[1:]:
self.facts['dns']['search'].append(suffix)
elif tokens[0] == 'sortlist':
self.facts['dns']['sortlist'] = []
for address in tokens[1:]:
self.facts['dns']['sortlist'].append(address)
elif tokens[0] == 'options':
self.facts['dns']['options'] = {}
if len(tokens) > 1:
for option in tokens[1:]:
option_tokens = option.split(':', 1)
if len(option_tokens) == 0:
continue
val = len(option_tokens) == 2 and option_tokens[1] or True
self.facts['dns']['options'][option_tokens[0]] = val
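    # A small worked example (values illustrative) of the resolv.conf parsing in
    # get_dns_facts() above:
    #
    #   nameserver 192.0.2.1
    #   nameserver 192.0.2.2
    #   search example.com corp.example.com
    #   options timeout:2 rotate
    #
    #   -> self.facts['dns'] == {
    #          'nameservers': ['192.0.2.1', '192.0.2.2'],
    #          'search': ['example.com', 'corp.example.com'],
    #          'options': {'timeout': '2', 'rotate': True},
    #      }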
def _get_mount_size_facts(self, mountpoint):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(mountpoint)
size_total = statvfs_result.f_frsize * statvfs_result.f_blocks
size_available = statvfs_result.f_frsize * (statvfs_result.f_bavail)
except OSError:
pass
return size_total, size_available
def get_python_facts(self):
self.facts['python'] = {
'version': {
'major': sys.version_info[0],
'minor': sys.version_info[1],
'micro': sys.version_info[2],
'releaselevel': sys.version_info[3],
'serial': sys.version_info[4]
},
'version_info': list(sys.version_info),
'executable': sys.executable,
'has_sslcontext': HAS_SSLCONTEXT
}
try:
self.facts['python']['type'] = sys.subversion[0]
except AttributeError:
try:
self.facts['python']['type'] = sys.implementation.name
except AttributeError:
self.facts['python']['type'] = None
class Distribution(object):
"""
    This class fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SuSE'},
{'path': '/etc/SuSE-release', 'name': 'SuSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/os-release', 'name': 'NA'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
}
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
)
def __init__(self, module):
self.system = platform.system()
self.facts = {}
self.module = module
def populate(self):
self.get_distribution_facts()
return self.facts
def get_distribution_facts(self):
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
self.facts['distribution'] = self.system
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
self.facts['distribution'] = self.system
if self.system in systems_implemented:
cleanedname = self.system.replace('-','')
distfunc = getattr(self, 'get_distribution_'+cleanedname)
distfunc()
elif self.system == 'Linux':
# try to find out which linux distribution this is
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
if not os.path.exists(path):
continue
                # if allowempty is set, we only check for file existence, not content
if 'allowempty' in ddict and ddict['allowempty']:
self.facts['distribution'] = name
break
if os.path.getsize(path) == 0:
continue
data = get_file_content(path)
if name in self.SEARCH_STRING:
                    # look for the distribution string (from SEARCH_STRING) in the data
                    # only the distribution name is set; the version is assumed to be correct from platform.dist()
if self.SEARCH_STRING[name] in data:
# this sets distribution=RedHat if 'Red Hat' shows up in data
self.facts['distribution'] = name
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
self.facts['distribution'] = data.split()[0]
break
else:
# call a dedicated function for parsing the file content
try:
distfunc = getattr(self, 'get_distribution_' + name)
parsed = distfunc(name, data, path)
if parsed is None or parsed:
                            # distfunc returns False if parsing failed
                            # break only if parsing was successful
                            # otherwise continue with other distributions
break
except AttributeError:
                        # this should never happen, but if it does, fail quietly rather than with a traceback
pass
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
self.facts['os_family'] = self.facts['distribution']
distro = self.facts['distribution'].replace(' ', '_')
if distro in self.OS_FAMILY:
self.facts['os_family'] = self.OS_FAMILY[distro]
def get_distribution_AIX(self):
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
def get_distribution_HPUX(self):
rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
def get_distribution_Darwin(self):
self.facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
def get_distribution_FreeBSD(self):
self.facts['distribution_release'] = platform.release()
data = re.search('(\d+)\.(\d+)-RELEASE.*', self.facts['distribution_release'])
if data:
self.facts['distribution_major_version'] = data.group(1)
self.facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
def get_distribution_OpenBSD(self):
self.facts['distribution_version'] = platform.release()
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_release'] = match.groups()[0]
else:
self.facts['distribution_release'] = 'release'
def get_distribution_DragonFly(self):
pass
def get_distribution_NetBSD(self):
self.facts['distribution_major_version'] = self.facts['distribution_release'].split('.')[0]
def get_distribution_Slackware(self, name, data, path):
if 'Slackware' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.findall('\w+[.]\w+', data)
if version:
self.facts['distribution_version'] = version[0]
def get_distribution_Amazon(self, name, data, path):
if 'Amazon' not in data:
return False # TODO: remove
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
def get_distribution_OpenWrt(self, name, data, path):
if 'OpenWrt' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
def get_distribution_Alpine(self, name, data, path):
self.facts['distribution'] = 'Alpine'
self.facts['distribution_version'] = data
def get_distribution_SunOS(self):
data = get_file_content('/etc/release').splitlines()[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
return
uname_v = get_uname_version(self.module)
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_v is not None and 'NexentaOS_' in uname_v:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_v is not None:
self.facts['distribution_version'] = uname_v.splitlines()[0].strip()
return
return False # TODO: remove if tested without this
def get_distribution_SuSE(self, name, data, path):
if 'suse' not in data.lower():
return False # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04, 13.0, 13
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
                    if release and release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
self.facts['distribution_release'] = release
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
self.facts['distribution'] = "SLES"
elif "Desktop" in data:
self.facts['distribution'] = "SLED"
for line in lines:
                    release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
if release:
self.facts['distribution_release'] = release.group(1)
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
def get_distribution_Debian(self, name, data, path):
if 'Debian' in data or 'Raspbian' in data:
self.facts['distribution'] = 'Debian'
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'Ubuntu' in data:
self.facts['distribution'] = 'Ubuntu'
pass # Ubuntu gets correct info from python functions
else:
return False # TODO: remove if tested without this
def get_distribution_Mandriva(self, name, data, path):
if 'Mandriva' in data:
self.facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
else:
return False
def get_distribution_NA(self, name, data, path):
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and self.facts['distribution'] == 'NA':
self.facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and self.facts['distribution_version'] == 'NA':
self.facts['distribution_version'] = version.group(1).strip('"')
def get_distribution_Coreos(self, name, data, path):
if self.facts['distribution'].lower() == 'coreos':
if not data:
# include fix from #15230, #15228
return
release = re.search("^GROUP=(.*)", data)
if release:
self.facts['distribution_release'] = release.group(1).strip('"')
else:
return False # TODO: remove if tested without this
class Hardware(Facts):
"""
    This is a generic Hardware subclass of Facts. This should be further
    subclassed to implement per-platform behavior. If you subclass this, it
    should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
# When Hardware is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Hardware:
return super(Hardware, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Hardware):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
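    # Selection sketch (illustrative, not part of the original code): on a Linux
    # host, instantiating Hardware returns a LinuxHardware object, because
    # __new__ above picks the subclass whose 'platform' attribute matches
    # platform.system(); callers then call populate() to fill the facts dict.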
def get_sysctl(self, prefixes):
sysctl_cmd = self.module.get_bin_path('sysctl')
cmd = [sysctl_cmd]
cmd.extend(prefixes)
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return dict()
sysctl = dict()
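        # sysctl output format differs per platform: FreeBSD and Darwin print
        # 'hw.ncpu: 4' while OpenBSD/NetBSD print 'hw.ncpu=4' (illustrative
        # values); the split below accepts either separator.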
for line in out.splitlines():
if not line:
continue
(key, value) = re.split('\s?=\s?|: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
    In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'Linux'
    # Originally only had these four as top-level facts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
# regex used against findmnt output to detect bind mounts
BIND_MOUNT_RE = re.compile(r'.*\]')
# regex used against mtab content to find entries that are bind mounts
MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
self.get_lvm_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
memstats = {}
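        # /proc/meminfo lines look like 'MemTotal:       16384256 kB'
        # (illustrative value); the kB figure is divided by 1024 below so all
        # memory facts are reported in megabytes.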
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
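        # The 'nocache' figures treat page cache and buffers as reclaimable:
        # nocache:free = memfree + cached + buffers, and nocache:used is the
        # remainder of memtotal.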
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
self.facts['memory_mb'] = {
'real' : {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache' : {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap' : {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
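        # A /proc/cpuinfo stanza on x86 looks roughly like (illustrative):
        #   processor   : 0
        #   vendor_id   : GenuineIntel
        #   model name  : Intel(R) Xeon(R) CPU
        #   physical id : 0
        #   core id     : 0
        #   cpu cores   : 8
        #   siblings    : 16
        # The keys below drive the socket/core/thread counting that follows.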
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
                    if 'vme' not in data[1].split():
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
# Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
if vendor_id_occurrence > 0:
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
if sockets:
self.facts['processor_count'] = len(sockets)
else:
self.facts['processor_count'] = i
socket_values = list(sockets.values())
if socket_values:
self.facts['processor_cores'] = socket_values[0]
else:
self.facts['processor_cores'] = 1
core_values = list(cores.values())
if core_values:
self.facts['processor_threads_per_core'] = core_values[0] // self.facts['processor_cores']
else:
self.facts['processor_threads_per_core'] = 1 // self.facts['processor_cores']
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
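                # Worked example (hypothetical host): 2 sockets ('physical id'),
                # 'cpu cores' = 8 and 'siblings' = 16 give processor_count = 2,
                # processor_cores = 8, processor_threads_per_core = 16 // 8 = 2
                # and processor_vcpus = 2 * 2 * 8 = 32.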
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.splitlines() if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
# this uses the linux major device number
# for details see https://www.kernel.org/doc/Documentation/devices.txt
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
cmd = [lsblk_path] + args
rc, out, err = self.module.run_command(cmd)
return rc, out, err
def _lsblk_uuid(self):
uuids = {}
lsblk_path = self.module.get_bin_path("lsblk")
if not lsblk_path:
return uuids
rc, out, err = self._run_lsblk(lsblk_path)
if rc != 0:
return uuids
# each line will be in format:
# <devicename><some whitespace><uuid>
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
for lsblk_line in out.splitlines():
if not lsblk_line:
continue
line = lsblk_line.strip()
fields = line.rsplit(None, 1)
if len(fields) < 2:
continue
device_name, uuid = fields[0].strip(), fields[1].strip()
if device_name in uuids:
continue
uuids[device_name] = uuid
return uuids
def _run_findmnt(self, findmnt_path):
args = ['--list', '--noheadings', '--notruncate']
cmd = [findmnt_path] + args
rc, out, err = self.module.run_command(cmd, errors='surrogate_or_replace')
return rc, out, err
def _find_bind_mounts(self):
bind_mounts = set()
findmnt_path = self.module.get_bin_path("findmnt")
if not findmnt_path:
return bind_mounts
rc, out, err = self._run_findmnt(findmnt_path)
if rc != 0:
return bind_mounts
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
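        # A bind-mount line from findmnt looks like (illustrative):
        #   /mnt/bind  /dev/sda1[/srv/data]  ext4  rw,relatime
        # i.e. the SOURCE column carries a '[...]' suffix, which BIND_MOUNT_RE
        # keys on.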
for line in out.splitlines():
fields = line.split()
# fields[0] is the TARGET, fields[1] is the SOURCE
if len(fields) < 2:
continue
# bind mounts will have a [/directory_name] in the SOURCE column
if self.BIND_MOUNT_RE.match(fields[1]):
bind_mounts.add(fields[0])
return bind_mounts
def _mtab_entries(self):
mtab_file = '/etc/mtab'
if not os.path.exists(mtab_file):
mtab_file = '/proc/mounts'
mtab = get_file_content(mtab_file, '')
mtab_entries = []
for line in mtab.splitlines():
fields = line.split()
if len(fields) < 4:
continue
mtab_entries.append(fields)
return mtab_entries
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
bind_mounts = self._find_bind_mounts()
uuids = self._lsblk_uuid()
mtab_entries = self._mtab_entries()
mounts = []
for fields in mtab_entries:
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
if not device.startswith('/') and ':/' not in device:
continue
if fstype == 'none':
continue
size_total, size_available = self._get_mount_size_facts(mount)
if mount in bind_mounts:
# only add if not already there, we might have a plain /etc/mtab
if not self.MTAB_BIND_MOUNT_RE.match(options):
options += ",bind"
mount_info = {'mount': mount,
'device': device,
'fstype': fstype,
'options': options,
# statvfs data
'size_total': size_total,
'size_available': size_available,
'uuid': uuids.get(device, 'N/A')}
mounts.append(mount_info)
self.facts['mounts'] = mounts
def get_holders(self, block_dev_dict, sysdir):
block_dev_dict['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
block_dev_dict['holders'].append(name)
else:
block_dev_dict['holders'].append(folder)
def get_device_facts(self):
self.facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_or_replace')
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
            for key, test in [('removable', '/removable'),
                              ('support_discard', '/queue/discard_granularity')]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
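            # Partition folders are named after the disk, e.g. 'sda1' and 'sda2'
            # under /sys/block/sda (illustrative), and are collected into
            # d['partitions'] below.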
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
self.facts['devices'][diskname] = d
def get_uptime_facts(self):
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g'
vgs_path = self.module.get_bin_path('vgs')
#vgs fields: VG #PV #LV #SN Attr VSize VFree
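            # Example line with '--noheadings --nosuffix --units g' (illustrative):
            #   vg00   1   3   0 wz--n- 100.00 20.00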
            vgs = {}
if vgs_path:
rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.split()
vgs[items[0]] = {'size_g':items[-2],
'free_g':items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
#lvs fields:
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.split()
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs}
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb, which are available from *swap -s*.
"""
platform = 'SunOS'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.splitlines():
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
for line in out.splitlines():
if 'Memory size' in line:
self.facts['memtotal_mb'] = int(line.split()[2])
rc, out, err = self.module.run_command("/usr/sbin/swap -s")
allocated = int(out.split()[1][:-1])
reserved = int(out.split()[5][:-1])
used = int(out.split()[8][:-1])
free = int(out.split()[10][:-1])
self.facts['swapfree_mb'] = free // 1024
self.facts['swaptotal_mb'] = (free + used) // 1024
self.facts['swap_allocated_mb'] = allocated // 1024
self.facts['swap_reserved_mb'] = reserved // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.splitlines():
fields = line.split('\t')
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available})
def get_dmi_facts(self):
        prtdiag_path = self.module.get_bin_path("prtdiag")
        rc, out, err = self.module.run_command(prtdiag_path)
        # prtdiag can exit non-zero (rc 1) even when it prints usable output,
        # so rely on the output itself rather than the return code.
if out:
system_conf = out.split('\n')[0]
found = re.search(r'(\w+\sEnterprise\s\w+)',system_conf)
if found:
self.facts['product_name'] = found.group(1)
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
    In addition, it also defines a number of DMI facts and device facts.
"""
platform = 'OpenBSD'
def populate(self):
self.sysctl = self.get_sysctl(['hw'])
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
self.get_dmi_facts()
return self.facts
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ', line).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = self.module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = self.module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = { ord(u'k'): None, ord(u'm'): None, ord(u'g'): None}
data = to_text(out, errors='surrogate_or_strict').split()
self.facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
self.facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024
def get_processor_facts(self):
processor = []
for i in range(int(self.sysctl['hw.ncpu'])):
processor.append(self.sysctl['hw.model'])
self.facts['processor'] = processor
# The following is partly a lie because there is no reliable way to
# determine the number of physical CPUs in the system. We can only
# query the number of logical CPUs, which hides the number of cores.
# On amd64/i386 we could try to inspect the smt/core/package lines in
# dmesg, however even those have proven to be unreliable.
# So take a shortcut and report the logical number of processors in
# 'processor_count' and 'processor_cores' and leave it at that.
self.facts['processor_count'] = self.sysctl['hw.ncpu']
self.facts['processor_cores'] = self.sysctl['hw.ncpu']
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
def get_dmi_facts(self):
# We don't use dmidecode(1) here because:
# - it would add dependency on an external package
# - dmidecode(1) can only be ran as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'hw.product': 'product_name',
'hw.version': 'product_version',
'hw.uuid': 'product_uuid',
'hw.serialno': 'product_serial',
'hw.vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
for line in dmesg_boot.splitlines():
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
for line in out.splitlines():
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = int(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
lines = out.splitlines()
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) // 1024
self.facts['swapfree_mb'] = int(data[3]) // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
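            # Sorting matters here: a drive entry such as 'ada0' is expected to
            # appear before its slices ('ada0s1', ...), so 'd' below still refers
            # to the owning drive when a slice name matches.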
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class DragonFlyHardware(FreeBSDHardware):
platform = 'DragonFly'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def populate(self):
self.sysctl = self.get_sysctl(['machdep'])
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = int(val) // 1024
@timeout()
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
def get_dmi_facts(self):
# We don't use dmidecode(1) here because:
# - it would add dependency on an external package
# - dmidecode(1) can only be ran as root
# So instead we rely on sysctl(8) to provide us the information on a
# best-effort basis. As a bonus we also get facts on non-amd64/i386
# platforms this way.
sysctl_to_dmi = {
'machdep.dmi.system-product': 'product_name',
'machdep.dmi.system-version': 'product_version',
'machdep.dmi.system-uuid': 'product_uuid',
'machdep.dmi.system-serial': 'product_serial',
'machdep.dmi.system-vendor': 'system_vendor',
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_vgs_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swap info from lsps. 'lsps -s' output looks like (illustrative values):
        # Total Paging Space   Percent Used
        #       512MB               1%
        #
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = int(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
self.facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
self.facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
self.facts['product_name'] = data[1].strip()
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk0 active 546 0 00..00..00..00..00
hdisk1 active 546 113 00..00..00..21..92
realsyncvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk74 active 1999 6 00..00..00..00..06
testvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk105 active 999 838 200..39..199..200..200
hdisk106 active 999 599 200..00..00..199..200
"""
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s | %s %s -p" % (lsvg_path ,xargs_path,lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd,use_unsafe_shell=True)
if rc == 0 and out:
                self.facts['vgs'] = {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
self.facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path,m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)):
pv_info = { 'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
self.facts['vgs'][m.group(1)].append(pv_info)
class HPUX(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                # machinfo only lists 'core' lines on B.11.31 releases newer than the 1204 update
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
                if out.strip() == '0':
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data // 1024 // 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) // 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
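                        # phys_mem_pages counts 4 KiB pages (assumed), and one
                        # 4096-byte page is 1/256 of a megabyte, hence the / 256.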
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().splitlines():
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = self.module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ",use_unsafe_shell=True)
if rc == 0 and out:
self.facts['product_serial'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def populate(self):
self.sysctl = self.get_sysctl(['hw','machdep','kern'])
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
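        # system_profiler lines look like '      Model Name: MacBook Pro'
        # (illustrative); everything before ': ' becomes the key and the
        # remainder, whitespace-normalised, the value.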
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = self.module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024
rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
class HurdHardware(LinuxHardware):
"""
    GNU Hurd specific subclass of Hardware. Defines memory and mount facts
    based on the procfs compatibility translator, which mimics the interface
    of the Linux kernel.
"""
platform = 'GNU'
def populate(self):
self.get_uptime_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
class Network(Facts):
"""
    This is a generic Network subclass of Facts. This should be further
    subclassed to implement per-platform behavior. If you subclass this,
    you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
# When Network is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Network:
return super(Network, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Network):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
INTERFACE_TYPE = {
'1': 'ether',
'512': 'ppp',
'772': 'loopback',
'65534': 'tunnel',
}
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
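        # A successful 'ip -4 route get 8.8.8.8' reply looks like (illustrative):
        #   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.10
        # The first word echoes the queried address; 'dev', 'src' and 'via'
        # pairs are picked out below.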
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v], errors='surrogate_or_replace')
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.splitlines()[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
if os.path.exists(os.path.join(path,'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
if os.path.exists(os.path.join(path, 'speed')):
speed = get_file_content(os.path.join(path, 'speed'))
if speed is not None:
interfaces[device]['speed'] = int(speed)
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(get_file_content(os.path.join(path, 'flags')),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.splitlines():
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
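                    # e.g. (illustrative) '192.168.1.10/24' yields netmask_bin =
                    # (1 << 32) - (1 << 32 >> 24) = 0xffffff00, netmask
                    # '255.255.255.0' and network '192.168.1.0'.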
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
if 'peer' == words[2]:
address = words[1]
_, prefix = words[3].split('/')
scope = words[5]
else:
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = self.module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_or_replace')
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_or_replace')
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
interfaces[device]['features'] = self.get_ethtool_data(device)
        # replace ':' with '_' in interface names since colons are hard to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
def get_ethtool_data(self, device):
features = {}
ethtool_path = self.module.get_bin_path("ethtool")
if ethtool_path:
args = [ethtool_path, '-k', device]
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_or_replace')
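            # 'ethtool -k <device>' prints lines such as 'tx-checksumming: on'
            # (illustrative); header lines end in ':' and are skipped, and dashes
            # in feature names are replaced with underscores.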
if rc == 0:
for line in stdout.strip().splitlines():
if not line or line.endswith(":"):
continue
key,value = line.split(": ")
if not value:
continue
features[key.strip().replace('-','_')] = value.strip()
return features
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
"""
platform = 'Generic_BSD_Ifconfig'
def populate(self):
ifconfig_path = self.module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = self.module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.detect_type_media(interfaces)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def detect_type_media(self, interfaces):
for iface in interfaces:
if 'media' in interfaces[iface]:
if 'ether' in interfaces[iface]['media'].lower():
interfaces[iface]['type'] = 'ether'
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
for line in out.splitlines():
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
elif words[0] == 'tunnel':
self.parse_tunnel_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
if 'LOOPBACK' in current_if['flags']:
current_if['type'] = 'loopback'
current_if['macaddress'] = 'unknown' # will be overwritten later
if len(words) >= 5 : # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
# netbsd show aliases like this
# lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
# inet 127.0.0.1 netmask 0xff000000
# inet alias 127.1.1.1 netmask 0xff000000
if words[1] == 'alias':
del words[1]
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_tunnel_line(self, words, current_if, ips):
current_if['type'] = 'tunnel'
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
if 'interface' not in defaults:
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo:
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0]:
defaults[item] = ifinfo[ip_type][0][item]
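# Illustrative sketch (not part of the original module): how the facts assembled
# by GenericBsdIfconfigNetwork.populate() fit together. 'module' is assumed to be
# an AnsibleModule instance and the interface names/addresses are hypothetical.
#
#   net_facts = GenericBsdIfconfigNetwork(module).populate()
#   net_facts['interfaces']                  # interface names found by 'ifconfig -a'
#   net_facts['em0']['ipv4'][0]['address']   # e.g. '192.0.2.10' on interface em0
#   net_facts['default_ipv4']['interface']   # default route interface from 'route -n get'
#   net_facts['all_ipv4_addresses']          # every non-loopback IPv4 address seen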
class HPUXNetwork(Network):
"""
HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
"""
platform = 'HP-UX'
def populate(self):
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return self.facts
self.get_default_interfaces()
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
lines = out.splitlines()
for line in lines:
words = line.split()
for i in range(len(words) - 1):
if words[i][:3] == 'lan':
device = words[i]
interfaces[device] = { 'device': device }
address = words[i+3]
interfaces[device]['ipv4'] = { 'address': address }
network = words[i+2]
interfaces[device]['ipv4'] = { 'network': network,
'interface': device,
'address': address }
return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork, overriding only the media line parsing.
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
"""
This is the DragonFly Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'DragonFly'
class AIXNetwork(GenericBsdIfconfigNetwork):
"""
This is the AIX Network Class.
It is based on the GenericBsdIfconfigNetwork, with AIX-specific overrides for the default interfaces and interface parsing.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
netstat_path = self.module.get_bin_path('netstat')
rc, out, err = self.module.run_command([netstat_path, '-nr'])
interface = dict(v4 = {}, v6 = {})
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
uname_rc = None
uname_out = None
uname_err = None
uname_path = self.module.get_bin_path('uname')
if uname_path:
uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# don't bother with wpars it does not work
# zero means not in wpar
if not uname_rc and uname_out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = self.module.get_bin_path('entstat')
if entstat_path:
rc, out, err = self.module.run_command([entstat_path, current_if['device'] ])
if rc != 0:
break
for line in out.splitlines():
if not line:
pass
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = self.module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ])
if rc != 0:
break
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
# AIX 'ifconfig -a' does not report the MTU, so current_if['mtu'] is not set here (it is filled in later via lsattr)
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
class NetBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the NetBSD Network Class.
It uses the GenericBsdIfconfigNetwork, overriding only the media line parsing.
"""
platform = 'NetBSD'
def parse_media_line(self, words, current_if, ips):
# example of line:
# $ ifconfig
# ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
# ec_capabilities=1<VLAN_MTU>
# ec_enabled=0
# address: 00:20:91:45:00:78
# media: Ethernet 10baseT full-duplex
# inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2]
if len(words) > 3:
current_if['media_options'] = words[3].split(',')
class SunOSNetwork(GenericBsdIfconfigNetwork):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = self.module.run_command([ifconfig_path, '-a'])
for line in out.splitlines():
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces:
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
if 'LOOPBACK' in flags:
current_if['type'] = 'loopback'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
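# Illustrative sketch (not part of the original module) of the zero-padding above.
# Solaris may print single-digit octets, e.g. 'ether 0:3:ba:1:2:3'; parse_ether_line()
# pads each octet to two digits:
#
#   words = ['ether', '0:3:ba:1:2:3']
#   # after parse_ether_line(words, current_if, ips):
#   # current_if['macaddress'] == '00:03:ba:01:02:03'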
class HurdPfinetNetwork(Network):
"""
This is a GNU Hurd-specific subclass of Network. It uses fsysopts to
get the IP addresses and supports only pfinet.
"""
platform = 'GNU'
_socket_dir = '/servers/socket/'
def populate(self):
fsysopts_path = self.module.get_bin_path('fsysopts')
if fsysopts_path is None:
return self.facts
socket_path = None
for l in ('inet', 'inet6'):
link = os.path.join(self._socket_dir, l)
if os.path.exists(link):
socket_path = link
break
if socket_path:
rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
self.facts['interfaces'] = []
for i in out.split():
if '=' in i and i.startswith('--'):
k,v = i.split('=',1)
# remove '--'
k = k[2:]
if k == 'interface':
# remove /dev/ from /dev/eth0
v = v[5:]
self.facts['interfaces'].append(v)
self.facts[v] = {
'active': True,
'device': v,
'ipv4': {},
'ipv6': [],
}
current_if = v
elif k == 'address':
self.facts[current_if]['ipv4']['address'] = v
elif k == 'netmask':
self.facts[current_if]['ipv4']['netmask'] = v
elif k == 'address6':
address,prefix = v.split('/')
self.facts[current_if]['ipv6'].append({
'address': address,
'prefix': prefix,
})
return self.facts
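# Illustrative sketch (not part of the original module): the 'fsysopts -L' output
# parsed above is assumed to consist of '--key=value' tokens, for example
# (hypothetical values):
#
#   --interface=/dev/eth0 --address=192.0.2.5 --netmask=255.255.255.0
#
# which the loop turns into:
#
#   facts['interfaces'] == ['eth0']
#   facts['eth0']['ipv4'] == {'address': '192.0.2.5', 'netmask': '255.255.255.0'}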
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. It should be further
subclassed per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
# When Virtual is created, it chooses a subclass to create instead.
# This check prevents the subclass from then trying to find a subclass
# and create that.
if cls is not Virtual:
return super(Virtual, cls).__new__(cls)
subclass = cls
for sc in get_all_subclasses(Virtual):
if sc.platform == platform.system():
subclass = sc
if PY3:
return super(cls, subclass).__new__(subclass)
else:
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
# lxc/docker
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
# lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ'):
if re.search('container=lxc', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
self.facts['virtualization_type'] = systemd_container
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'OpenStack Nova':
self.facts['virtualization_type'] = 'openstack'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'OpenStack Foundation':
self.facts['virtualization_type'] = 'openstack'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.splitlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
if os.path.isdir('/rhev/'):
# Check whether this is a RHEV hypervisor (is vdsm running ?)
for f in glob.glob('/proc/[0-9]*/comm'):
try:
if open(f).read().rstrip() == 'vdsm':
self.facts['virtualization_type'] = 'RHEV'
break
except:
pass
else:
self.facts['virtualization_type'] = 'kvm'
else:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
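# Illustrative sketch (not part of the original module): the docker detection above
# keys off /proc/1/cgroup. A hypothetical container line such as
#
#   12:devices:/docker/0123456789abcdef
#
# matches r'/docker(/|-[0-9a-f]+\.scope)', so virtualization_type is set to
# 'docker' and virtualization_role to 'guest' before any other checks run.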
class VirtualSysctlDetectionMixin(object):
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
def detect_virt_product(self, key):
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|Bochs|SmartDC).*', out):
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
elif re.match('.*VMware.*', out):
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'VirtualBox':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'HVM domU':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'Parallels':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif out.rstrip() == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
def detect_virt_vendor(self, key):
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
self.facts['virtualization_type'] = 'vmm'
self.facts['virtualization_role'] = 'guest'
class FreeBSDVirtual(Virtual):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
class DragonFlyVirtual(FreeBSDVirtual):
platform = 'DragonFly'
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is an OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
self.detect_virt_product('hw.product')
if self.facts['virtualization_type'] == '':
self.detect_virt_vendor('hw.vendor')
# Check the dmesg if vmm(4) attached, indicating the host is
# capable of virtualization.
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
self.facts['virtualization_type'] = 'vmm'
self.facts['virtualization_role'] = 'host'
class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
platform = 'NetBSD'
def get_virtual_facts(self):
# Set empty values as default
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
self.detect_virt_product('machdep.dmi.system-product')
if self.facts['virtualization_type'] == '':
self.detect_virt_vendor('machdep.dmi.system-vendor')
if os.path.exists('/dev/xencons'):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
class HPUXVirtual(Virtual):
"""
This is an HP-UX-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = self.module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = self.module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def get_virtual_facts(self):
# Check if it's a zone
zonename = self.module.get_bin_path('zonename')
if zonename:
rc, out, err = self.module.run_command(zonename)
if rc == 0 and out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
modinfo = self.module.get_bin_path('modinfo')
if modinfo:
rc, out, err = self.module.run_command(modinfo)
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'virtuozzo'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
virtinfo = self.module.get_bin_path('virtinfo')
if virtinfo:
# The output of virtinfo differs depending on whether we are on a machine with logical
# domains ('LDoms') on a T-series or domains ('Domains') on an M-series. Try LDoms first.
rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
# The output may also be unformatted, and the return code is 0 regardless of the error condition:
# virtinfo can only be run from the global zone
if rc == 0:
try:
for line in out.splitlines():
fields = line.split('|')
if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
self.facts['virtualization_type'] = 'ldom'
self.facts['virtualization_role'] = 'guest'
hostfeatures = []
for field in fields[2:]:
arg = field.split('=')
if( arg[1] == 'true' ):
hostfeatures.append(arg[0])
if( len(hostfeatures) > 0 ):
self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
except ValueError:
pass
else:
smbios = self.module.get_bin_path('smbios')
if not smbios:
return
rc, out, err = self.module.run_command(smbios)
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
elif 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
elif 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
elif 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
elif 'KVM' in line:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
class Ohai(Facts):
"""
This is a subclass of Facts for including information gathered from Ohai.
"""
def populate(self):
self.run_ohai()
return self.facts
def run_ohai(self):
ohai_path = self.module.get_bin_path('ohai')
if ohai_path is None:
return
rc, out, err = self.module.run_command(ohai_path)
try:
self.facts.update(json.loads(out))
except:
pass
class Facter(Facts):
"""
This is a subclass of Facts for including information gathered from Facter.
"""
def populate(self):
self.run_facter()
return self.facts
def run_facter(self):
facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
if facter_path is None:
return
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = self.module.run_command(facter_path + " --puppet --json")
try:
self.facts = json.loads(out)
except:
pass
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
except:
# ignore errors as some jails/containers might have readable permissions but not allow reads to proc
# done in 2 blocks for 2.4 compat
pass
return data
def get_uname_version(module):
rc, out, err = module.run_command(['uname', '-v'])
if rc == 0:
return out
return None
def get_partition_uuid(partname):
try:
uuids = os.listdir("/dev/disk/by-uuid")
except OSError:
return
for uuid in uuids:
dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
if dev == ("/dev/" + partname):
return uuid
return None
def get_file_lines(path):
'''get list of lines from file'''
data = get_file_content(path)
if data:
ret = data.splitlines()
else:
ret = []
return ret
def ansible_facts(module, gather_subset):
facts = {}
facts['gather_subset'] = list(gather_subset)
facts.update(Facts(module).populate())
for subset in gather_subset:
facts.update(FACT_SUBSETS[subset](module,
load_on_init=False,
cached_facts=facts).populate())
return facts
def get_all_facts(module):
setup_options = dict(module_setup=True)
# Retrieve module parameters
gather_subset = module.params['gather_subset']
global GATHER_TIMEOUT
GATHER_TIMEOUT = module.params['gather_timeout']
# Retrieve all facts elements
additional_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
additional_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys())))
if exclude:
exclude_subsets.add(subset)
else:
additional_subsets.add(subset)
if not additional_subsets:
additional_subsets.update(VALID_SUBSETS)
additional_subsets.difference_update(exclude_subsets)
# facter and ohai are given a different prefix than other subsets
if 'facter' in additional_subsets:
additional_subsets.difference_update(('facter',))
facter_ds = FACT_SUBSETS['facter'](module, load_on_init=False).populate()
if facter_ds:
for (k, v) in facter_ds.items():
setup_options['facter_%s' % k.replace('-', '_')] = v
if 'ohai' in additional_subsets:
additional_subsets.difference_update(('ohai',))
ohai_ds = FACT_SUBSETS['ohai'](module, load_on_init=False).populate()
if ohai_ds:
for (k, v) in ohai_ds.items():
setup_options['ohai_%s' % k.replace('-', '_')] = v
facts = ansible_facts(module, additional_subsets)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['_ansible_verbose_override'] = True
return setup_result
# Allowed fact subset for gather_subset options and what classes they use
# Note: have to define this at the bottom as it references classes defined earlier in this file
FACT_SUBSETS = dict(
hardware=Hardware,
network=Network,
virtual=Virtual,
ohai=Ohai,
facter=Facter,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
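# Illustrative sketch (not part of the original module): how get_all_facts()
# resolves gather_subset. With module.params['gather_subset'] == ['!facter', '!ohai'],
# no explicit inclusions remain, so additional_subsets starts from VALID_SUBSETS
# and the exclusions are removed:
#
#   additional_subsets == {'hardware', 'network', 'virtual'}
#
# Keys gathered this way are returned as 'ansible_<key>', while facter and ohai
# results (when requested) are prefixed 'facter_' and 'ohai_' instead.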
|
flippym/spytify-server
|
refs/heads/master
|
Scripts/album-strip.py
|
1
|
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import listdir, path, popen, remove, rmdir
from shutil import copytree, Error, move
from sys import argv
from textwrap import dedent
def Parse():
parser = ArgumentParser(prog=path.basename(__file__.rpartition('.')[0]), add_help=False, formatter_class=RawDescriptionHelpFormatter,
description=dedent('''\
Download
-------------------
Music download conv
'''), epilog=dedent('''\
Check the git repository at https://github.com/flippym/spytify-server,
for more information about usage, documentation and bug report.\
''')
)
optional = parser.add_argument_group('Flags')
optional.add_argument('-d', '--destiny', metavar='path', type=str, help='Destiny path for music', required=True)
optional.add_argument('-f', '--filter', metavar='exp', type=str, help='Artist comma delimited expression')
optional.add_argument('-s', '--source', metavar='path', type=str, help='Source path with music', required=True)
optional.add_argument('-h', '--help', action='help', help='Show this help message')
if len(argv) == 1:
parser.print_help()
exit(1)
return parser.parse_args()
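# Illustrative invocation (hypothetical paths, not part of the original script):
#
#   python album-strip.py -s /music/rips -d /music/library -f "Artist One,Artist Two"
#
# -s/--source and -d/--destiny are required; -f/--filter restricts the copy to a
# comma-separated list of artist directory names.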
args = Parse()
try:
artist_filter = args.filter.split(',')
except AttributeError:
artist_filter = None
failed_transfers = []
for artist in sorted(listdir(args.source)):
artist_path = path.join(args.destiny, artist)
print("Copying:\n {0}".format(artist))
try:
if not args.filter or args.filter and artist in artist_filter:
copytree(path.join(args.source, artist), artist_path)
else:
continue
except (Error, OSError):
failed_transfers.append(artist)
continue
for album in listdir(artist_path):
album_path = path.join(artist_path, album)
print("Moving:")
for music in listdir(album_path):
music_path = path.join(album_path, music)
print(" {0}".format(music))
try:
move(music_path, path.join(artist_path, music + '.mp3'))
except (Error, OSError):
remove(music_path)
failed_transfers.append(music_path)
screen_size = int(popen('stty size', 'r').read().split()[1])
print("Deleting:\n {0}\n{1}".format(album, '-'*screen_size))
rmdir(album_path)
print("Failed:\n ".format('\n'.join(failed_transfers)))
|
eadgarchen/tensorflow
|
refs/heads/master
|
tensorflow/contrib/labeled_tensor/python/ops/core.py
|
32
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core classes and core ops for LabeledTensor.
Core ops are ops which will eventually be called by LabeledTensor methods,
and ops which a core op depends upon.
For example, `add` is a core op because we'll eventually support the `+`
operator.
Non-core ops should go in `ops.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import numbers
import types
import numpy as np
from six import binary_type
from six import string_types
from six import text_type
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# pylint: disable=invalid-name
# Types coercible to Axis.labels
# We use this instead of collections.Sequence to exclude strings.
LabelsLike = tc.Union(np.ndarray, range, list, tuple)
# Types coercible to a tf.Dimension
DimensionLike = tc.Optional(tc.Union(tensor_shape.Dimension, int))
# Types usable for axis values
AxisValue = tc.Union(LabelsLike, DimensionLike)
# Valid scalar values for TensorFlow
Scalar = tc.Union(numbers.Number, bool, binary_type, text_type)
# pylint: enable=invalid-name
class Axis(object):
"""Size and label information for an axis.
Axis contains either a tf.Dimension indicating the size of an axis,
or a tuple of tick labels for the axis.
If tick labels are provided, they must be unique.
"""
@tc.accepts(object, string_types, AxisValue)
def __init__(self, name, value):
"""Construct an Axis.
Args:
name: Name of the axis.
value: Either None, an int or tf.Dimension giving the size of the axis,
or a sequence that is not a string additionally providing coordinate
(tick) labels.
Raises:
ValueError: If the user provides labels with duplicate values.
"""
if isinstance(value, tensor_shape.Dimension):
dimension = value
labels = None
elif isinstance(value, int) or value is None:
dimension = tensor_shape.Dimension(value)
labels = None
else:
dimension = tensor_shape.Dimension(len(value))
labels = tuple(value)
if dimension.value == 0:
# Treat a zero-length axis as if it has labels.
labels = ()
if labels is not None:
index = dict(zip(labels, range(len(labels))))
if len(index) != len(labels):
raise ValueError('Tick labels must be unique, but got {}'
.format(labels))
else:
index = None
self._name = name # type: string_types
self._dimension = dimension # type: tensor_shape.Dimension
self._labels = labels # type: Optional[tuple]
self._index = index # type: Optional[Dict[Any, int]]
@property
@tc.returns(string_types)
def name(self):
return self._name
@tc.returns(string_types)
def __repr__(self):
# Axis('x', Dimension(2))
# TODO(shoyer): make very long reprs more succinct?
return "%s('%s', %r)" % (type(self).__name__, self.name, self.value)
@tc.returns(bool)
def __eq__(self, other):
return (isinstance(other, Axis) and self.name == other.name and
self.size == other.size and self.labels == other.labels)
def __hash__(self):
return hash((self.name, self.size, self.labels))
@tc.returns(bool)
def __ne__(self, other):
return not self == other
@tc.returns(int)
def __len__(self):
size = self.size
if size is None:
raise ValueError('axis %r has unknown length' % self.name)
return size
@property
@tc.returns(tc.Optional(tensor_shape.Dimension))
def dimension(self):
return self._dimension
@property
@tc.returns(tc.Optional(int))
def size(self):
return self._dimension.value
@property
@tc.returns(tc.Union(tuple, tensor_shape.Dimension))
def value(self):
"""Returns the tf.Dimension or tuple specifying axis ticks."""
if self.labels is None:
return self.dimension
else:
return self.labels
@property
@tc.returns(tc.Optional(tuple))
def labels(self):
"""Returns the tuple containing coordinate labels, else None."""
return self._labels
def index(self, value):
"""Returns the integer position of the given tick label."""
if self._index is None:
raise ValueError('Axis does not have tick labels')
return self._index[value]
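# Illustrative sketch (not part of the original module): the two ways an Axis can
# be constructed and what its accessors return.
#
#   unlabeled = Axis('x', 3)              # size 3, no tick labels
#   labeled = Axis('y', ['a', 'b', 'c'])  # size inferred from the labels
#   labeled.size        # -> 3
#   labeled.labels      # -> ('a', 'b', 'c')
#   labeled.index('b')  # -> 1
#   unlabeled.labels    # -> None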
# tc class for anything that can be coerced into an Axis
# pylint: disable=invalid-name
AxisLike = tc.Union(Axis, tc.Tuple(string_types, AxisValue))
# pylint: enable=invalid-name
@tc.returns(Axis)
@tc.accepts(AxisLike)
def as_axis(axis_data):
"""Convert an AxisLike object into an Axis.
Args:
axis_data: Axis object or tuple (axis_name, axis_value) describing an axis.
Returns:
Axis object. This may be the original object if axis_data is an Axis.
"""
if isinstance(axis_data, Axis):
axis = axis_data
else:
axis = Axis(*axis_data)
return axis
class Axes(collections.Mapping):
"""Axis names and indices for a tensor.
It is an ordered mapping, with keys given by axis name and values given
by Axis objects. Duplicate axis names are not allowed.
"""
@tc.accepts(object, tc.List(AxisLike))
def __init__(self, axes):
"""Construct an Axes.
Args:
axes: A list of Axis objects or (axis_name, axis_value) tuples.
Raises:
ValueError: If the user provides empty or duplicate axis names.
"""
self._axes = collections.OrderedDict()
for axis_data in axes:
axis = as_axis(axis_data)
name = axis.name
if name in self._axes:
raise ValueError('Duplicate axis name: %s' % name)
self._axes[name] = axis
def __iter__(self):
return iter(self._axes)
@tc.returns(string_types)
def __repr__(self):
# Axes([('x', Dimension(2)),
# ('y', ['a', 'b', 'c']),
# ('z', Dimension(4))])
cls_name = type(self).__name__
values = ["('%s', %r)" % (v.name, v.value) for v in self._axes.values()]
values_repr = (',\n' + ' ' * len(cls_name + '([')).join(values)
return '%s([%s])' % (cls_name, values_repr)
@tc.returns(Axis)
@tc.accepts(object, string_types)
def __getitem__(self, name):
return self._axes[name]
@tc.returns(bool)
def __contains__(self, name):
return name in self._axes
@tc.returns(int)
def __len__(self):
return len(self._axes)
def __hash__(self):
return hash(tuple(self.items()))
@tc.accepts(object, string_types)
def remove(self, axis_name):
"""Creates a new Axes object without the given axis."""
if axis_name not in self:
raise KeyError(axis_name)
remaining_axes = [axis for axis in self.values() if axis.name != axis_name]
return Axes(remaining_axes)
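# Illustrative sketch (not part of the original module): Axes is an ordered
# mapping from axis name to Axis.
#
#   axes = Axes([('x', 2), ('y', ['a', 'b', 'c'])])
#   list(axes)        # -> ['x', 'y']
#   axes['y'].labels  # -> ('a', 'b', 'c')
#   axes.remove('x')  # -> a new Axes containing only 'y'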
class LabeledTensor(object):
"""A tensor with annotated axes.
It has the following invariants:
1) The dimensionality of the tensor is equal to the number of elements
in axes.
2) The number of coordinate values in the ith dimension is equal to the
size of the tensor in the ith dimension.
Attributes:
tensor: tf.Tensor containing the data.
axes: lt.Axes containing axis names and coordinate labels.
"""
@tc.accepts(object, ops.Tensor,
tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike))))
def __init__(self, tensor, axes):
"""Construct a LabeledTensor.
Args:
tensor: The underlying tensor containing the data.
axes: An Axes object, or a collection of strings, Axis objects or tuples
of (name, value) pairs indicating the axes.
Raises:
ValueError: If the provided axes do not satisfy the class invariants.
"""
self._tensor = tensor
shape = tensor.get_shape()
if isinstance(axes, Axes):
unvalidated_axes = axes
else:
mutable_axes = []
for position, axis_like in enumerate(axes):
if isinstance(axis_like, string_types):
# The coordinates for this axis are unlabeled.
# Infer the size of the axis.
value = shape[position]
axis_like = (axis_like, value)
mutable_axes.append(axis_like)
# Construct the Axes object, which will additionally validate the contents
# of the object.
unvalidated_axes = Axes(mutable_axes)
# Check our invariants.
# First, the rank of the tensor must be equal to the number of axes.
if len(shape) != len(unvalidated_axes):
raise ValueError('Tensor rank was not equal to the number of axes: %r, %r'
% (shape, unvalidated_axes))
# Second, the size of each tensor dimension must match the size of the
# corresponding indices.
for (d, axis) in zip(shape, unvalidated_axes.values()):
if d != axis.size:
raise ValueError(
'Provided axis size %d does not match tensor dimension size %d' %
(axis.size, d))
self._axes = unvalidated_axes
def __repr__(self):
# <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32
# axes=[('x', Dimension(2)),
# ('y', ('a', 'b', 'c'),
# ('z', Dimension(4))]>
axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()]
axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes)
return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" %
(type(self).__name__, self.tensor.name, self.tensor.get_shape(),
self.tensor.dtype.name, axes_repr))
@property
def tensor(self):
return self._tensor
def _as_graph_element(self):
"""Support tf.Graph.as_graph_element on LabeledTensor objects.
This allows operations such as tf.name_scope to take labeled tensors.
Returns:
self.tensor
"""
return self.tensor
@property
def axes(self):
return self._axes
# properties/methods directly borrowed from tf.Tensor:
@property
def dtype(self):
return self._tensor.dtype
@property
def name(self):
return self._tensor.name
def get_shape(self):
"""Returns the TensorShape that represents the shape of this tensor.
See tf.Tensor.get_shape().
Returns:
A TensorShape representing the shape of this tensor.
"""
return self._tensor.get_shape()
# TODO(shoyer): consider how/if to implement .eval(). Maybe it should return
# an xarray.DataArray?
def __getitem__(self, key):
# This should work exactly like tf.Tensor.__getitem__, except it preserves
# labels.
if not isinstance(key, tuple):
key = (key,)
if len(key) != len(self.axes):
raise ValueError('indexer %r must have the same length as the Tensor '
'rank (%r)' % (key, len(self.axes)))
selection = {a: k for a, k in zip(self.axes.keys(), key)}
return slice_function(self, selection)
# special methods for overloading arithmetic operations:
def __abs__(self):
return abs_function(self)
def __neg__(self):
return neg(self)
def __pos__(self):
return self
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(other, self)
def __sub__(self, other):
return sub(self, other)
def __rsub__(self, other):
return sub(other, self)
def __mul__(self, other):
return mul(self, other)
def __rmul__(self, other):
return mul(other, self)
def __truediv__(self, other):
return div(self, other)
__div__ = __truediv__
def __rtruediv__(self, other):
return div(other, self)
__rdiv__ = __rtruediv__
def __mod__(self, other):
return mod(self, other)
def __rmod__(self, other):
return mod(other, self)
def __pow__(self, other):
return pow_function(self, other)
def __rpow__(self, other):
return pow_function(other, self)
# logical operations:
def __invert__(self):
return logical_not(self)
def __and__(self, other):
return logical_and(self, other)
def __or__(self, other):
return logical_or(self, other)
def __xor__(self, other):
return logical_xor(self, other)
# boolean operations:
def __lt__(self, other):
return less(self, other)
def __le__(self, other):
return less_equal(self, other)
def __gt__(self, other):
return greater(self, other)
def __ge__(self, other):
return greater_equal(self, other)
def __eq__(self, other):
# for consistency with tf.Tensor
if not isinstance(other, LabeledTensor):
return False
return self.tensor == other.tensor and self.axes == other.axes
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.tensor, self.axes))
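# Illustrative sketch (not part of the original module); assumes 'tf' is the
# imported tensorflow module.
#
#   labeled = LabeledTensor(tf.zeros([2, 3]), ['x', ('y', ['a', 'b', 'c'])])
#   labeled.axes['y'].labels  # -> ('a', 'b', 'c')
#   labeled[:, 0]             # indexing preserves labels; result has axes
#                             # [('x', Dimension(2))], the 'y' axis is dropped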
# typecheck type abbreviations:
# abbreviations for third-party types with very long reprs
tc.register_type_abbreviation(tensor_shape.Dimension, 'tensorflow.Dimension')
tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor')
tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType')
# core LabeledTensor types
tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis')
tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes')
tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor')
@tc.returns(ops.Tensor)
@tc.accepts(LabeledTensor)
def _convert_labeled_tensor_to_tensor(value, *args, **kwargs):
# call ops.convert_to_tensor to handle optional arguments appropriately
return ops.internal_convert_to_tensor(value.tensor, *args, **kwargs)
ops.register_tensor_conversion_function(LabeledTensor,
_convert_labeled_tensor_to_tensor)
# tc class for anything that can be coerced into a LabeledTensor
# pylint: disable=invalid-name
LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar)
# pylint: enable=invalid-name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, object, tc.Optional(string_types))
def convert_to_labeled_tensor(value, dtype=None, name=None):
"""Converts the given `value` to a `LabeledTensor`.
This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
must use the `LabeledTensor` constructor explicitly.
Args:
value: Object to convert.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of value.
name: Optional name to use if a new Tensor is created.
Returns:
`value` converted into a `LabeledTensor` object.
Raises:
ValueError: If the output would have rank>0 but the input was not already a
`LabeledTensor`.
"""
# TODO(shoyer): consider extending to accept xarray.DataArray as input.
if isinstance(value, LabeledTensor):
axes = value.axes.values()
value = value.tensor
else:
axes = []
# We call convert_to_tensor even for LabeledTensor input because it also
# checks to make sure the dtype argument is compatible.
tensor = ops.convert_to_tensor(value, dtype=dtype, name=name)
if len(tensor.get_shape()) != len(axes):
raise ValueError('cannot automatically convert unlabeled arrays or tensors '
'with rank>0 into LabeledTensors: %r' % value)
return LabeledTensor(tensor, axes)
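# Illustrative sketch (not part of the original module):
#
#   convert_to_labeled_tensor(3.0)       # scalar -> rank-0 LabeledTensor with no axes
#   convert_to_labeled_tensor(labeled)   # an existing LabeledTensor passes through
#   # convert_to_labeled_tensor(np.zeros([2, 3])) raises ValueError: unlabeled
#   # inputs with rank > 0 must go through the LabeledTensor constructor instead.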
@tc.returns(Axis)
@tc.accepts(tc.Collection(Axis))
def concat_axes(axes):
"""Concatenate a list of Axes.
Args:
axes: A collection of Axis objects.
Returns:
The concatenation of the axes.
If all axes have labels, the result has the concatenation of the labels.
Else, the result has no labels, and its size is the sum of the sizes
of the axes.
Raises:
ValueError: If `axes` is not a collection of Axis objects or if it is empty.
"""
if not axes:
raise ValueError('axes must not be empty')
for a in axes:
if not isinstance(a, Axis):
raise ValueError('Expected an Axis, but got %r of type %r' % (a, type(a)))
names = set(a.name for a in axes)
if len(names) > 1:
raise ValueError('axes do not all have the same name: %r' % names)
name, = names
all_have_labels = all(a.labels is not None for a in axes)
any_has_unknown_size = any(a.size is None for a in axes)
if all_have_labels:
value = tuple(label for a in axes for label in a.labels)
elif any_has_unknown_size:
value = None
else:
value = sum(len(a) for a in axes)
return Axis(name, value)
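# Illustrative sketch (not part of the original module):
#
#   concat_axes([Axis('x', ['a', 'b']), Axis('x', ['c'])])
#   # -> Axis('x', ('a', 'b', 'c'))   (all inputs labeled: labels are concatenated)
#   concat_axes([Axis('x', 2), Axis('x', 3)])
#   # -> Axis('x', 5)                 (no labels: sizes are summed)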
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def identity(labeled_tensor, name=None):
"""The identity op.
See tf.identity.
Args:
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
The tensor.
"""
with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
return LabeledTensor(
array_ops.identity(
labeled_tensor.tensor, name=scope),
labeled_tensor.axes)
# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Mapping(string_types, tc.Union(int, slice)),
tc.Optional(string_types))
def slice_function(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
This is an analog of tf.slice.
For example:
>>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
>>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])
>>> lt.slice(labeled_tensor, {'a': slice(0, 2), 'b': 1})
<LabeledTensor 'lt_slice:...' shape=(2,) dtype=int32
axes=[('a', Dimension(2))]>
Args:
labeled_tensor: The input tensor.
selection: A dictionary of type str -> Union(int, slice of int) mapping
axis names to sub-selections.
name: Optional op name.
Returns:
The slice as a `LabeledTensor`.
"""
with ops.name_scope(name, 'lt_slice', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
slices = []
for axis_name in labeled_tensor.axes:
if axis_name not in selection:
# We're not sub-selecting this axis, so use the full slice.
slices.append(slice(None))
else:
slices.append(selection[axis_name])
sliced_tensor = labeled_tensor.tensor[tuple(slices)]
sliced_axes = []
for axis, s in zip(labeled_tensor.axes.values(), slices):
# We sub-select this axis's index with the slice s.
# `s` is either an int or a proper slice.
if isinstance(s, slice):
if axis.labels is None:
# We're not tracking coordinate names for this axis.
sliced_axes.append(axis.name)
else:
sliced_axes.append((axis.name, axis.labels[s]))
else:
# If the slice is an int this dimension now has size 1, so we remove it.
assert isinstance(s, int)
return LabeledTensor(
array_ops.identity(
sliced_tensor, name=scope), sliced_axes)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def transpose(labeled_tensor, axis_order=None, name=None):
"""Permute a tensor's axes.
See tf.transpose.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. By default, the
order of axes is reversed.
name: Optional op name.
Returns:
The permuted tensor.
Raises:
ValueError: If axis_order isn't a permutation of the existing axes.
"""
with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
original_order = list(labeled_tensor.axes.keys())
if axis_order is None:
axis_order = list(reversed(original_order))
elif sorted(axis_order) != sorted(original_order):
raise ValueError(
'The new axis order must have the same names as the original axes, '
'but the new order is %r while the original order is %r' %
(axis_order, original_order))
axis_names = list(labeled_tensor.axes.keys())
permutation = [axis_names.index(n) for n in axis_order]
# Note: TensorFlow doesn't copy data for the identity transpose.
transpose_tensor = array_ops.transpose(
labeled_tensor.tensor, permutation, name=scope)
permuted_axes = [labeled_tensor.axes[n] for n in axis_order]
return LabeledTensor(transpose_tensor, permuted_axes)
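# Illustrative sketch (not part of the original module); assumes 'tf' is the
# imported tensorflow module.
#
#   lt_xy = LabeledTensor(tf.zeros([2, 3]), ['x', 'y'])
#   transpose(lt_xy)              # default reverses the order -> axes ['y', 'x']
#   transpose(lt_xy, ['y', 'x'])  # explicit permutation by axis name
#   # transpose(lt_xy, ['y', 'z']) raises ValueError: names must match the originals.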
@tc.returns(LabeledTensor)
@tc.accepts(
LabeledTensorLike,
tc.Collection(
tc.Union(string_types, tc.Tuple(string_types, collections.Hashable))),
tc.Optional(string_types))
def expand_dims(labeled_tensor, axes, name=None):
"""Insert dimensions of size 1.
See tf.expand_dims.
Args:
labeled_tensor: The input tensor.
axes: The desired axis names as strings or tuples of (name, label),
where `label` is the coordinate name for the new dimension `name`.
These must include the existing axis names, and the existing names must
appear in the same order in this list as they do in the input tensor.
name: Optional op name.
Returns:
A tensor with an axis for each axis in axes.
New axes are created with size 1 and do not have labeled coordinates.
Raises:
AxisOrderError: If axis names don't appear in the same order in axes
and the labeled tensor.
"""
with ops.name_scope(name, 'lt_expand_dims', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
axis_names = [a if isinstance(a, string_types) else a[0] for a in axes]
check_axis_order(labeled_tensor, axis_names)
reshaped_axes = []
shape = []
for axis_spec in axes:
if axis_spec in labeled_tensor.axes:
axis = labeled_tensor.axes[axis_spec]
reshaped_axes.append(axis)
shape.append(-1 if axis.size is None else axis.size)
else:
if isinstance(axis_spec, string_types):
reshaped_axes.append((axis_spec, 1))
else:
(name, label) = axis_spec
reshaped_axes.append((name, (label,)))
shape.append(1)
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
return LabeledTensor(reshaped_tensor, reshaped_axes)
# This should only be added to a graph collection once.
_AXIS_ORDER_KEY = ('__axis_order',)
@tc.returns(tc.Optional(tc.List(string_types)))
def get_axis_order():
"""Get the axis_order set by any containing axis_order_scope.
Returns:
List of strings giving an order to use for axis names, or None, if no axis
order is set.
"""
# By storing axis_order in the graph, we can ensure that axis_order_scope is
# thread-safe.
axis_order_list = ops.get_collection(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order, = axis_order_list
else:
axis_order = None
return axis_order
@tc.accepts(tc.Optional(tc.List(string_types)))
def _set_axis_order(axis_order):
axis_order_list = ops.get_collection_ref(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order_list[0] = axis_order
else:
axis_order_list.append(axis_order)
@contextlib.contextmanager
@tc.accepts(tc.Optional(tc.List(string_types)))
def axis_order_scope(axis_order=None):
"""Set axis order for the result of broadcasting operations within a scope.
This allows you to ensure that tensors resulting from arithmetic have a
predictable axis order.
Example usage:
with lt.axis_order_scope(['x', 'y', 'z']):
# result is guaranteed to have the correct axis order
result = w + b
You can nest scopes, in which case only the inner-most scope applies, e.g.,
    with lt.axis_order_scope(['x', 'y', 'z']):
      with lt.axis_order_scope():
result = w + b # uses the default (left-most) axis ordering
Args:
axis_order: optional list of strings providing axis names. By default,
creates a scope without axis order.
Yields:
The provided axis_order or `None`.
"""
original_axis_order = get_axis_order()
_set_axis_order(axis_order)
try:
yield axis_order
finally:
_set_axis_order(original_axis_order)
@tc.returns(tc.List(string_types))
def _get_valid_axis_order():
axis_order = get_axis_order()
if axis_order is None:
raise AxisOrderError('an explicit axis order must be provided with the '
'axis_order argument or by using an axis_order_scope')
return axis_order
class AxisOrderError(ValueError):
"""Error class for cases where there is no valid axis order."""
# TODO(shoyer): should this function accept a list of labeled tensors instead?
@tc.returns(type(None))
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)))
def check_axis_order(labeled_tensor, axis_order=None):
"""Verify that the given tensor has a consistent axis order.
Args:
labeled_tensor: The input tensor. All axes on this tensor must appear in
axis_order.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
Raises:
AxisOrderError: If the axis_order is unavailable, inconsistent or does not
include all existing axes.
"""
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
if len(relevant_axis_order) < len(labeled_tensor.axes):
raise AxisOrderError(
'not all axis names appear in the required axis order %r: %r' %
(axis_order, labeled_tensor))
if relevant_axis_order != list(labeled_tensor.axes):
raise AxisOrderError(
'axes on a labeled tensor do not appear in the same order as the '
'required axis order %r: %r' % (axis_order, labeled_tensor))
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
"""Impose desired axis order on a labeled tensor.
Args:
labeled_tensor: The input tensor.
axis_order: Optional desired axis order, as a list of names. If not
provided, defaults to the current axis_order_scope (if set).
name: Optional op name.
Returns:
Labeled tensor with possibly transposed axes.
Raises:
AxisOrderError: If no axis_order is provided or axis_order does not contain
all axes on the input tensor.
"""
with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
if axis_order is None:
axis_order = _get_valid_axis_order()
relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]
return transpose(labeled_tensor, relevant_axis_order, name=scope)
@tc.returns(tc.Optional(list))
@tc.accepts(list, list)
def _find_consistent_ordering(a, b):
"""Find the left-most consistent ordering between two lists of unique items.
A consistent ordering combines all elements in both a and b while keeping all
elements in their original order in both inputs. The left-most consistent
ordering orders elements from `a` not found in `b` before elements in `b` not
found in `a`.
For example, given ['x', 'z'] and ['y', 'z'], both ['x', 'y', 'z'] and ['y',
'x', 'z'] are consistent orderings because each of the inputs appears in
each consistent ordering in the same order, and ['x', 'y', 'z'] is the
left-most, because 'x' appears only in `a` and 'y' appears only in `b`. In
contrast, there is no consistent ordering between ['x', 'y'] and ['y', 'x'].
Args:
a: list with unique elements.
b: list with unique elements.
Returns:
List containing all elements in either a or b, or None, if no consistent
ordering exists.
"""
a_set = set(a)
b_set = set(b)
i = 0
j = 0
ordering = []
while i < len(a) and j < len(b):
if a[i] not in b_set:
ordering.append(a[i])
i += 1
elif b[j] not in a_set:
ordering.append(b[j])
j += 1
elif a[i] == b[j]:
ordering.append(a[i])
i += 1
j += 1
else:
return None
ordering.extend(a[i:])
ordering.extend(b[j:])
return ordering
@tc.returns(LabeledTensor, LabeledTensor, Axes)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def align(labeled_tensor_0, labeled_tensor_1, name=None):
"""Align the axes of two tensors so they may be broadcast to each other.
Axes are ordered by the current axis order scope, if present, or by the left-
most consistent ordering. An exception is raised if it is impossible to align
the tensors without a transpose (align never copies the input data).
Example usage:
>>> a = lt.LabeledTensor(tf.ones((2, 4)), ['x', 'z'])
>>> b = lt.LabeledTensor(tf.ones((3, 4)), ['y', 'z'])
>>> a2, b2, axes = lt.align(a, b)
>>> a2
<LabeledTensor 'lt_align_1/lt_align_1/0:...' shape=(2, 1, 4) dtype=float32
axes=[('x', Dimension(2)),
('y', Dimension(1)),
('z', Dimension(4))]>
>>> b2
<LabeledTensor 'lt_align_1/lt_align_1/1:...' shape=(1, 3, 4) dtype=float32
axes=[('x', Dimension(1)),
('y', Dimension(3)),
('z', Dimension(4))]>
>>> axes
Axes([('x', Dimension(2)),
('y', Dimension(3)),
('z', Dimension(4))])
Args:
labeled_tensor_0: An input tensor.
labeled_tensor_1: An input tensor.
name: Optional op name.
Returns:
The aligned tensors and the axes the resulting tensor would have if the two
aligned tensors were broadcast to each other. The aligned tensors have the
same rank but not necessarily the same shape, with axes in the same order.
Raises:
ValueError: If axes with the same name on the inputs are not equal.
AxisOrderError: If there is no way to reshape the input tensors into the
output without a transpose.
"""
with ops.name_scope(name, 'lt_align',
[labeled_tensor_0, labeled_tensor_1]) as scope:
labeled_tensor_0 = convert_to_labeled_tensor(labeled_tensor_0)
labeled_tensor_1 = convert_to_labeled_tensor(labeled_tensor_1)
axes_0 = labeled_tensor_0.axes
axes_1 = labeled_tensor_1.axes
for axis_name in axes_0:
if axis_name in axes_1:
if axes_0[axis_name] != axes_1[axis_name]:
raise ValueError('Mismatched %r axis on input tensors: %r and %r' %
(axis_name, axes_0[axis_name], axes_1[axis_name]))
axis_scope_order = get_axis_order()
if axis_scope_order is not None:
# we are in an axis_order_scope
axis_names_set = set(axes_0) | set(axes_1)
new_axis_names = [a for a in axis_scope_order if a in axis_names_set]
check_axis_order(labeled_tensor_0, axis_scope_order)
check_axis_order(labeled_tensor_1, axis_scope_order)
else:
# attempt to find a consistent ordering
new_axis_names = _find_consistent_ordering(list(axes_0), list(axes_1))
if new_axis_names is None:
raise AxisOrderError(
'No consistent axis order allows for aligning tensors with axis '
'orders %r and %r without copying data. Use transpose or '
          'impose_axis_order to reorder axes on one or more of the inputs.' %
(axes_0.keys(), axes_1.keys()))
labeled_tensor_0 = expand_dims(
labeled_tensor_0, new_axis_names, name=scope + '0')
labeled_tensor_1 = expand_dims(
labeled_tensor_1, new_axis_names, name=scope + '1')
broadcast_axes = []
for axis_name in new_axis_names:
if axis_name in axes_0:
broadcast_axes.append(axes_0[axis_name])
else:
broadcast_axes.append(axes_1[axis_name])
return labeled_tensor_0, labeled_tensor_1, Axes(broadcast_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_unary_op(op_name, elementwise_function):
"""Define a unary operation for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on a single
tf.Tensor object. This function must accept two arguments: a tf.Tensor
object, and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor, name=None):
"""LabeledTensor version of `tf.{op_name}`.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = convert_to_labeled_tensor(labeled_tensor)
result_tensor = elementwise_function(labeled_tensor.tensor, name=scope)
return LabeledTensor(result_tensor, labeled_tensor.axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
abs_function = define_unary_op('abs', math_ops.abs)
neg = define_unary_op('neg', math_ops.negative)
sign = define_unary_op('sign', math_ops.sign)
reciprocal = define_unary_op('reciprocal', math_ops.reciprocal)
square = define_unary_op('square', math_ops.square)
round_function = define_unary_op('round', math_ops.round)
sqrt = define_unary_op('sqrt', math_ops.sqrt)
rsqrt = define_unary_op('rsqrt', math_ops.rsqrt)
exp = define_unary_op('exp', math_ops.exp)
log = define_unary_op('log', math_ops.log)
ceil = define_unary_op('ceil', math_ops.ceil)
floor = define_unary_op('floor', math_ops.floor)
cos = define_unary_op('cos', math_ops.cos)
sin = define_unary_op('sin', math_ops.sin)
tan = define_unary_op('tan', math_ops.tan)
acos = define_unary_op('acos', math_ops.acos)
asin = define_unary_op('asin', math_ops.asin)
atan = define_unary_op('atan', math_ops.atan)
lgamma = define_unary_op('lgamma', math_ops.lgamma)
digamma = define_unary_op('digamma', math_ops.digamma)
erf = define_unary_op('erf', math_ops.erf)
erfc = define_unary_op('erfc', math_ops.erfc)
logical_not = define_unary_op('logical_not', math_ops.logical_not)
tanh = define_unary_op('tanh', math_ops.tanh)
sigmoid = define_unary_op('sigmoid', math_ops.sigmoid)
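# Each op defined above applies elementwise and preserves the input's axes, e.g.
# (illustrative, assuming a hypothetical LabeledTensor `lt`): result = tanh(lt)
# returns a LabeledTensor with the same axes as `lt`.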
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_binary_op(op_name, elementwise_function):
"""Define a binary operation that broadcasts labeled tensors.
Args:
op_name: string name of the TensorFlow op.
elementwise_function: function to call to evaluate the op on tf.Tensor
objects. This function must accept three arguments: two tf.Tensor objects,
and an optional `name`.
Returns:
Function defining the given op that acts on LabeledTensors.
"""
default_name = 'lt_%s' % op_name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def op(labeled_tensor_0, labeled_tensor_1, name=None):
"""LabeledTensor version of `tf.{op_name}` with label based alignment.
See `tf.{op_name}` for full details.
Args:
labeled_tensor_0: Input tensor.
labeled_tensor_1: Input tensor.
name: Optional op name.
Returns:
A LabeledTensor with result of applying `tf.{op_name}` elementwise.
"""
with ops.name_scope(name, default_name,
[labeled_tensor_0, labeled_tensor_1]) as scope:
align_0, align_1, broadcast_axes = align(labeled_tensor_0,
labeled_tensor_1)
tensor = elementwise_function(align_0.tensor, align_1.tensor, name=scope)
return LabeledTensor(tensor, broadcast_axes)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
add = define_binary_op('add', math_ops.add)
sub = define_binary_op('sub', math_ops.subtract)
mul = define_binary_op('mul', math_ops.multiply)
div = define_binary_op('div', math_ops.div)
mod = define_binary_op('mod', math_ops.mod)
pow_function = define_binary_op('pow', math_ops.pow)
equal = define_binary_op('equal', math_ops.equal)
greater = define_binary_op('greater', math_ops.greater)
greater_equal = define_binary_op('greater_equal', math_ops.greater_equal)
not_equal = define_binary_op('not_equal', math_ops.not_equal)
less = define_binary_op('less', math_ops.less)
less_equal = define_binary_op('less_equal', math_ops.less_equal)
logical_and = define_binary_op('logical_and', math_ops.logical_and)
logical_or = define_binary_op('logical_or', math_ops.logical_or)
logical_xor = define_binary_op('logical_xor', math_ops.logical_xor)
maximum = define_binary_op('maximum', math_ops.maximum)
minimum = define_binary_op('minimum', math_ops.minimum)
squared_difference = define_binary_op('squared_difference',
math_ops.squared_difference)
igamma = define_binary_op('igamma', math_ops.igamma)
igammac = define_binary_op('igammac', math_ops.igammac)
zeta = define_binary_op('zeta', math_ops.zeta)
polygamma = define_binary_op('polygamma', math_ops.polygamma)
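# The binary ops above align their inputs by axis name before broadcasting, e.g.
# (illustrative sketch, assuming hypothetical LabeledTensors `a` with axes
# ('x', 'z') and `b` with axes ('y', 'z')):
#   c = add(a, b)   # inputs pass through `align`, so `c` has axes ('x', 'y', 'z')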
|
hackerbolt-freelancer/jhv
|
refs/heads/master
|
Modules/connection.py
|
3
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import sqlite3
from time import gmtime, strftime
from os import getcwd
connect = sqlite3.connect(str(getcwd()) +"/Database/database.db")
connect.text_factory = str
c = connect.cursor()
facebook = []
gmail = []
def create_tables():
c.execute("CREATE TABLE IF NOT EXISTS Facebook (id integer PRIMARY KEY AUTOINCREMENT, email text, password text,datestamp text)")
c.execute("CREATE TABLE IF NOT EXISTS Gmail (id integer PRIMARY KEY AUTOINCREMENT, email text, password text,datestamp text)")
c.execute("CREATE TABLE IF NOT EXISTS Route (id integer PRIMARY KEY AUTOINCREMENT, ipaddress text,password text,datestamp text)")
connect.commit()
def add_Face_db(email, password):
    datestamp = str(strftime("%a, %d %b %Y %X ", gmtime()))
    c.execute("INSERT OR REPLACE INTO Facebook (email,password,datestamp) VALUES(?,?,?)", (email, password, datestamp))
    connect.commit()
def add_Route_db(Ip, password):
    datestamp = str(strftime("%a, %d %b %Y %X ", gmtime()))
    c.execute("INSERT OR REPLACE INTO Route (ipaddress,password,datestamp) VALUES(?,?,?)", (Ip, password, datestamp))
    connect.commit()
def add_gmail_db(email, password):
    datestamp = str(strftime("%a, %d %b %Y %X ", gmtime()))
    c.execute("INSERT OR IGNORE INTO Gmail (email,password,datestamp) VALUES(?,?,?)", (email, password, datestamp))
    connect.commit()
def delete_one(table, n):
    z = ""  # fallback if no row matches the given id
    if table == "Route":
        cursor = c.execute("SELECT id,ipaddress,password,datestamp FROM %s WHERE id= %d" % (table, int(n)))
        for row in cursor:
            z = (" DELETE: IP:%s Password:%s Date:%s" % (row[1], row[2], row[3]))
    else:
        cursor = c.execute("SELECT id,email,password,datestamp FROM %s WHERE id= %d" % (table, int(n)))
        for row in cursor:
            z = (" DELETE: Email:%s Password:%s Date:%s" % (row[1], row[2], row[3]))
    c.execute("DELETE FROM %s WHERE id= %d" % (table, int(n)))
    connect.commit()
    return z
def get_data(service):
    new = []
    if service == "Route":
        # Include the ipaddress column so row[3] below is valid for Route as well.
        cursor = c.execute("SELECT id,ipaddress,password,datestamp FROM %s" % (service))
    else:
        cursor = c.execute("SELECT id,email,password,datestamp FROM %s" % (service))
    for row in cursor:
        # Collect one readable entry per row instead of spreading characters into the list.
        new.append("%s %s %s %s" % (row[0], row[1], row[2], row[3]))
    return new
def delete_db_all(n,db):
for num in range(n):
c.execute("DELETE FROM %s WHERE id= %s"%(db,num))
    if db is not None:
c.execute("UPDATE SQLITE_SEQUENCE set seq=0 WHERE name=\"%s\""%(db))
connect.commit()
|
garg10may/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/movieclips.py
|
126
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
clean_html,
)
class MovieClipsIE(InfoExtractor):
_VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
_TEST = {
'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
'info_dict': {
'id': 'Wy7ZU',
'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
show_id = display_id or video_id
config = self._download_xml(
'http://config.movieclips.com/player/config/%s' % video_id,
show_id, 'Downloading player config')
if config.find('./country-region').text == 'false':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
properties = config.find('./video/properties')
smil_file = properties.attrib['smil_file']
smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
base_url = smil.find('./head/meta').attrib['base']
formats = []
for video in smil.findall('./body/switch/video'):
vbr = int(video.attrib['system-bitrate']) / 1000
src = video.attrib['src']
formats.append({
'url': base_url,
'play_path': src,
'ext': src.split(':')[0],
'vbr': vbr,
'format_id': '%dk' % vbr,
})
self._sort_formats(formats)
title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
description = clean_html(compat_str(properties.attrib['clip_description']))
thumbnail = properties.attrib['image']
categories = properties.attrib['clip_categories'].split(',')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
}
|
kmoocdev/edx-platform
|
refs/heads/kmooc.rc0
|
common/lib/i18n/tests/test_extract_and_generate.py
|
27
|
"""
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
from datetime import datetime, timedelta
import os
import sys
import string # pylint: disable=deprecated-module
import random
import re
from unittest import TestCase
from mock import patch
from polib import pofile
from pytz import UTC
from i18n import extract
from i18n import generate
from i18n import dummy
from i18n.config import CONFIGURATION
class TestGenerate(TestCase):
"""
Tests functionality of i18n/generate.py
"""
generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
@classmethod
def setUpClass(cls):
sys.stderr.write(
"\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
"If you experience failures, please check that all instances of `gettext` and "
"`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
"locally for more detail.\n"
)
sys.stderr.write(
"\nExtracting i18n strings and generating dummy translations; "
"this may take a few minutes\n"
)
sys.stderr.flush()
extract.main(verbosity=0)
dummy.main(verbosity=0)
def setUp(self):
# Subtract 1 second to help comparisons with file-modify time succeed,
# since os.path.getmtime() is not millisecond-accurate
self.start_time = datetime.now(UTC) - timedelta(seconds=1)
def test_merge(self):
"""
Tests merge script on English source files.
"""
filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())
generate.merge(CONFIGURATION.source_locale, target=filename)
self.assertTrue(os.path.exists(filename))
os.remove(filename)
# Patch dummy_locales to not have esperanto present
@patch.object(CONFIGURATION, 'dummy_locales', ['fake2'])
def test_main(self):
"""
Runs generate.main() which should merge source files,
then compile all sources in all configured languages.
Validates output by checking all .mo files in all configured languages.
.mo files should exist, and be recently created (modified
after start of test suite)
"""
generate.main(verbosity=0, strict=False)
for locale in CONFIGURATION.translated_locales:
for filename in ('django', 'djangojs'):
mofile = filename + '.mo'
path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile)
exists = os.path.exists(path)
self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
self.assertTrue(
datetime.fromtimestamp(os.path.getmtime(path), UTC) >= self.start_time,
msg='File not recently modified: %s' % path
)
            # Segmenting means that the merge headers don't work the way they
# used to, so don't make this check for now. I'm not sure if we'll
# get the merge header back eventually, or delete this code eventually.
# self.assert_merge_headers(locale)
def assert_merge_headers(self, locale):
"""
This is invoked by test_main to ensure that it runs after
calling generate.main().
There should be exactly three merge comment headers
in our merged .po file. This counts them to be sure.
A merge comment looks like this:
# #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
"""
path = os.path.join(CONFIGURATION.get_messages_dir(locale), 'django.po')
pof = pofile(path)
pattern = re.compile('^#-#-#-#-#', re.M)
match = pattern.findall(pof.header)
self.assertEqual(
len(match),
3,
msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
)
def random_name(size=6):
"""Returns random filename as string, like test-4BZ81W"""
chars = string.ascii_uppercase + string.digits
return 'test-' + ''.join(random.choice(chars) for x in range(size))
|
kartikp1995/gnuradio
|
refs/heads/master
|
gr-analog/python/analog/qa_simple_squelch.py
|
47
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_simple_squelch(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_simple_squelch_001(self):
# Test set/gets
alpha = 0.0001
thr1 = 10
thr2 = 20
op = analog.simple_squelch_cc(thr1, alpha)
op.set_threshold(thr2)
t = op.threshold()
self.assertEqual(thr2, t)
def test_simple_squelch_002(self):
alpha = 0.0001
thr = -25
src_data = map(lambda x: float(x)/10.0, range(1, 40))
src = blocks.vector_source_c(src_data)
op = analog.simple_squelch_cc(thr, alpha)
dst = blocks.vector_sink_c()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
expected_result = src_data
expected_result[0:20] = 20*[0,]
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
gr_unittest.run(test_simple_squelch, "test_simple_squelch.xml")
|
strogo/bigcouch
|
refs/heads/master
|
couchjs/scons/scons-local-2.0.1/SCons/compat/_scons_dbm.py
|
61
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
dbm compatibility module for Python versions that don't have dbm.
This does not not NOT (repeat, *NOT*) provide complete dbm functionality.
It's just a stub on which to hang just enough pieces of dbm functionality
that the whichdb.whichdb() implementation in the various 2.X versions of
Python won't blow up even if dbm wasn't compiled in.
"""
__revision__ = "src/engine/SCons/compat/_scons_dbm.py 5134 2010/08/16 23:02:40 bdeegan"
class error(Exception):
pass
def open(*args, **kw):
raise error()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
timoschwarzer/blendworks
|
refs/heads/master
|
BlendWorks Server/python/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
|
1727
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
        # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
        # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
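# Example usage (illustrative sketch, not part of the original module; html5lib
# filters wrap a token stream and are themselves iterable -- `token_stream` here is
# a hypothetical iterable of html5lib token dicts):
#   for token in Filter(token_stream):
#       ...  # tokens for omissible start/end tags have been dropped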
|
aimas/TuniErp-8.0
|
refs/heads/master
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/AddAttachment.py
|
384
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import xmlrpclib
import base64
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.tools import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
class AddAttachment(unohelper.Base, XJobExecutor ):
Kind = {
'PDF' : 'pdf',
'OpenOffice': 'sxw',
}
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.aSearchResult = []
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
if docinfo.getUserFieldValue(2) <> "" and docinfo.getUserFieldValue(3) <> "":
self.win = DBModalDialog(60, 50, 180, 70, "Add Attachment to Server")
self.win.addFixedText("lblResourceType", 2 , 5, 100, 10, "Select Appropriate Resource Type:")
self.win.addComboListBox("lstResourceType", -2, 25, 176, 15,True)
self.win.addButton('btnOkWithoutInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithoutInformation_clicked )
else:
self.win = DBModalDialog(60, 50, 180, 190, "Add Attachment to Server")
self.win.addFixedText("lblModuleName",2 , 9, 42, 20, "Select Module:")
self.win.addComboListBox("lstmodel", -2, 5, 134, 15,True)
self.lstModel = self.win.getControl( "lstmodel" )
self.dModel = {}
            # Open a new connection to the server
ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_model'),('state', '=', 'installed')])
if not len(ids):
# If the module 'base_report_model' is not installed, use the default model
self.dModel = {
"Partner":'res.partner',
}
else:
ids =self.sock.execute(database, uid, self.password, 'base.report.model' , 'search', [])
res = self.sock.execute(database, uid, self.password, 'base.report.model' , 'read', ids, ['name','model_id'])
models = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', map(lambda x:x['model_id'][0], res), ['model'])
models = dict(map(lambda x:(x['id'],x['model']), models))
self.dModel = dict(map(lambda x: (x['name'],models[x['model_id'][0]]), res))
for item in self.dModel.keys():
self.lstModel.addItem(item, self.lstModel.getItemCount())
self.win.addFixedText("lblSearchName",2 , 25, 60, 10, "Enter Search String:")
self.win.addEdit("txtSearchName", 2, 35, 149, 15,)
self.win.addButton('btnSearch', -2 , 35, 25 , 15,'Search' ,actionListenerProc = self.btnSearch_clicked )
self.win.addFixedText("lblSearchRecord", 2 , 55, 60, 10, "Search Result:")
self.win.addComboListBox("lstResource", -2, 65, 176, 70, False )
self.lstResource = self.win.getControl( "lstResource" )
self.win.addFixedText("lblResourceType", 2 , 137, 100, 20, "Select Appropriate Resource Type:")
self.win.addComboListBox("lstResourceType", -2, 147, 176, 15,True )
self.win.addButton('btnOkWithInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithInformation_clicked )
self.lstResourceType = self.win.getControl( "lstResourceType" )
for kind in self.Kind.keys():
self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() )
self.win.addButton('btnCancel', -2 - 27 , -5 , 30 , 15, 'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.doModalDialog("lstResourceType", self.Kind.keys()[0])
def btnSearch_clicked(self, oActionEvent):
modelSelectedItem = self.win.getListBoxSelectedItem("lstmodel")
if modelSelectedItem == "":
return
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
self.aSearchResult =self.sock.execute( database, uid, self.password, self.dModel[modelSelectedItem], 'name_search', self.win.getEditText("txtSearchName"))
self.win.removeListBoxItems("lstResource", 0, self.win.getListBoxItemCount("lstResource"))
if self.aSearchResult == []:
ErrorDialog("No search result found.", "", "Search Error.")
return
for result in self.aSearchResult:
self.lstResource.addItem(result[1],result[0])
def _send_attachment(self, name, data, res_model, res_id):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
params = {
'name': name,
'datas': base64.encodestring( data ),
'datas_fname': name,
'res_model' : res_model,
'res_id' : int(res_id),
}
return self.sock.execute( database, uid, self.password, 'ir.attachment', 'create', params )
def send_attachment(self, model, resource_id):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
if oDoc2.getURL() == "":
ErrorDialog("You should save your file.", "", "Saving Error.")
return None
url = oDoc2.getURL()
if self.Kind[self.win.getListBoxSelectedItem("lstResourceType")] == "pdf":
url = self.doc2pdf(url[7:])
if url == None:
ErrorDialog( "Problem in creating PDF.", "", "PDF Error.")
return None
url = url[7:]
data = read_data_from_file( get_absolute_file_path( url ) )
return self._send_attachment( os.path.basename( url ), data, model, resource_id )
def btnOkWithoutInformation_clicked(self, oActionEvent):
desktop = getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo = oDoc2.getDocumentInfo()
if self.win.getListBoxSelectedItem("lstResourceType") == "":
ErrorDialog("You have to select a resource type.", "", "Selection Error." )
return
res = self.send_attachment( docinfo.getUserFieldValue(3), docinfo.getUserFieldValue(2) )
self.win.endExecute()
def btnOkWithInformation_clicked(self, oActionEvent):
if self.win.getListBoxSelectedItem("lstResourceType") == "":
ErrorDialog( "You have to select a resource type.", "", "Selection Error." )
return
if self.win.getListBoxSelectedItem("lstResource") == "" or self.win.getListBoxSelectedItem("lstmodel") == "":
ErrorDialog("You have to select Model and Resource.", "", "Selection Error.")
return
resourceid = None
for s in self.aSearchResult:
if s[1] == self.win.getListBoxSelectedItem("lstResource"):
resourceid = s[0]
break
if resourceid == None:
ErrorDialog("No resource is selected.", "", "Resource Error." )
return
res = self.send_attachment( self.dModel[self.win.getListBoxSelectedItem('lstmodel')], resourceid )
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
def doc2pdf(self, strFile):
oDoc = None
strFilterSubName = ''
strUrl = convertToURL( strFile )
desktop = getDesktop()
oDoc = desktop.loadComponentFromURL( strUrl, "_blank", 0, Array(self._MakePropertyValue("Hidden",True)))
if oDoc:
strFilterSubName = ""
# select appropriate filter
if oDoc.supportsService("com.sun.star.presentation.PresentationDocument"):
strFilterSubName = "impress_pdf_Export"
elif oDoc.supportsService("com.sun.star.sheet.SpreadsheetDocument"):
strFilterSubName = "calc_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.WebDocument"):
strFilterSubName = "writer_web_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.GlobalDocument"):
strFilterSubName = "writer_globaldocument_pdf_Export"
elif oDoc.supportsService("com.sun.star.text.TextDocument"):
strFilterSubName = "writer_pdf_Export"
elif oDoc.supportsService("com.sun.star.drawing.DrawingDocument"):
strFilterSubName = "draw_pdf_Export"
elif oDoc.supportsService("com.sun.star.formula.FormulaProperties"):
strFilterSubName = "math_pdf_Export"
elif oDoc.supportsService("com.sun.star.chart.ChartDocument"):
strFilterSubName = "chart_pdf_Export"
else:
pass
filename = len(strFilterSubName) > 0 and convertToURL( os.path.splitext( strFile )[0] + ".pdf" ) or None
if len(strFilterSubName) > 0:
oDoc.storeToURL( filename, Array(self._MakePropertyValue("FilterName", strFilterSubName ),self._MakePropertyValue("CompressMode", "1" )))
oDoc.close(True)
# Can be None if len(strFilterSubName) <= 0
return filename
def _MakePropertyValue(self, cName="", uValue=u""):
oPropertyValue = createUnoStruct( "com.sun.star.beans.PropertyValue" )
if cName:
oPropertyValue.Name = cName
if uValue:
oPropertyValue.Value = uValue
return oPropertyValue
if __name__<>"package" and __name__=="__main__":
AddAttachment(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( AddAttachment, "org.openoffice.openerp.report.addattachment", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
roopali8/tempest
|
refs/heads/master
|
tempest/api_schema/response/compute/v2_1/servers.py
|
25
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute.v2_1 import parameter_types
create_server = {
'status_code': [202],
'response_body': {
'type': 'object',
'properties': {
'server': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'security_groups': {'type': 'array'},
'links': parameter_types.links,
'OS-DCF:diskConfig': {'type': 'string'}
},
'additionalProperties': False,
                # NOTE: OS-DCF:diskConfig & security_groups are API extensions,
                # and some environments return a response without these
                # attributes, so they are not 'required'.
'required': ['id', 'links']
}
},
'additionalProperties': False,
'required': ['server']
}
}
create_server_with_admin_pass = copy.deepcopy(create_server)
create_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
create_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
list_servers = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links,
'name': {'type': 'string'}
},
'additionalProperties': False,
'required': ['id', 'links', 'name']
}
},
'servers_links': parameter_types.links
},
'additionalProperties': False,
        # NOTE(gmann): the servers_links attribute is not always present,
        # so it is not 'required'.
'required': ['servers']
}
}
delete_server = {
'status_code': [204],
}
common_show_server = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
'image': {'oneOf': [
{'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'additionalProperties': False,
'required': ['id', 'links']},
{'type': ['string', 'null']}
]},
'flavor': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': parameter_types.links
},
'additionalProperties': False,
'required': ['id', 'links']
},
'fault': {
'type': 'object',
'properties': {
'code': {'type': 'integer'},
'created': {'type': 'string'},
'message': {'type': 'string'},
'details': {'type': 'string'},
},
'additionalProperties': False,
# NOTE(gmann): 'details' is not necessary to be present
# in the 'fault'. So it is not defined as 'required'.
'required': ['code', 'created', 'message']
},
'user_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
'created': {'type': 'string'},
'updated': {'type': 'string'},
'progress': {'type': 'integer'},
'metadata': {'type': 'object'},
'links': parameter_types.links,
'addresses': parameter_types.addresses,
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6
},
'additionalProperties': False,
# NOTE(GMann): 'progress' attribute is present in the response
# only when server's status is one of the progress statuses
# ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
# 'fault' attribute is present in the response
# only when server's status is one of the "ERROR", "DELETED".
# OS-DCF:diskConfig and accessIPv4/v6 are API
# extensions, and some environments return a response
    # without these attributes, so these are not defined as 'required'.
'required': ['id', 'name', 'status', 'image', 'flavor',
'user_id', 'tenant_id', 'created', 'updated',
'metadata', 'links', 'addresses', 'hostId']
}
update_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': common_show_server
},
'additionalProperties': False,
'required': ['server']
}
}
server_detail = copy.deepcopy(common_show_server)
server_detail['properties'].update({
'key_name': {'type': ['string', 'null']},
'security_groups': {'type': 'array'},
# NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
# attributes.
'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
'OS-EXT-AZ:availability_zone': {'type': 'string'},
# NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
# attributes.
'OS-EXT-STS:task_state': {'type': ['string', 'null']},
'OS-EXT-STS:vm_state': {'type': 'string'},
'OS-EXT-STS:power_state': {'type': 'integer'},
'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
'os-extended-volumes:volumes_attached': {'type': 'array'},
'config_drive': {'type': 'string'}
})
server_detail['properties']['addresses']['patternProperties'][
'^[a-zA-Z0-9-_.]+$']['items']['properties'].update({
'OS-EXT-IPS:type': {'type': 'string'},
'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address})
# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr
# attributes in server address. Those are API extensions,
# and some environments return a response without
# these attributes. So they are not 'required'.
get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server': server_detail
},
'additionalProperties': False,
'required': ['server']
}
}
list_servers_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'servers': {
'type': 'array',
'items': server_detail
},
'servers_links': parameter_types.links
},
'additionalProperties': False,
        # NOTE(gmann): the servers_links attribute is not always present,
        # so it is not 'required'.
'required': ['servers']
}
}
rebuild_server = copy.deepcopy(update_server)
rebuild_server['status_code'] = [202]
rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
rescue_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'adminPass': {'type': 'string'}
},
'additionalProperties': False,
'required': ['adminPass']
}
}
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'virtual_interfaces': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'mac_address': parameter_types.mac_address,
'OS-EXT-VIF-NET:net_id': {'type': 'string'}
},
'additionalProperties': False,
                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
                    # not defined as 'required'.
'required': ['id', 'mac_address']
}
}
},
'additionalProperties': False,
'required': ['virtual_interfaces']
}
}
common_attach_volume_info = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
'serverId': {'type': ['integer', 'string']}
},
'additionalProperties': False,
'required': ['id', 'device', 'volumeId', 'serverId']
}
attach_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachment': common_attach_volume_info
},
'additionalProperties': False,
'required': ['volumeAttachment']
}
}
detach_volume = {
'status_code': [202]
}
get_volume_attachment = copy.deepcopy(attach_volume)
get_volume_attachment['response_body']['properties'][
'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}})
list_volume_attachments = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachments': {
'type': 'array',
'items': common_attach_volume_info
}
},
'additionalProperties': False,
'required': ['volumeAttachments']
}
}
list_volume_attachments['response_body']['properties'][
'volumeAttachments']['items']['properties'].update(
{'serverId': {'type': 'string'}})
list_addresses_by_network = {
'status_code': [200],
'response_body': parameter_types.addresses
}
list_addresses = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'addresses': parameter_types.addresses
},
'additionalProperties': False,
'required': ['addresses']
}
}
common_server_group = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'policies': {
'type': 'array',
'items': {'type': 'string'}
},
    # 'members' contains the UUIDs of the instances that are part of
    # the server group
'members': {
'type': 'array',
'items': {'type': 'string'}
},
'metadata': {'type': 'object'}
},
'additionalProperties': False,
'required': ['id', 'name', 'policies', 'members', 'metadata']
}
create_get_server_group = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_group': common_server_group
},
'additionalProperties': False,
'required': ['server_group']
}
}
delete_server_group = {
'status_code': [204]
}
list_server_groups = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_groups': {
'type': 'array',
'items': common_server_group
}
},
'additionalProperties': False,
'required': ['server_groups']
}
}
instance_actions = {
'type': 'object',
'properties': {
'action': {'type': 'string'},
'request_id': {'type': 'string'},
'user_id': {'type': 'string'},
'project_id': {'type': 'string'},
'start_time': {'type': 'string'},
'message': {'type': ['string', 'null']},
'instance_uuid': {'type': 'string'}
},
'additionalProperties': False,
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message', 'instance_uuid']
}
instance_action_events = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'event': {'type': 'string'},
'start_time': {'type': 'string'},
'finish_time': {'type': 'string'},
'result': {'type': 'string'},
'traceback': {'type': ['string', 'null']}
},
'additionalProperties': False,
'required': ['event', 'start_time', 'finish_time', 'result',
'traceback']
}
}
list_instance_actions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceActions': {
'type': 'array',
'items': instance_actions
}
},
'additionalProperties': False,
'required': ['instanceActions']
}
}
instance_actions_with_events = copy.deepcopy(instance_actions)
instance_actions_with_events['properties'].update({
'events': instance_action_events})
# 'events' is not always present in the response body, so it is not
# defined as 'required'
get_instance_action = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceAction': instance_actions_with_events
},
'additionalProperties': False,
'required': ['instanceAction']
}
}
get_password = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'password': {'type': 'string'}
},
'additionalProperties': False,
'required': ['password']
}
}
get_vnc_console = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'console': {
'type': 'object',
'properties': {
'type': {'type': 'string'},
'url': {
'type': 'string',
'format': 'uri'
}
},
'additionalProperties': False,
'required': ['type', 'url']
}
},
'additionalProperties': False,
'required': ['console']
}
}
get_console_output = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'output': {'type': 'string'}
},
'additionalProperties': False,
'required': ['output']
}
}
set_server_metadata = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'metadata': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'additionalProperties': False,
'required': ['metadata']
}
}
list_server_metadata = copy.deepcopy(set_server_metadata)
update_server_metadata = copy.deepcopy(set_server_metadata)
delete_server_metadata_item = {
'status_code': [204]
}
set_get_server_metadata_item = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'meta': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'additionalProperties': False,
'required': ['meta']
}
}
server_actions_common_schema = {
'status_code': [202]
}
server_actions_delete_password = {
'status_code': [204]
}
server_actions_confirm_resize = copy.deepcopy(
server_actions_delete_password)
|
ghmajx/asuswrt-merlin
|
refs/heads/374.43_2-update
|
release/src/router/samba-3.0.25b/source/tdb/swig/Tdb.py
|
52
|
"""Provide a more Pythonic and object-oriented interface to tdb."""
#
# Swig interface to Samba
#
# Copyright (C) Tim Potter 2006
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os
from tdb import *
# Open flags
DEFAULT = TDB_DEFAULT
CLEAR_IF_FIRST = TDB_CLEAR_IF_FIRST
INTERNAL = TDB_INTERNAL
NOLOCK = TDB_NOLOCK
NOMMAP = TDB_NOMMAP
# Class representing a TDB file
class Tdb:
# Create and destroy Tdb objects
def __init__(self, name, hash_size = 0, flags = TDB_DEFAULT,
open_flags = os.O_RDWR | os.O_CREAT, mode = 0600):
self.tdb = tdb_open(name, hash_size, flags, open_flags, mode)
if self.tdb is None:
raise IOError, tdb_errorstr(self.tdb)
def __del__(self):
self.close()
def close(self):
if hasattr(self, 'tdb') and self.tdb is not None:
if tdb_close(self.tdb) == -1:
raise IOError, tdb_errorstr(self.tdb)
self.tdb = None
# Random access to keys, values
def __getitem__(self, key):
result = tdb_fetch(self.tdb, key)
if result is None:
raise KeyError, '%s: %s' % (key, tdb_errorstr(self.tdb))
return result
def __setitem__(self, key, item):
if tdb_store(self.tdb, key, item) == -1:
raise IOError, tdb_errorstr(self.tdb)
def __delitem__(self, key):
if not tdb_exists(self.tdb, key):
raise KeyError, '%s: %s' % (key, tdb_errorstr(self.tdb))
tdb_delete(self.tdb, key)
def has_key(self, key):
return tdb_exists(self.tdb, key)
# Tdb iterator
class TdbIterator:
def __init__(self, tdb):
self.tdb = tdb
self.key = None
def __iter__(self):
return self
def next(self):
if self.key is None:
self.key = tdb_firstkey(self.tdb)
if self.key is None:
raise StopIteration
return self.key
else:
self.key = tdb_nextkey(self.tdb, self.key)
if self.key is None:
raise StopIteration
return self.key
def __iter__(self):
return Tdb.TdbIterator(self.tdb)
# Implement other dict functions using TdbIterator
def keys(self):
return [k for k in iter(self)]
def values(self):
return [self[k] for k in iter(self)]
def items(self):
return [(k, self[k]) for k in iter(self)]
def __len__(self):
return len(self.keys())
def clear(self):
for k in iter(self):
del(self[k])
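# A minimal usage sketch of the dict-like interface above, assuming the
# swig 'tdb' bindings are importable; the database path is a hypothetical
# placeholder.
#
#     db = Tdb("/tmp/example.tdb", flags=CLEAR_IF_FIRST)
#     db["key"] = "value"          # tdb_store via __setitem__
#     print db["key"]              # tdb_fetch via __getitem__
#     for k in db:                 # iterates via Tdb.TdbIterator
#         print k, db[k]
#     db.close()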
|
mshafiq9/django
|
refs/heads/master
|
tests/m2m_signals/models.py
|
448
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Part(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=20)
default_parts = models.ManyToManyField(Part)
optional_parts = models.ManyToManyField(Part, related_name='cars_optional')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class SportsCar(Car):
price = models.IntegerField()
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=20)
fans = models.ManyToManyField('self', related_name='idols', symmetrical=False)
friends = models.ManyToManyField('self')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
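# A hedged usage sketch of the many-to-many fields above (these models
# back the m2m_changed signal tests in this package); the object names
# are illustrative assumptions.
#
#     wheel = Part.objects.create(name='wheel')
#     car = Car.objects.create(name='sedan')
#     car.default_parts.add(wheel)       # sends m2m_changed pre_add/post_add
#     car.optional_parts.add(wheel)      # same, over the 'cars_optional' relation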
|
hangarunderground/hendrix
|
refs/heads/master
|
performance-tools/perf_client1.py
|
3
|
import requests
counter = 0
for counter in range(5000):
r = requests.get('http://localhost:8000/fib/%s' % counter)
    print("%s - %s" % (counter, r))
|
nicecapj/crossplatfromMmorpgServer
|
refs/heads/master
|
ThirdParty/boost_1_61_0/libs/mpi/test/python/scatter_test.py
|
64
|
# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test scatter() collective.
import boost.parallel.mpi as mpi
from generators import *
def scatter_test(comm, generator, kind, root):
if comm.rank == root:
print ("Scattering %s from root %d..." % (kind, root)),
if comm.rank == root:
values = list()
for p in range(0, comm.size):
values.append(generator(p))
result = mpi.scatter(comm, values, root = root)
else:
result = mpi.scatter(comm, root = root);
assert result == generator(comm.rank)
if comm.rank == root: print "OK."
return
scatter_test(mpi.world, int_generator, "integers", 0)
scatter_test(mpi.world, int_generator, "integers", 1)
scatter_test(mpi.world, gps_generator, "GPS positions", 0)
scatter_test(mpi.world, gps_generator, "GPS positions", 1)
scatter_test(mpi.world, string_generator, "strings", 0)
scatter_test(mpi.world, string_generator, "strings", 1)
scatter_test(mpi.world, string_list_generator, "list of strings", 0)
scatter_test(mpi.world, string_list_generator, "list of strings", 1)
|
40023247/W17test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/sysconfig.py
|
726
|
"""Access to Python's configuration information."""
# we'll emulate this module since it deals with settings very close to
# the OS and metal
variables={'TANH_PRESERVES_ZERO_SIGN': 0, 'WITH_DOC_STRINGS': 0}
def get_config_var(var):
if var in variables:
return variables[var]
    raise NotImplementedError("sysconfig.py:get_config_var: variable '%s' does not exist" % var)
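# A small self-check sketch, run only when this emulation is executed
# directly; the variable names come from the 'variables' dict above.
if __name__ == '__main__':
    assert get_config_var('WITH_DOC_STRINGS') == 0
    try:
        get_config_var('NO_SUCH_VARIABLE')
    except NotImplementedError:
        pass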
|
liavkoren/djangoDev
|
refs/heads/master
|
django/db/transaction.py
|
10
|
from functools import wraps
from django.db import (
connections, DEFAULT_DB_ALIAS,
DatabaseError, Error, ProgrammingError)
from django.utils.decorators import available_attrs
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
#################################
# Decorators / context managers #
#################################
class Atomic(object):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = @atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# When entering an atomic block with autocommit turned off,
# Django should only use savepoints and shouldn't commit.
# This requires at least a savepoint for the outermost block.
if not self.savepoint:
raise TransactionManagementError(
"The outermost 'atomic' block cannot use "
"savepoint = False when autocommit is off.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
# We aren't in a transaction yet; create one.
# The usual way to start a transaction is to turn autocommit off.
# However, some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# In such cases, start an explicit transaction instead, which has
# the side-effect of disabling autocommit.
if connection.features.autocommits_when_autocommit_is_off:
connection._start_transaction_under_autocommit()
connection.autocommit = False
else:
connection.set_autocommit(False)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
elif connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def __call__(self, func):
@wraps(func, assigned=available_attrs(func))
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
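# A hedged usage sketch of atomic() as a decorator and as a context
# manager (the model and function names are illustrative assumptions,
# not part of this module):
#
#     @atomic
#     def create_author(name):
#         return Author.objects.create(name=name)
#
#     def transfer(src, dst, amount):
#         with atomic(using='default', savepoint=True):
#             src.withdraw(amount)
#             dst.deposit(amount)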
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = set([using])
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
|
elmerdpadilla/iv
|
refs/heads/8.0
|
addons/pos_discount/__openerp__.py
|
312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Point of Sale Discounts',
'version': '1.0',
'category': 'Point of Sale',
'sequence': 6,
'summary': 'Simple Discounts in the Point of Sale ',
'description': """
=======================
This module allows the cashier to quickly give a percentage
sale discount to a customer.
""",
'author': 'OpenERP SA',
'depends': ['point_of_sale'],
'data': [
'views/views.xml',
'views/templates.xml'
],
'installable': True,
'website': 'https://www.odoo.com/page/point-of-sale',
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sunny-wyb/xen-4.1.2
|
refs/heads/master
|
dist/install/usr/lib/python2.7/site-packages/xen/util/acmpolicy.py
|
44
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006,2007 International Business Machines Corp.
# Author: Stefan Berger <stefanb@us.ibm.com>
#============================================================================
import os
import stat
import array
import struct
import shutil
import commands
# sha is deprecated as of python 2.6
try:
from hashlib import sha1
except ImportError:
# but hashlib was only added in python 2.5
from sha import new as sha1
from xml.dom import minidom, Node
from xen.xend.XendLogging import log
from xen.util import xsconstants, bootloader, mkdir
from xen.util.xspolicy import XSPolicy
from xen.xend.XendError import SecurityError
import xen.util.xsm.acm.acm as security
from xen.util.xsm.xsm import XSMError
from xen.xend import XendOptions
ACM_POLICIES_DIR = security.policy_dir_prefix + "/"
# Constants needed for generating a binary policy from its XML
# representation
ACM_POLICY_VERSION = 4 # Latest one
ACM_CHWALL_VERSION = 1
ACM_STE_VERSION = 1
ACM_MAGIC = 0x001debc;
ACM_NULL_POLICY = 0
ACM_CHINESE_WALL_POLICY = 1
ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY = 2
ACM_POLICY_UNDEFINED = 15
ACM_LABEL_UNLABELED = "__UNLABELED__"
ACM_LABEL_UNLABELED_DISPLAY = "unlabeled"
"""
Error codes reported when trying to test for a new policy.
These error codes are reported in an array of tuples where
each error code is followed by a parameter describing the error
more closely, such as a domain id.
"""
ACM_EVTCHN_SHARING_VIOLATION = 0x100
ACM_GNTTAB_SHARING_VIOLATION = 0x101
ACM_DOMAIN_LOOKUP = 0x102
ACM_CHWALL_CONFLICT = 0x103
ACM_SSIDREF_IN_USE = 0x104
DEFAULT_policy = \
"<?xml version=\"1.0\" ?>\n" +\
"<SecurityPolicyDefinition xmlns=\"http://www.ibm.com\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.ibm.com ../../security_policy.xsd\">\n" +\
" <PolicyHeader>\n" +\
" <PolicyName>DEFAULT</PolicyName>\n" +\
" <Version>1.0</Version>\n" +\
" </PolicyHeader>\n" +\
" <SimpleTypeEnforcement>\n" +\
" <SimpleTypeEnforcementTypes>\n" +\
" <Type>SystemManagement</Type>\n" +\
" <Type>__UNLABELED__</Type>\n" +\
" </SimpleTypeEnforcementTypes>\n" +\
" </SimpleTypeEnforcement>\n" +\
" <ChineseWall>\n" +\
" <ChineseWallTypes>\n" +\
" <Type>SystemManagement</Type>\n" +\
" </ChineseWallTypes>\n" +\
" </ChineseWall>\n" +\
" <SecurityLabelTemplate>\n" +\
" <SubjectLabels bootstrap=\"SystemManagement\">\n" +\
" <VirtualMachineLabel>\n" +\
" <Name%s>SystemManagement</Name>\n" +\
" <SimpleTypeEnforcementTypes>\n" +\
" <Type>SystemManagement</Type>\n" +\
" <Type>__UNLABELED__</Type>\n" +\
" </SimpleTypeEnforcementTypes>\n" +\
" <ChineseWallTypes>\n" +\
" <Type/>\n" +\
" </ChineseWallTypes>\n" +\
" </VirtualMachineLabel>\n" +\
" <VirtualMachineLabel>\n" +\
" <Name>__UNLABELED__</Name>\n" +\
" <SimpleTypeEnforcementTypes>\n" +\
" <Type>__UNLABELED__</Type>\n" +\
" </SimpleTypeEnforcementTypes>\n" +\
" <ChineseWallTypes>\n" +\
" <Type/>\n" +\
" </ChineseWallTypes>\n" +\
" </VirtualMachineLabel>\n" +\
" </SubjectLabels>\n" +\
" <ObjectLabels>\n" +\
" <ResourceLabel>\n" +\
" <Name>__UNLABELED__</Name>\n" +\
" <SimpleTypeEnforcementTypes>\n" +\
" <Type>__UNLABELED__</Type>\n" +\
" </SimpleTypeEnforcementTypes>\n" +\
" </ResourceLabel>\n" +\
" </ObjectLabels>\n" +\
" </SecurityLabelTemplate>\n" +\
"</SecurityPolicyDefinition>\n"
ACM_SCHEMA="""<?xml version="1.0" encoding="UTF-8"?>
<!-- Author: Ray Valdez, Reiner Sailer {rvaldez,sailer}@us.ibm.com -->
<!-- This file defines the schema, which is used to define -->
<!-- the security policy and the security labels in Xen. -->
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" targetNamespace="http://www.ibm.com" xmlns="http://www.ibm.com" elementFormDefault="qualified">
<xsd:element name="SecurityPolicyDefinition">
<xsd:complexType>
<xsd:sequence>
<xsd:element ref="PolicyHeader" minOccurs="1" maxOccurs="1"></xsd:element>
<xsd:element ref="SimpleTypeEnforcement" minOccurs="0" maxOccurs="1"></xsd:element>
<xsd:element ref="ChineseWall" minOccurs="0" maxOccurs="1"></xsd:element>
<xsd:element ref="SecurityLabelTemplate" minOccurs="1" maxOccurs="1"></xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="PolicyHeader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="PolicyName" minOccurs="1" maxOccurs="1" type="xsd:string"></xsd:element>
<xsd:element name="PolicyUrl" minOccurs="0" maxOccurs="1" type="xsd:string"></xsd:element>
<xsd:element name="Reference" type="xsd:string" minOccurs="0" maxOccurs="1" />
<xsd:element name="Date" minOccurs="0" maxOccurs="1" type="xsd:string"></xsd:element>
<xsd:element name="NameSpaceUrl" minOccurs="0" maxOccurs="1" type="xsd:string"></xsd:element>
<xsd:element name="Version" minOccurs="1" maxOccurs="1" type="VersionFormat"/>
<xsd:element ref="FromPolicy" minOccurs="0" maxOccurs="1"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="ChineseWall">
<xsd:complexType>
<xsd:sequence>
<xsd:element ref="ChineseWallTypes" minOccurs="1" maxOccurs="1" />
<xsd:element ref="ConflictSets" minOccurs="0" maxOccurs="1" />
</xsd:sequence>
<xsd:attribute name="priority" type="PolicyOrder" use="optional"></xsd:attribute>
</xsd:complexType>
</xsd:element>
<xsd:element name="SimpleTypeEnforcement">
<xsd:complexType>
<xsd:sequence>
<xsd:element ref="SimpleTypeEnforcementTypes" />
</xsd:sequence>
<xsd:attribute name="priority" type="PolicyOrder" use="optional"></xsd:attribute>
</xsd:complexType>
</xsd:element>
<xsd:element name="SecurityLabelTemplate">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="SubjectLabels" minOccurs="0" maxOccurs="1">
<xsd:complexType>
<xsd:sequence>
<xsd:element ref="VirtualMachineLabel" minOccurs="1" maxOccurs="unbounded"></xsd:element>
</xsd:sequence>
<xsd:attribute name="bootstrap" type="xsd:string" use="required"></xsd:attribute>
</xsd:complexType>
</xsd:element>
<xsd:element name="ObjectLabels" minOccurs="0" maxOccurs="1">
<xsd:complexType>
<xsd:sequence>
<xsd:element ref="ResourceLabel" minOccurs="1" maxOccurs="unbounded"></xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="ChineseWallTypes">
<xsd:complexType>
<xsd:sequence>
<xsd:element maxOccurs="unbounded" minOccurs="1" ref="Type" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="ConflictSets">
<xsd:complexType>
<xsd:sequence>
<xsd:element maxOccurs="unbounded" minOccurs="1" ref="Conflict" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="SimpleTypeEnforcementTypes">
<xsd:complexType>
<xsd:sequence>
<xsd:element maxOccurs="unbounded" minOccurs="1" ref="Type" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="Conflict">
<xsd:complexType>
<xsd:sequence>
<xsd:element maxOccurs="unbounded" minOccurs="1" ref="Type" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required"></xsd:attribute>
</xsd:complexType>
</xsd:element>
<xsd:element name="VirtualMachineLabel">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Name" type="NameWithFrom"></xsd:element>
<xsd:element ref="SimpleTypeEnforcementTypes" minOccurs="0" maxOccurs="1" />
<xsd:element ref="ChineseWallTypes" minOccurs="0" maxOccurs="unbounded" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="ResourceLabel">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Name" type="NameWithFrom"></xsd:element>
<xsd:element name="SimpleTypeEnforcementTypes" type="SingleSimpleTypeEnforcementType" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="Name" type="xsd:string" />
<xsd:element name="Type" type="xsd:string" />
<xsd:simpleType name="PolicyOrder">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="PrimaryPolicyComponent"></xsd:enumeration>
</xsd:restriction>
</xsd:simpleType>
<xsd:element name="FromPolicy">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="PolicyName" minOccurs="1" maxOccurs="1" type="xsd:string"/>
<xsd:element name="Version" minOccurs="1" maxOccurs="1" type="VersionFormat"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:simpleType name="VersionFormat">
<xsd:restriction base="xsd:string">
<xsd:pattern value="[0-9]{1,8}.[0-9]{1,8}"></xsd:pattern>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="NameWithFrom">
<xsd:simpleContent>
<xsd:extension base="xsd:string">
<xsd:attribute name="from" type="xsd:string" use="optional"></xsd:attribute>
</xsd:extension>
</xsd:simpleContent>
</xsd:complexType>
<xsd:complexType name="SingleSimpleTypeEnforcementType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="1" ref="Type" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>"""
def get_DEFAULT_policy(dom0label=""):
fromnode = ""
if dom0label != "":
fromnode = " from=\"%s\"" % dom0label
return DEFAULT_policy % fromnode
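# Illustration of the substitution above: DEFAULT_policy contains a single
# '%s' inside the bootstrap <Name> element, so
#
#     get_DEFAULT_policy()                   -> <Name>SystemManagement</Name>
#     get_DEFAULT_policy("SystemManagement") -> <Name from="SystemManagement">SystemManagement</Name>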
def initialize():
xoptions = XendOptions.instance()
basedir = xoptions.get_xend_security_path()
policiesdir = basedir + "/policies"
mkdir.parents(policiesdir, stat.S_IRWXU)
instdir = security.install_policy_dir_prefix
DEF_policy_file = "DEFAULT-security_policy.xml"
#Install default policy.
f = open(policiesdir + "/" + DEF_policy_file, 'w')
if f:
f.write(get_DEFAULT_policy())
f.close()
else:
log.error("Could not write the default policy's file.")
defpol = ACMPolicy(xml=get_DEFAULT_policy())
defpol.compile()
class ACMPolicy(XSPolicy):
"""
ACMPolicy class. Implements methods for getting information from
the XML representation of the policy as well as compilation and
loading of a policy into the HV.
"""
def __init__(self, name=None, dom=None, ref=None, xml=None):
if name:
self.name = name
try:
self.dom = minidom.parse(self.path_from_policy_name(name))
except Exception, e:
raise SecurityError(-xsconstants.XSERR_XML_PROCESSING,
str(e))
elif dom:
self.dom = dom
self.name = self.get_name()
elif xml:
try:
self.dom = minidom.parseString(xml)
except Exception, e:
raise SecurityError(-xsconstants.XSERR_XML_PROCESSING,
str(e))
self.name = self.get_name()
rc = self.validate()
if rc != xsconstants.XSERR_SUCCESS:
raise SecurityError(rc)
if ref:
from xen.xend.XendXSPolicy import XendACMPolicy
self.xendacmpolicy = XendACMPolicy(self, {}, ref)
else:
self.xendacmpolicy = None
XSPolicy.__init__(self, name=self.name, ref=ref)
def get_dom(self):
return self.dom
def get_name(self):
return self.policy_dom_get_hdr_item("PolicyName")
def get_type(self):
return xsconstants.XS_POLICY_ACM
def get_type_name(self):
return xsconstants.ACM_POLICY_ID
def __str__(self):
return self.get_name()
def validate(self):
"""
        Validate against the policy's schema. Does not fail if the
        libxml2 python lib is not installed.
"""
rc = xsconstants.XSERR_SUCCESS
try:
import libxml2
except Exception, e:
log.warn("Libxml2 python-wrapper is not installed on the system.")
return xsconstants.XSERR_SUCCESS
try:
parserctxt = libxml2.schemaNewMemParserCtxt(ACM_SCHEMA,
len(ACM_SCHEMA))
schemaparser = parserctxt.schemaParse()
valid = schemaparser.schemaNewValidCtxt()
doc = libxml2.parseDoc(self.toxml())
if doc.schemaValidateDoc(valid) != 0:
rc = -xsconstants.XSERR_BAD_XML
except Exception, e:
log.warn("Problem with the schema: %s" % str(e))
rc = -xsconstants.XSERR_GENERAL_FAILURE
if rc != xsconstants.XSERR_SUCCESS:
log.warn("XML did not validate against schema")
if rc == xsconstants.XSERR_SUCCESS:
rc = self.__validate_name_and_labels()
return rc
def __validate_name_and_labels(self):
""" no ':' allowed in the policy name and the labels """
if ':' in self.get_name():
return -xsconstants.XSERR_BAD_POLICY_NAME
for s in self.policy_get_resourcelabel_names():
if ':' in s:
return -xsconstants.XSERR_BAD_LABEL
for s in self.policy_get_virtualmachinelabel_names():
if ':' in s:
return -xsconstants.XSERR_BAD_LABEL
return xsconstants.XSERR_SUCCESS
def is_default_policy(self):
"""
Determine whether this is the default policy
"""
default = ['SystemManagement', ACM_LABEL_UNLABELED ]
if self.policy_get_virtualmachinelabel_names() == default and \
self.policy_get_bootstrap_vmlabel() == default[0] and \
self.policy_get_stetypes_types() == default and \
self.policy_get_stes_of_vmlabel(default[0]) == default and \
self.policy_get_stes_of_vmlabel(default[1]) == [default[1]] and \
self.policy_get_resourcelabel_names() == [default[1]] and \
self.policy_get_chwall_types() == [ default[0] ] and \
self.get_name() == "DEFAULT":
return True
return False
def update(self, xml_new):
"""
Update the policy with the new XML. The hypervisor decides
whether the new policy can be applied.
"""
rc = -xsconstants.XSERR_XML_PROCESSING
errors = ""
acmpol_old = self
try:
acmpol_new = ACMPolicy(xml=xml_new)
except Exception:
return -xsconstants.XSERR_XML_PROCESSING, errors
vmlabel_map = acmpol_new.policy_get_vmlabel_translation_map()
# An update requires version information in the current
# and new policy. The version number of the current policy
# must be the same as what is in the FromPolicy/Version node
# in the new one and the current policy's name must be the
# same as in FromPolicy/PolicyName
# The default policy when it is set skips this step.
if not acmpol_new.is_default_policy() and \
not acmpol_old.is_default_policy():
irc = self.__do_update_version_check(acmpol_new)
if irc != xsconstants.XSERR_SUCCESS:
return irc, errors
if self.isloaded():
newvmnames = \
acmpol_new.policy_get_virtualmachinelabel_names_sorted()
oldvmnames = \
acmpol_old.policy_get_virtualmachinelabel_names_sorted()
del_array = ""
chg_array = ""
for o in oldvmnames:
if o not in newvmnames:
old_idx = oldvmnames.index(o)
if vmlabel_map.has_key(o):
#not a deletion, but a renaming
new = vmlabel_map[o]
new_idx = newvmnames.index(new)
chg_array += struct.pack("ii", old_idx, new_idx)
else:
del_array += struct.pack("i", old_idx)
for v in newvmnames:
if v in oldvmnames:
old_idx = oldvmnames.index(v)
new_idx = newvmnames.index(v)
if old_idx != new_idx:
chg_array += struct.pack("ii", old_idx, new_idx)
# VM labels indicated in the 'from' attribute of a VM or
# resource node but that did not exist in the old policy
# are considered bad labels.
bad_renamings = set(vmlabel_map.keys()) - set(oldvmnames)
if len(bad_renamings) > 0:
log.error("Bad VM label renamings: %s" %
list(bad_renamings))
return -xsconstants.XSERR_BAD_LABEL, errors
reslabel_map = acmpol_new.policy_get_reslabel_translation_map()
oldresnames = acmpol_old.policy_get_resourcelabel_names()
bad_renamings = set(reslabel_map.keys()) - set(oldresnames)
if len(bad_renamings) > 0:
log.error("Bad resource label renamings: %s" %
list(bad_renamings))
return -xsconstants.XSERR_BAD_LABEL, errors
#Get binary and map from the new policy
rc, pol_map, bin_pol = acmpol_new.policy_create_map_and_bin()
if rc != xsconstants.XSERR_SUCCESS:
log.error("Could not build the map and binary policy.")
return rc, errors
#Need to do / check the following:
# - relabel all resources where there is a 'from' field in
# the policy and mark those as unlabeled where the label
# does not appear in the new policy anymore
# - relabel all VMs where there is a 'from' field in the
# policy and mark those as unlabeled where the label
# does not appear in the new policy anymore; no running
# or paused VM may be unlabeled through this
# - check that under the new labeling conditions the VMs
# still have access to their resources as before. Unlabeled
# resources are inaccessible. If this check fails, the
# update failed.
# - Attempt changes in the hypervisor; if this step fails,
# roll back the relabeling of resources and VMs
# - Commit the relabeling of resources
rc, errors = security.change_acm_policy(bin_pol,
del_array, chg_array,
vmlabel_map, reslabel_map,
self, acmpol_new,
acmpol_new.is_default_policy())
if rc == 0:
# Replace the old DOM with the new one and save it
self.dom = acmpol_new.dom
self.compile()
log.info("ACM policy update was successful")
else:
#Not loaded in HV
self.dom = acmpol_new.dom
rc = self.compile()
return rc, errors
def force_default_policy(klass, policy_ref):
"""
Force the installation of the DEFAULT policy if for
example no XML of the current policy is available and
the update path with comparisons of old and new policy
cannot be taken.
This only succeeds if only Domain-0 is running or
all guest have the same ssidref as Domain-0.
"""
errors = ""
acmpol_new = ACMPolicy(xml = get_DEFAULT_policy(), ref=policy_ref)
from xen.lowlevel import acm
dom0_ssidref = acm.getssid(0)
del_array = ""
chg_array = struct.pack("ii",
dom0_ssidref['ssidref'] & 0xffff,
0x1)
rc, pol_map, bin_pol = acmpol_new.policy_create_map_and_bin()
if rc != xsconstants.XSERR_SUCCESS:
return rc, errors, acmpol_new
rc, errors = security.hv_chg_policy(bin_pol, del_array, chg_array)
return rc, errors, acmpol_new
force_default_policy = classmethod(force_default_policy)
def get_reset_policy_xml(klass):
dom0_label = security.get_ssid(0)[1]
return get_DEFAULT_policy(dom0_label)
get_reset_policy_xml = classmethod(get_reset_policy_xml)
def __do_update_version_check(self, acmpol_new):
acmpol_old = self
now_vers = acmpol_old.policy_dom_get_hdr_item("Version")
now_name = acmpol_old.policy_dom_get_hdr_item("PolicyName")
req_oldvers = acmpol_new.policy_dom_get_frompol_item("Version")
req_oldname = acmpol_new.policy_dom_get_frompol_item("PolicyName")
if now_vers == "" or \
now_vers != req_oldvers or \
now_name != req_oldname:
log.info("Policy rejected: %s != %s or %s != %s" % \
(now_vers,req_oldvers,now_name,req_oldname))
return -xsconstants.XSERR_VERSION_PREVENTS_UPDATE
if not self.isVersionUpdate(acmpol_new):
log.info("Policy rejected since new version is not an update.")
return -xsconstants.XSERR_VERSION_PREVENTS_UPDATE
return xsconstants.XSERR_SUCCESS
def compareVersions(self, v1, v2):
"""
Compare two policy versions given their tuples of major and
minor.
        Return '0' if versions are equal, '>0' if v1 > v2 and
        '<0' if v1 < v2.
"""
rc = v1[0] - v2[0]
if rc == 0:
rc = v1[1] - v2[1]
return rc
def getVersionTuple(self, item="Version"):
v_str = self.policy_dom_get_hdr_item(item)
return self.__convVersionToTuple(v_str)
def get_version(self):
return self.policy_dom_get_hdr_item("Version")
def isVersionUpdate(self, polnew):
if self.compareVersions(polnew.getVersionTuple(),
self.getVersionTuple()) > 0:
return True
return False
def __convVersionToTuple(self, v_str):
""" Convert a version string, formatted according to the scheme
"%d.%d" into a tuple of (major, minor). Return (0,0) if the
string is empty.
"""
major = 0
minor = 0
if v_str != "":
tmp = v_str.split(".")
major = int(tmp[0])
if len(tmp) > 1:
minor = int(tmp[1])
return (major, minor)
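    # Worked example of the version helpers above:
    #     __convVersionToTuple("1.0")  -> (1, 0)
    #     __convVersionToTuple("1.10") -> (1, 10)
    #     compareVersions((1, 10), (1, 2)) -> 8  (> 0, so 1.10 is newer)
    # isVersionUpdate() therefore accepts a new policy only when its
    # (major, minor) tuple compares strictly greater than the current one.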
def get_policies_path(self):
xoptions = XendOptions.instance()
basedir = xoptions.get_xend_security_path()
return basedir + "/policies/"
def policy_path(self, name):
prefix = self.get_policies_path()
path = prefix + name.replace('.','/')
_path = path.split("/")
del _path[-1]
mkdir.parents("/".join(_path), stat.S_IRWXU)
return path
def path_from_policy_name(self, name):
return self.policy_path(name) + "-security_policy.xml"
#
# Functions interacting with the bootloader
#
def vmlabel_to_ssidref(self, vm_label):
""" Convert a VMlabel into an ssidref given the current
policy
Return xsconstants.INVALID_SSIDREF if conversion failed.
"""
ssidref = xsconstants.INVALID_SSIDREF
names = self.policy_get_virtualmachinelabel_names_sorted()
try:
vmidx = names.index(vm_label)
ssidref = (vmidx << 16) | vmidx
except:
pass
return ssidref
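    # Worked example: with sorted labels ['__UNLABELED__', 'SystemManagement'],
    # 'SystemManagement' has index 1, so its ssidref is (1 << 16) | 1 =
    # 0x00010001; the low 16 bits are read back as the chwall index in
    # policy_get_domain_label_by_ssidref() below.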
def set_vm_bootlabel(self, vm_label, remove=False):
parms="<>"
if vm_label != "":
ssidref = self.vmlabel_to_ssidref(vm_label)
if ssidref == xsconstants.INVALID_SSIDREF:
return -xsconstants.XSERR_BAD_LABEL
parms = "0x%08x:%s:%s:%s" % \
(ssidref, xsconstants.ACM_POLICY_ID, \
self.get_name(),vm_label)
else:
ssidref = 0 #Identifier for removal
if remove == True:
parms = "<>"
try:
def_title = bootloader.get_default_title()
bootloader.set_kernel_attval(def_title, "ssidref", parms)
except:
return -xsconstants.XSERR_GENERAL_FAILURE
return ssidref
#
# Utility functions related to the policy's files
#
def get_filename(self, postfix, prefix=None, dotted=False):
"""
Create the filename for the policy. The prefix is prepended
to the path. If dotted is True, then a policy name like
'a.b.c' will remain as is, otherwise it will become 'a/b/c'
"""
if prefix == None:
prefix = self.get_policies_path()
name = self.get_name()
if name:
p = name.split(".")
path = ""
if dotted:
sep = "."
else:
sep = "/"
if len(p) > 1:
path = sep.join(p[0:len(p)-1])
if prefix != "" or path != "":
allpath = prefix + path + sep + p[-1] + postfix
else:
allpath = p[-1] + postfix
return allpath
return None
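    # Example of the name handling above, for a policy named 'example.chwall'
    # with the default policies prefix:
    #     get_filename(".bin")                  -> <prefix>example/chwall.bin
    #     get_filename(".bin", "", dotted=True) -> example.chwall.bin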
def __readfile(self, name):
cont = ""
filename = self.get_filename(name)
f = open(filename, "r")
if f:
cont = f.read()
f.close()
return cont
def get_map(self):
return self.__readfile(".map")
def get_bin(self):
return self.__readfile(".bin")
def copy_policy_file(self, suffix, destdir):
spolfile = self.get_filename(suffix)
dpolfile = destdir + "/" + self.get_filename(suffix,"",dotted=True)
try:
shutil.copyfile(spolfile, dpolfile)
except Exception, e:
log.error("Could not copy policy file %s to %s: %s" %
(spolfile, dpolfile, str(e)))
return -xsconstants.XSERR_FILE_ERROR
return xsconstants.XSERR_SUCCESS
#
# DOM-related functions
#
def policy_dom_get(self, parent, key, createit=False):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if node.nodeName == key:
return node
if createit:
self.dom_create_node(parent, key)
return self.policy_dom_get(parent, key)
def dom_create_node(self, parent, newname, value=" "):
xml = "<a><"+newname+">"+ value +"</"+newname+"></a>"
frag = minidom.parseString(xml)
frag.childNodes[0].nodeType = Node.DOCUMENT_FRAGMENT_NODE
parent.appendChild(frag.childNodes[0])
return frag.childNodes[0]
def dom_get_node(self, path, createit=False):
node = None
parts = path.split("/")
doc = self.get_dom()
if len(parts) > 0:
node = self.policy_dom_get(doc.documentElement, parts[0])
if node:
i = 1
while i < len(parts):
_node = self.policy_dom_get(node, parts[i], createit)
if not _node:
if not createit:
break
else:
self.dom_create_node(node, parts[i])
_node = self.policy_dom_get(node, parts[i])
node = _node
i += 1
return node
#
# Header-related functions
#
def policy_dom_get_header_subnode(self, nodename):
node = self.dom_get_node("PolicyHeader/%s" % nodename)
return node
def policy_dom_get_hdr_item(self, name, default=""):
node = self.policy_dom_get_header_subnode(name)
if node and len(node.childNodes) > 0:
return node.childNodes[0].nodeValue
return default
def policy_dom_get_frompol_item(self, name, default="", createit=False):
node = self.dom_get_node("PolicyHeader/FromPolicy",createit)
if node:
node = self.policy_dom_get(node, name, createit)
if node and len(node.childNodes) > 0:
return node.childNodes[0].nodeValue
return default
def get_header_fields_map(self):
header = {
'policyname' : self.policy_dom_get_hdr_item("PolicyName"),
'policyurl' : self.policy_dom_get_hdr_item("PolicyUrl"),
'reference' : self.policy_dom_get_hdr_item("Reference"),
'date' : self.policy_dom_get_hdr_item("Date"),
'namespaceurl' : self.policy_dom_get_hdr_item("NameSpaceUrl"),
'version' : self.policy_dom_get_hdr_item("Version")
}
return header
def set_frompolicy_name(self, name):
""" For tools to adapt the header of the policy """
node = self.dom_get_node("PolicyHeader/FromPolicy/PolicyName",
createit=True)
node.childNodes[0].nodeValue = name
def set_frompolicy_version(self, version):
""" For tools to adapt the header of the policy """
node = self.dom_get_node("PolicyHeader/FromPolicy/Version",
createit=True)
node.childNodes[0].nodeValue = version
def set_policy_name(self, name):
""" For tools to adapt the header of the policy """
node = self.dom_get_node("PolicyHeader/PolicyName")
node.childNodes[0].nodeValue = name
def set_policy_version(self, version):
""" For tools to adapt the header of the policy """
node = self.dom_get_node("PolicyHeader/Version")
node.childNodes[0].nodeValue = version
def update_frompolicy(self, curpol):
self.set_frompolicy_name(curpol.policy_dom_get_hdr_item("PolicyName"))
version = curpol.policy_dom_get_hdr_item("Version")
self.set_frompolicy_version(version)
(maj, minor) = self.__convVersionToTuple(version)
self.set_policy_version("%s.%s" % (maj, minor+1))
#
# Get all types that are part of a node
#
def policy_get_types(self, node):
strings = []
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "Type" and \
len(node.childNodes[i].childNodes) > 0:
strings.append(node.childNodes[i].childNodes[0].nodeValue)
i += 1
return strings
#
# Simple Type Enforcement-related functions
#
def policy_get_stetypes_node(self):
node = self.dom_get_node("SimpleTypeEnforcement/SimpleTypeEnforcementTypes")
return node
def policy_get_stetypes_types(self):
strings = []
node = self.policy_get_stetypes_node()
if node:
strings = self.policy_get_types(node)
return strings
#
# Chinese Wall Type-related functions
#
def policy_get_chwall_types(self):
strings = []
node = self.dom_get_node("ChineseWall/ChineseWallTypes")
if node:
strings = self.policy_get_types(node)
return strings
def policy_get_chwall_cfses(self):
cfs = []
node = self.dom_get_node("ChineseWall/ConflictSets")
if node:
i = 0
while i < len(node.childNodes):
_cfs = {}
if node.childNodes[i].nodeName == "Conflict":
_cfs['name'] = node.childNodes[i].getAttribute('name')
_cfs['chws'] = self.policy_get_types(node.childNodes[i])
cfs.append(_cfs)
i += 1
return cfs
def policy_get_chwall_cfses_names_sorted(self):
"""
Return the list of all conflict set names in alphabetical
order.
"""
cfs_names = []
node = self.dom_get_node("ChineseWall/ConflictSets")
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "Conflict":
n = node.childNodes[i].getAttribute('name')
#it better have a name!
if n:
cfs_names.append(n)
i += 1
cfs_names.sort()
return cfs_names
#
# Subject Label-related functions
#
def policy_get_bootstrap_vmlabel(self):
node = self.dom_get_node("SecurityLabelTemplate/SubjectLabels")
if node:
vmlabel = node.getAttribute("bootstrap")
return vmlabel
# Get the names of all virtual machine labels; returns an array
def policy_get_virtualmachinelabel_names(self):
strings = []
node = self.dom_get_node("SecurityLabelTemplate/SubjectLabels")
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "VirtualMachineLabel":
name = self.policy_dom_get(node.childNodes[i], "Name")
if len(name.childNodes) > 0:
strings.append(name.childNodes[0].nodeValue)
i += 1
return strings
def policy_sort_virtualmachinelabel_names(self, vmnames):
bootstrap = self.policy_get_bootstrap_vmlabel()
if bootstrap not in vmnames:
raise SecurityError(-xsconstants.XSERR_POLICY_INCONSISTENT)
vmnames.remove(bootstrap)
vmnames.sort()
vmnames.insert(0, bootstrap)
if ACM_LABEL_UNLABELED in vmnames:
vmnames.remove(ACM_LABEL_UNLABELED)
vmnames.insert(0, ACM_LABEL_UNLABELED)
return vmnames
def policy_get_virtualmachinelabel_names_sorted(self):
""" Get a sorted list of VMlabel names. The bootstrap VM's
label will be the first one in that list, followed
by an alphabetically sorted list of VM label names """
vmnames = self.policy_get_virtualmachinelabel_names()
res = self.policy_sort_virtualmachinelabel_names(vmnames)
if res[0] != ACM_LABEL_UNLABELED:
res.insert(0, ACM_LABEL_UNLABELED)
return res
def policy_get_virtualmachinelabels(self):
""" Get a list of all virtual machine labels in this policy """
res = []
node = self.dom_get_node("SecurityLabelTemplate/SubjectLabels")
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "VirtualMachineLabel":
name = self.policy_dom_get(node.childNodes[i], "Name")
if len(name.childNodes) > 0:
_res = {}
_res['type'] = xsconstants.ACM_LABEL_VM
_res['name'] = name.childNodes[0].nodeValue
stes = self.policy_dom_get(node.childNodes[i],
"SimpleTypeEnforcementTypes")
if stes:
_res['stes'] = self.policy_get_types(stes)
else:
_res['stes'] = []
chws = self.policy_dom_get(node.childNodes[i],
"ChineseWallTypes")
if chws:
_res['chws'] = self.policy_get_types(chws)
else:
_res['chws'] = []
res.append(_res)
i += 1
return res
def policy_get_stes_of_vmlabel(self, vmlabel):
""" Get a list of all STEs of a given VMlabel """
return self.__policy_get_stes_of_labeltype(vmlabel,
"/SubjectLabels", "VirtualMachineLabel")
def policy_get_stes_of_resource(self, reslabel):
""" Get a list of all resources of a given VMlabel """
return self.__policy_get_stes_of_labeltype(reslabel,
"/ObjectLabels", "ResourceLabel")
def __policy_get_stes_of_labeltype(self, label, path, labeltype):
node = self.dom_get_node("SecurityLabelTemplate" + path)
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == labeltype:
name = self.policy_dom_get(node.childNodes[i], "Name")
if len(name.childNodes) > 0 and \
name.childNodes[0].nodeValue == label:
stes = self.policy_dom_get(node.childNodes[i],
"SimpleTypeEnforcementTypes")
if not stes:
return []
return self.policy_get_types(stes)
i += 1
return []
def policy_check_vmlabel_against_reslabels(self, vmlabel, resources):
"""
Check whether the given vmlabel is compatible with the given
resource labels. Do this by getting all the STEs of the
vmlabel and the STEs of the resources. Any STE type of the
VM label must match an STE type of the resource.
"""
vm_stes = self.policy_get_stes_of_vmlabel(vmlabel)
if len(vm_stes) == 0:
return False
for res in resources:
res_stes = self.policy_get_stes_of_resource(res)
if len(res_stes) == 0 or \
len( set(res_stes).intersection( set(vm_stes) ) ) == 0:
return False
return True
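    # Worked example of the check above: a VM label whose STE types are
    # {'SystemManagement', '__UNLABELED__'} may use a resource labelled with
    # {'__UNLABELED__'} (non-empty intersection), but not a resource whose
    # STE set is empty or disjoint from the VM's.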
def __policy_get_label_translation_map(self, path, labeltype):
res = {}
node = self.dom_get_node("SecurityLabelTemplate/" + path)
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == labeltype:
name = self.policy_dom_get(node.childNodes[i], "Name")
from_name = name.getAttribute("from")
if from_name and len(name.childNodes) > 0:
res.update({from_name : name.childNodes[0].nodeValue})
i += 1
return res
def policy_get_vmlabel_translation_map(self):
"""
Get a dictionary of virtual machine mappings from their
old VMlabel name to the new VMlabel name.
"""
return self.__policy_get_label_translation_map("SubjectLabels",
"VirtualMachineLabel")
def policy_get_reslabel_translation_map(self):
"""
Get a dictionary of resource mappings from their
old resource label name to the new resource label name.
"""
return self.__policy_get_label_translation_map("ObjectLabels",
"ResourceLabel")
#
# Object Label-related functions
#
def policy_get_resourcelabel_names(self):
"""
        Get the names of all resource labels in an array, but only
        those that actually have types.
"""
strings = []
node = self.dom_get_node("SecurityLabelTemplate/ObjectLabels")
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "ResourceLabel":
name = self.policy_dom_get(node.childNodes[i], "Name")
stes = self.policy_dom_get(node.childNodes[i],
"SimpleTypeEnforcementTypes")
if stes and len(name.childNodes) > 0:
strings.append(name.childNodes[0].nodeValue)
i += 1
return strings
def policy_get_resourcelabels(self):
"""
Get all information about all resource labels of this policy.
"""
res = []
node = self.dom_get_node("SecurityLabelTemplate/ObjectLabels")
if node:
i = 0
while i < len(node.childNodes):
if node.childNodes[i].nodeName == "ResourceLabel":
name = self.policy_dom_get(node.childNodes[i], "Name")
if len(name.childNodes) > 0:
_res = {}
_res['type'] = xsconstants.ACM_LABEL_RES
_res['name'] = name.childNodes[0].nodeValue
stes = self.policy_dom_get(node.childNodes[i],
"SimpleTypeEnforcementTypes")
if stes:
_res['stes'] = self.policy_get_types(stes)
else:
_res['stes'] = []
_res['chws'] = []
res.append(_res)
i += 1
return res
def policy_find_reslabels_with_stetype(self, stetype):
"""
Find those resource labels that hold a given STE type.
"""
res = []
reslabels = self.policy_get_resourcelabels()
for resl in reslabels:
if stetype in resl['stes']:
res.append(resl['name'])
return res
def toxml(self):
dom = self.get_dom()
if dom:
return dom.toxml()
return None
def hash(self):
""" Calculate a SHA1 hash of the XML policy """
return sha1(self.toxml())
def save(self):
### Save the XML policy into a file ###
rc = -xsconstants.XSERR_FILE_ERROR
name = self.get_name()
if name:
path = self.path_from_policy_name(name)
if path:
f = open(path, 'w')
if f:
try:
try:
f.write(self.toxml())
rc = 0
except:
pass
finally:
f.close()
return rc
def __write_to_file(self, suffix, data):
#write the data into a file with the given suffix
f = open(self.get_filename(suffix),"w")
if f:
try:
try:
f.write(data)
except Exception, e:
log.error("Error writing file: %s" % str(e))
return -xsconstants.XSERR_FILE_ERROR
finally:
f.close()
else:
return -xsconstants.XSERR_FILE_ERROR
return xsconstants.XSERR_SUCCESS
def compile(self):
rc = self.save()
if rc == 0:
rc, mapfile, bin_pol = self.policy_create_map_and_bin()
if rc == 0:
try:
security.mapfile_lock()
rc = self.__write_to_file(".map", mapfile)
if rc != 0:
log.error("Error writing map file")
finally:
security.mapfile_unlock()
if rc == 0:
rc = self.__write_to_file(".bin", bin_pol)
if rc != 0:
log.error("Error writing binary policy file")
return rc
def loadintohv(self):
"""
        Load this policy into the hypervisor.
        If successful, the policy's flags will indicate that the
        policy is the one loaded into the hypervisor.
"""
if not self.isloaded():
(ret, output) = commands.getstatusoutput(
security.xensec_tool +
" loadpolicy " +
self.get_filename(".bin"))
if ret != 0:
return -xsconstants.XSERR_POLICY_LOAD_FAILED
return xsconstants.XSERR_SUCCESS
def isloaded(self):
"""
Determine whether this policy is the active one.
"""
if self.get_name() == security.get_active_policy_name():
return True
return False
def destroy(self):
"""
Destroy the policy including its binary, mapping and
XML files.
This only works if the policy is not the one that's loaded
"""
if self.isloaded():
return -xsconstants.XSERR_POLICY_LOADED
files = [ self.get_filename(".map",""),
self.get_filename(".bin","") ]
for f in files:
try:
os.unlink(f)
except:
pass
if self.xendacmpolicy:
self.xendacmpolicy.destroy()
XSPolicy.destroy(self)
return xsconstants.XSERR_SUCCESS
def policy_get_domain_label(self, domid):
"""
Given a domain's ID, retrieve the label it has using
its ssidref for reverse calculation.
"""
try:
mgmt_dom = security.get_ssid(domid)
except:
return ""
return self.policy_get_domain_label_by_ssidref(int(mgmt_dom[3]))
def policy_get_domain_label_by_ssidref(self, ssidref):
""" Given an ssidref, find the corresponding VM label """
chwall_ref = ssidref & 0xffff
try:
allvmtypes = self.policy_get_virtualmachinelabel_names_sorted()
except:
return None
return allvmtypes[chwall_ref]
def policy_get_domain_label_formatted(self, domid):
label = self.policy_get_domain_label(domid)
if label == "":
label = ACM_LABEL_UNLABELED
return "%s:%s:%s" % (xsconstants.ACM_POLICY_ID, self.get_name(), label)
def policy_get_domain_label_by_ssidref_formatted(self, ssidref):
label = self.policy_get_domain_label_by_ssidref(ssidref)
if label == "":
return ""
return "%s:%s:%s" % (xsconstants.ACM_POLICY_ID, self.get_name(), label)
def policy_create_map_and_bin(self):
"""
Create the policy's map and binary files -- compile the policy.
"""
def roundup8(len):
return ((len + 7) & ~7)
rc = xsconstants.XSERR_SUCCESS
mapfile = ""
primpolcode = ACM_POLICY_UNDEFINED
secpolcode = ACM_POLICY_UNDEFINED
unknown_ste = set()
unknown_chw = set()
unlabeled_ste = "__NULL_LABEL__"
unlabeled_chw = "__NULL_LABEL__"
rc = self.validate()
if rc:
return rc, "", ""
stes = self.policy_get_stetypes_types()
if stes:
stes.sort()
chws = self.policy_get_chwall_types()
if chws:
chws.sort()
vms = self.policy_get_virtualmachinelabels()
bootstrap = self.policy_get_bootstrap_vmlabel()
vmlabels = self.policy_get_virtualmachinelabel_names_sorted()
if bootstrap not in vmlabels:
log.error("Bootstrap label '%s' not found among VM labels '%s'." \
% (bootstrap, vmlabels))
return -xsconstants.XSERR_POLICY_INCONSISTENT, "", ""
vms_with_chws = []
chws_by_vm = { ACM_LABEL_UNLABELED : [] }
for v in vms:
if v.has_key("chws"):
vms_with_chws.append(v["name"])
chws_by_vm[v["name"]] = v["chws"]
if bootstrap in vms_with_chws:
vms_with_chws.remove(bootstrap)
vms_with_chws.sort()
vms_with_chws.insert(0, bootstrap)
else:
vms_with_chws.sort()
if ACM_LABEL_UNLABELED in vms_with_chws:
unlabeled_chw = ACM_LABEL_UNLABELED
vms_with_chws.remove(ACM_LABEL_UNLABELED) ; # @1
vms_with_stes = []
stes_by_vm = { ACM_LABEL_UNLABELED : [] }
for v in vms:
if v.has_key("stes"):
vms_with_stes.append(v["name"])
stes_by_vm[v["name"]] = v["stes"]
if bootstrap in vms_with_stes:
vms_with_stes.remove(bootstrap)
vms_with_stes.sort()
vms_with_stes.insert(0, bootstrap)
else:
vms_with_stes.sort()
if ACM_LABEL_UNLABELED in vms_with_stes:
unlabeled_ste = ACM_LABEL_UNLABELED
vms_with_stes.remove(ACM_LABEL_UNLABELED) ; # @2
resnames = self.policy_get_resourcelabel_names()
resnames.sort()
stes_by_res = {}
res = self.policy_get_resourcelabels()
for r in res:
if r.has_key("stes"):
stes_by_res[r["name"]] = r["stes"]
if ACM_LABEL_UNLABELED in resnames:
resnames.remove(ACM_LABEL_UNLABELED)
# check for duplicate labels
if len(vmlabels) != len(set(vmlabels)) or \
len(resnames) != len(set(resnames)) or \
len(stes) != len(set(stes)) or \
len(chws) != len(set(chws)):
return -xsconstants.XSERR_POLICY_HAS_DUPLICATES, "", ""
max_chw_ssids = 1 + len(vms_with_chws)
max_chw_types = 1 + len(vms_with_chws)
max_ste_ssids = 1 + len(vms_with_stes) + len(resnames)
max_ste_types = 1 + len(vms_with_stes) + len(resnames)
mapfile = "POLICYREFERENCENAME %s\n" % self.get_name()
mapfile += "MAGIC %08x\n" % ACM_MAGIC
mapfile += "POLICFILE %s\n" % \
self.path_from_policy_name(self.get_name())
mapfile += "BINARYFILE %s\n" % self.get_filename(".bin")
mapfile += "MAX-CHWALL-TYPES %08x\n" % len(chws)
mapfile += "MAX-CHWALL-SSIDS %08x\n" % max_chw_ssids
mapfile += "MAX-CHWALL-LABELS %08x\n" % max_chw_ssids
mapfile += "MAX-STE-TYPES %08x\n" % len(stes)
mapfile += "MAX-STE-SSIDS %08x\n" % max_ste_ssids
mapfile += "MAX-STE-LABELS %08x\n" % max_ste_ssids
mapfile += "\n"
if chws:
mapfile += \
"PRIMARY CHWALL\n"
primpolcode = ACM_CHINESE_WALL_POLICY
if stes:
mapfile += \
"SECONDARY STE\n"
else:
mapfile += \
"SECONDARY NULL\n"
secpolcode = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY
else:
if stes:
mapfile += \
"PRIMARY STE\n"
primpolcode = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY
mapfile += \
"SECONDARY NULL\n"
mapfile += "\n"
if len(vms_with_chws) > 0:
mapfile += \
"LABEL->SSID ANY CHWALL %-20s %x\n" % \
(unlabeled_chw, 0)
i = 0
for v in vms_with_chws:
mapfile += \
"LABEL->SSID VM CHWALL %-20s %x\n" % \
(v, i+1)
i += 1
mapfile += "\n"
if len(vms_with_stes) > 0 or len(resnames) > 0:
mapfile += \
"LABEL->SSID ANY STE %-20s %08x\n" % \
(unlabeled_ste, 0)
i = 0
for v in vms_with_stes:
mapfile += \
"LABEL->SSID VM STE %-20s %x\n" % (v, i+1)
i += 1
j = 0
for r in resnames:
mapfile += \
"LABEL->SSID RES STE %-20s %x\n" % (r, j+i+1)
j += 1
mapfile += "\n"
if vms_with_chws:
mapfile += \
"SSID->TYPE CHWALL %08x\n" % 0
i = 1
for v in vms_with_chws:
mapfile += \
"SSID->TYPE CHWALL %08x" % i
for c in chws_by_vm[v]:
mapfile += " %s" % c
mapfile += "\n"
i += 1
mapfile += "\n"
if len(vms_with_stes) > 0 or len(resnames) > 0:
mapfile += \
"SSID->TYPE STE %08x\n" % 0
i = 1
for v in vms_with_stes:
mapfile += \
"SSID->TYPE STE %08x" % i
for s in stes_by_vm[v]:
mapfile += " %s" % s
mapfile += "\n"
i += 1
for r in resnames:
mapfile += \
"SSID->TYPE STE %08x" % i
for s in stes_by_res[r]:
mapfile += " %s" % s
mapfile += "\n"
i += 1
mapfile += "\n"
if chws:
i = 0
while i < len(chws):
mapfile += \
"TYPE CHWALL %-20s %d\n" % (chws[i], i)
i += 1
mapfile += "\n"
if stes:
i = 0
while i < len(stes):
mapfile += \
"TYPE STE %-20s %d\n" % (stes[i], i)
i += 1
mapfile += "\n"
mapfile += "\n"
# Build header with policy name
length = roundup8(4 + len(self.get_name()) + 1)
polname = self.get_name();
pr_bin = struct.pack("!i", len(polname)+1)
pr_bin += polname;
while len(pr_bin) < length:
pr_bin += "\x00"
# Build chinese wall part
vms_with_chws.insert(0, ACM_LABEL_UNLABELED)
cfses_names = self.policy_get_chwall_cfses_names_sorted()
cfses = self.policy_get_chwall_cfses()
chwformat = "!iiiiiiiii"
max_chw_cfs = len(cfses)
chw_ssid_offset = struct.calcsize(chwformat)
chw_confset_offset = chw_ssid_offset + \
2 * len(chws) * max_chw_types
chw_running_types_offset = 0
chw_conf_agg_offset = 0
chw_bin = struct.pack(chwformat,
ACM_CHWALL_VERSION,
ACM_CHINESE_WALL_POLICY,
len(chws),
max_chw_ssids,
max_chw_cfs,
chw_ssid_offset,
chw_confset_offset,
chw_running_types_offset,
chw_conf_agg_offset)
chw_bin_body = ""
# VMs that are listed and their chinese walls
for v in vms_with_chws:
for c in chws:
unknown_chw |= (set(chws_by_vm[v]) - set(chws))
if c in chws_by_vm[v]:
chw_bin_body += struct.pack("!h",1)
else:
chw_bin_body += struct.pack("!h",0)
# Conflict sets -- they need to be processed in alphabetical order
for cn in cfses_names:
if cn == "" or cn is None:
return -xsconstants.XSERR_BAD_CONFLICTSET, "", ""
i = 0
while i < len(cfses):
if cfses[i]['name'] == cn:
conf = cfses[i]['chws']
break
i += 1
for c in chws:
if c in conf:
chw_bin_body += struct.pack("!h",1)
else:
chw_bin_body += struct.pack("!h",0)
del cfses[i]
if len(cfses) != 0:
return -xsconstants.XSERR_BAD_CONFLICTSET, "", ""
chw_bin += chw_bin_body
while len(chw_bin) < roundup8(len(chw_bin)):
chw_bin += "\x00"
# Build STE part
vms_with_stes.insert(0, ACM_LABEL_UNLABELED) # Took out in @2
steformat="!iiiii"
ste_bin = struct.pack(steformat,
ACM_STE_VERSION,
ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
len(stes),
max_ste_types,
struct.calcsize(steformat))
ste_bin_body = ""
if stes:
# VMs that are listed and their STE types
for v in vms_with_stes:
unknown_ste |= (set(stes_by_vm[v]) - set(stes))
for s in stes:
if s in stes_by_vm[v]:
ste_bin_body += struct.pack("!h",1)
else:
ste_bin_body += struct.pack("!h",0)
for r in resnames:
unknown_ste |= (set(stes_by_res[r]) - set(stes))
for s in stes:
if s in stes_by_res[r]:
ste_bin_body += struct.pack("!h",1)
else:
ste_bin_body += struct.pack("!h",0)
        ste_bin += ste_bin_body
while len(ste_bin) < roundup8(len(ste_bin)):
ste_bin += "\x00"
#Write binary header:
headerformat="!iiiiiiiiii20s"
totallen_bin = struct.calcsize(headerformat) + \
len(pr_bin) + len(chw_bin) + len(ste_bin)
polref_offset = struct.calcsize(headerformat)
primpoloffset = polref_offset + len(pr_bin)
if primpolcode == ACM_CHINESE_WALL_POLICY:
secpoloffset = primpoloffset + len(chw_bin)
elif primpolcode == ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
secpoloffset = primpoloffset + len(ste_bin)
else:
secpoloffset = primpoloffset
(major, minor) = self.getVersionTuple()
hdr_bin = struct.pack(headerformat,
ACM_MAGIC,
ACM_POLICY_VERSION,
totallen_bin,
polref_offset,
primpolcode,
primpoloffset,
secpolcode,
secpoloffset,
major, minor,
self.hash().digest())
all_bin = array.array('B')
for s in [ hdr_bin, pr_bin, chw_bin, ste_bin ]:
for c in s:
all_bin.append(ord(c))
log.info("Compiled policy: rc = %s" % hex(rc))
if len(unknown_ste) > 0:
log.info("The following STEs in VM/res labels were unknown:" \
" %s" % list(unknown_ste))
rc = -xsconstants.XSERR_BAD_LABEL
if len(unknown_chw) > 0:
log.info("The following Ch. Wall types in labels were unknown:" \
" %s" % list(unknown_chw))
rc = -xsconstants.XSERR_BAD_LABEL
return rc, mapfile, all_bin.tostring()
def validate_enforced_policy_hash(self):
""" verify that the policy hash embedded in the binary policy
that is currently enforce matches the one of the XML policy.
"""
if self.hash().digest() != self.get_enforced_policy_hash():
raise Exception('Policy hashes do not match')
def get_enforced_policy_hash(self):
binpol = self.get_enforced_binary()
headerformat="!iiiiiiiiii20s"
res = struct.unpack(headerformat, binpol[:60])
if len(res) >= 11:
return res[10]
return None
def get_enforced_binary(self):
rc, binpol = security.hv_get_policy()
if rc != 0:
raise SecurityError(-xsconstants.XSERR_HV_OP_FAILED)
return binpol
get_enforced_binary = classmethod(get_enforced_binary)
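# Illustrative sketch, not part of the original module: how the binary policy
# header built by the compile step above could be unpacked for inspection.
# The field order mirrors the struct.pack() call that writes the header; the
# helper name is hypothetical and struct is already imported by this module.
def _inspect_policy_header(binpol):
    headerformat = "!iiiiiiiiii20s"
    fields = struct.unpack(headerformat, binpol[:struct.calcsize(headerformat)])
    (magic, version, total_len, polref_offset, primpolcode, primpoloffset,
     secpolcode, secpoloffset, major, minor, digest) = fields
    return {'magic': magic, 'version': version, 'total_len': total_len,
            'polref_offset': polref_offset, 'primary_policy': primpolcode,
            'secondary_policy': secpolcode, 'xml_version': (major, minor),
            'hash': digest}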
|
lambder/bigcouch
|
refs/heads/master
|
couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/src_tarbz2.py
|
61
|
"""SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_tarbz2.py 5134 2010/08/16 23:02:40 bdeegan"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-jc')
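# Hedged usage sketch, not part of the original file: this packager is
# normally reached through the generic Package builder in a SConstruct,
# roughly as below. The NAME/VERSION values are placeholders and the exact
# keyword set depends on the SCons version.
#
#   env = Environment(tools=['default', 'packaging'])
#   env.Package(NAME='foo', VERSION='1.0',
#               PACKAGETYPE='src_tarbz2',
#               source=env.FindSourceFiles())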
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
JeremiahDJordan/cassandra
|
refs/heads/trunk
|
pylib/cqlshlib/util.py
|
30
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
from itertools import izip
def split_list(items, pred):
"""
Split up a list (or other iterable) on the elements which satisfy the
given predicate 'pred'. Elements for which 'pred' returns true start a new
sublist for subsequent elements, which will accumulate in the new sublist
until the next satisfying element.
>>> split_list([0, 1, 2, 5, 99, 8], lambda n: (n % 2) == 0)
[[0], [1, 2], [5, 99, 8], []]
"""
thisresult = []
results = [thisresult]
for i in items:
thisresult.append(i)
if pred(i):
thisresult = []
results.append(thisresult)
return results
def find_common_prefix(strs):
"""
Given a list (iterable) of strings, return the longest common prefix.
>>> find_common_prefix(['abracadabra', 'abracadero', 'abranch'])
'abra'
>>> find_common_prefix(['abracadabra', 'abracadero', 'mt. fuji'])
''
"""
common = []
for cgroup in izip(*strs):
if all(x == cgroup[0] for x in cgroup[1:]):
common.append(cgroup[0])
else:
break
return ''.join(common)
def list_bifilter(pred, iterable):
"""
Filter an iterable into two output lists: the first containing all
elements of the iterable for which 'pred' returns true, and the second
containing all others. Order of the elements is otherwise retained.
>>> list_bifilter(lambda x: isinstance(x, int), (4, 'bingo', 1.2, 6, True))
([4, 6], ['bingo', 1.2, True])
"""
yes_s = []
no_s = []
for i in iterable:
(yes_s if pred(i) else no_s).append(i)
return yes_s, no_s
def identity(x):
return x
def trim_if_present(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
return s
def get_file_encoding_bomsize(filename):
"""
Checks the beginning of a file for a Unicode BOM. Based on this check,
the encoding that should be used to open the file and the number of
bytes that should be skipped (to skip the BOM) are returned.
"""
bom_encodings = ((codecs.BOM_UTF8, 'utf-8-sig'),
(codecs.BOM_UTF16_LE, 'utf-16le'),
(codecs.BOM_UTF16_BE, 'utf-16be'),
                     (codecs.BOM_UTF32_LE, 'utf-32le'),
(codecs.BOM_UTF32_BE, 'utf-32be'))
firstbytes = open(filename, 'rb').read(4)
for bom, encoding in bom_encodings:
if firstbytes.startswith(bom):
file_encoding, size = encoding, len(bom)
break
else:
file_encoding, size = "ascii", 0
return (file_encoding, size)
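# Hedged usage sketch, not part of the original module: open a file with the
# detected encoding and skip past the BOM, if any. The file name is a
# placeholder; codecs is already imported at the top of this module.
#
#   encoding, bomsize = get_file_encoding_bomsize('queries.cql')
#   f = codecs.open('queries.cql', 'r', encoding)
#   f.seek(bomsize)
#   data = f.read()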
|
spartonia/saleor
|
refs/heads/master
|
saleor/order/migrations/0001_initial.py
|
26
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import django.utils.timezone
import django_prices.models
import django.core.validators
import satchless.item
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0001_initial'),
('userprofile', '__first__'),
]
operations = [
migrations.CreateModel(
name='DeliveryGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='new', max_length=32, verbose_name='delivery status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('shipped', 'Shipped')])),
('shipping_required', models.BooleanField(default=True, verbose_name='shipping required')),
('shipping_price', django_prices.models.PriceField(decimal_places=4, default=0, editable=False, currency=b'USD', max_digits=12, verbose_name='shipping price')),
],
bases=(models.Model, satchless.item.ItemSet),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='new', max_length=32, verbose_name='order status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('payment-pending', 'Waiting for payment'), ('fully-paid', 'Fully paid'), ('shipped', 'Shipped')])),
('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('last_status_change', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last status change', editable=False)),
('tracking_client_id', models.CharField(max_length=36, editable=False, blank=True)),
('shipping_method', models.CharField(max_length=255, verbose_name='Delivery method', blank=True)),
('anonymous_user_email', models.EmailField(default='', max_length=254, editable=False, blank=True)),
('token', models.CharField(unique=True, max_length=36, verbose_name='token')),
('billing_address', models.ForeignKey(related_name='+', editable=False, to='userprofile.Address')),
('shipping_address', models.ForeignKey(related_name='+', editable=False, to='userprofile.Address', null=True)),
('user', models.ForeignKey(related_name='orders', verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-last_status_change',),
},
bases=(models.Model, satchless.item.ItemSet),
),
migrations.CreateModel(
name='OrderedItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_name', models.CharField(max_length=128, verbose_name='product name')),
('product_sku', models.CharField(max_length=32, verbose_name='sku')),
('quantity', models.IntegerField(verbose_name='quantity', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(999)])),
('unit_price_net', models.DecimalField(verbose_name='unit price (net)', max_digits=12, decimal_places=4)),
('unit_price_gross', models.DecimalField(verbose_name='unit price (gross)', max_digits=12, decimal_places=4)),
('delivery_group', models.ForeignKey(related_name='items', editable=False, to='order.DeliveryGroup')),
('product', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='product', blank=True, to='product.Product', null=True)),
],
bases=(models.Model, satchless.item.ItemLine),
),
migrations.CreateModel(
name='OrderHistoryEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last history change', editable=False)),
('status', models.CharField(max_length=32, verbose_name='order status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('payment-pending', 'Waiting for payment'), ('fully-paid', 'Fully paid'), ('shipped', 'Shipped')])),
('comment', models.CharField(default='', max_length=100, blank=True)),
('order', models.ForeignKey(related_name='history', to='order.Order')),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['date'],
},
),
migrations.CreateModel(
name='OrderNote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('content', models.CharField(max_length=250)),
('order', models.ForeignKey(related_name='notes', to='order.Order')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('variant', models.CharField(max_length=255)),
('status', models.CharField(default='waiting', max_length=10, choices=[('waiting', 'Waiting for confirmation'), ('preauth', 'Pre-authorized'), ('confirmed', 'Confirmed'), ('rejected', 'Rejected'), ('refunded', 'Refunded'), ('error', 'Error'), ('input', 'Input')])),
('fraud_status', models.CharField(default='unknown', max_length=10, verbose_name='fraud check', choices=[('unknown', 'Unknown'), ('accept', 'Passed'), ('reject', 'Rejected'), ('review', 'Review')])),
('fraud_message', models.TextField(default='', blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('transaction_id', models.CharField(max_length=255, blank=True)),
('currency', models.CharField(max_length=10)),
('total', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('delivery', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('tax', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('description', models.TextField(default='', blank=True)),
('billing_first_name', models.CharField(max_length=256, blank=True)),
('billing_last_name', models.CharField(max_length=256, blank=True)),
('billing_address_1', models.CharField(max_length=256, blank=True)),
('billing_address_2', models.CharField(max_length=256, blank=True)),
('billing_city', models.CharField(max_length=256, blank=True)),
('billing_postcode', models.CharField(max_length=256, blank=True)),
('billing_country_code', models.CharField(max_length=2, blank=True)),
('billing_country_area', models.CharField(max_length=256, blank=True)),
('billing_email', models.EmailField(max_length=254, blank=True)),
('customer_ip_address', models.IPAddressField(blank=True)),
('extra_data', models.TextField(default='', blank=True)),
('message', models.TextField(default='', blank=True)),
('token', models.CharField(default='', max_length=36, blank=True)),
('captured_amount', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('order', models.ForeignKey(related_name='payments', to='order.Order')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='deliverygroup',
name='order',
field=models.ForeignKey(related_name='groups', editable=False, to='order.Order'),
),
]
|
jw84/gallant
|
refs/heads/master
|
saleor/order/migrations/0001_initial.py
|
26
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import django.utils.timezone
import django_prices.models
import django.core.validators
import satchless.item
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0001_initial'),
('userprofile', '__first__'),
]
operations = [
migrations.CreateModel(
name='DeliveryGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='new', max_length=32, verbose_name='delivery status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('shipped', 'Shipped')])),
('shipping_required', models.BooleanField(default=True, verbose_name='shipping required')),
('shipping_price', django_prices.models.PriceField(decimal_places=4, default=0, editable=False, currency=b'USD', max_digits=12, verbose_name='shipping price')),
],
bases=(models.Model, satchless.item.ItemSet),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='new', max_length=32, verbose_name='order status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('payment-pending', 'Waiting for payment'), ('fully-paid', 'Fully paid'), ('shipped', 'Shipped')])),
('created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('last_status_change', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last status change', editable=False)),
('tracking_client_id', models.CharField(max_length=36, editable=False, blank=True)),
('shipping_method', models.CharField(max_length=255, verbose_name='Delivery method', blank=True)),
('anonymous_user_email', models.EmailField(default='', max_length=254, editable=False, blank=True)),
('token', models.CharField(unique=True, max_length=36, verbose_name='token')),
('billing_address', models.ForeignKey(related_name='+', editable=False, to='userprofile.Address')),
('shipping_address', models.ForeignKey(related_name='+', editable=False, to='userprofile.Address', null=True)),
('user', models.ForeignKey(related_name='orders', verbose_name='user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-last_status_change',),
},
bases=(models.Model, satchless.item.ItemSet),
),
migrations.CreateModel(
name='OrderedItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_name', models.CharField(max_length=128, verbose_name='product name')),
('product_sku', models.CharField(max_length=32, verbose_name='sku')),
('quantity', models.IntegerField(verbose_name='quantity', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(999)])),
('unit_price_net', models.DecimalField(verbose_name='unit price (net)', max_digits=12, decimal_places=4)),
('unit_price_gross', models.DecimalField(verbose_name='unit price (gross)', max_digits=12, decimal_places=4)),
('delivery_group', models.ForeignKey(related_name='items', editable=False, to='order.DeliveryGroup')),
('product', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='product', blank=True, to='product.Product', null=True)),
],
bases=(models.Model, satchless.item.ItemLine),
),
migrations.CreateModel(
name='OrderHistoryEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last history change', editable=False)),
('status', models.CharField(max_length=32, verbose_name='order status', choices=[('new', 'Processing'), ('cancelled', 'Cancelled'), ('payment-pending', 'Waiting for payment'), ('fully-paid', 'Fully paid'), ('shipped', 'Shipped')])),
('comment', models.CharField(default='', max_length=100, blank=True)),
('order', models.ForeignKey(related_name='history', to='order.Order')),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['date'],
},
),
migrations.CreateModel(
name='OrderNote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('content', models.CharField(max_length=250)),
('order', models.ForeignKey(related_name='notes', to='order.Order')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('variant', models.CharField(max_length=255)),
('status', models.CharField(default='waiting', max_length=10, choices=[('waiting', 'Waiting for confirmation'), ('preauth', 'Pre-authorized'), ('confirmed', 'Confirmed'), ('rejected', 'Rejected'), ('refunded', 'Refunded'), ('error', 'Error'), ('input', 'Input')])),
('fraud_status', models.CharField(default='unknown', max_length=10, verbose_name='fraud check', choices=[('unknown', 'Unknown'), ('accept', 'Passed'), ('reject', 'Rejected'), ('review', 'Review')])),
('fraud_message', models.TextField(default='', blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('transaction_id', models.CharField(max_length=255, blank=True)),
('currency', models.CharField(max_length=10)),
('total', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('delivery', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('tax', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('description', models.TextField(default='', blank=True)),
('billing_first_name', models.CharField(max_length=256, blank=True)),
('billing_last_name', models.CharField(max_length=256, blank=True)),
('billing_address_1', models.CharField(max_length=256, blank=True)),
('billing_address_2', models.CharField(max_length=256, blank=True)),
('billing_city', models.CharField(max_length=256, blank=True)),
('billing_postcode', models.CharField(max_length=256, blank=True)),
('billing_country_code', models.CharField(max_length=2, blank=True)),
('billing_country_area', models.CharField(max_length=256, blank=True)),
('billing_email', models.EmailField(max_length=254, blank=True)),
('customer_ip_address', models.IPAddressField(blank=True)),
('extra_data', models.TextField(default='', blank=True)),
('message', models.TextField(default='', blank=True)),
('token', models.CharField(default='', max_length=36, blank=True)),
('captured_amount', models.DecimalField(default='0.0', max_digits=9, decimal_places=2)),
('order', models.ForeignKey(related_name='payments', to='order.Order')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='deliverygroup',
name='order',
field=models.ForeignKey(related_name='groups', editable=False, to='order.Order'),
),
]
|
dsprenkels/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/doc/conf.py
|
218
|
# -*- coding: utf-8 -*-
#
# py documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py'
copyright = u'2010, holger krekel et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import py
release = py.__version__
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py.tex', u'py Documentation',
u'holger krekel et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py', u'py Documentation',
[u'holger krekel et. al.'], 1)
]
autodoc_member_order = "bysource"
autodoc_default_flags = "inherited-members"
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'py'
epub_author = u'holger krekel et. al.'
epub_publisher = u'holger krekel et. al.'
epub_copyright = u'2010, holger krekel et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
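# Hedged build note, not part of the original file: with this conf.py the
# docs are typically built from the doc directory with something like
#   sphinx-build -b html . _build/html
# (the output path is an assumption; '_build' matches exclude_patterns above).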
|
H1ghT0p/kitsune
|
refs/heads/master
|
kitsune/upload/models.py
|
15
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from kitsune.sumo.helpers import reverse
from kitsune.sumo.models import ModelBase
from kitsune.sumo.utils import auto_delete_files
@auto_delete_files
class ImageAttachment(ModelBase):
"""An image attached to an object using a generic foreign key"""
file = models.ImageField(upload_to=settings.IMAGE_UPLOAD_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
thumbnail = models.ImageField(upload_to=settings.THUMBNAIL_UPLOAD_PATH,
null=True)
creator = models.ForeignKey(User, related_name='image_attachments')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return self.file.name
def get_absolute_url(self):
return self.file.url
def thumbnail_if_set(self):
"""Returns self.thumbnail, if set, else self.file"""
return self.thumbnail if self.thumbnail else self.file
def get_delete_url(self):
"""Returns the URL to delete this object. Assumes the object has an
id."""
return reverse('upload.del_image_async', args=[self.id])
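# Hedged usage sketch, not part of the original module: attaching an uploaded
# image to an arbitrary object through the generic foreign key. `question`
# and `django_file` are placeholders for a target object and an uploaded file.
#
#   attachment = ImageAttachment(creator=request.user,
#                                content_object=question)
#   attachment.file.save(django_file.name, django_file, save=True)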
|
gnuhub/intellij-community
|
refs/heads/master
|
python/helpers/profiler/thrift/TTornado.py
|
105
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import socket
import struct
import logging
logger = logging.getLogger(__name__)
from thrift.transport.TTransport import TTransportException, TTransportBase, TMemoryBuffer
from io import BytesIO
from collections import deque
from contextlib import contextmanager
from tornado import gen, iostream, ioloop, tcpserver, concurrent
__all__ = ['TTornadoServer', 'TTornadoStreamTransport']
class _Lock(object):
def __init__(self):
self._waiters = deque()
def acquired(self):
return len(self._waiters) > 0
@gen.coroutine
def acquire(self):
blocker = self._waiters[-1] if self.acquired() else None
future = concurrent.Future()
self._waiters.append(future)
if blocker:
yield blocker
raise gen.Return(self._lock_context())
def release(self):
        assert self.acquired(), 'Lock not acquired'
future = self._waiters.popleft()
future.set_result(None)
@contextmanager
def _lock_context(self):
try:
yield
finally:
self.release()
class TTornadoStreamTransport(TTransportBase):
"""a framed, buffered transport over a Tornado stream"""
def __init__(self, host, port, stream=None, io_loop=None):
self.host = host
self.port = port
self.io_loop = io_loop or ioloop.IOLoop.current()
self.__wbuf = BytesIO()
self._read_lock = _Lock()
# servers provide a ready-to-go stream
self.stream = stream
def with_timeout(self, timeout, future):
return gen.with_timeout(timeout, future, self.io_loop)
@gen.coroutine
def open(self, timeout=None):
logger.debug('socket connecting')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = iostream.IOStream(sock)
try:
connect = self.stream.connect((self.host, self.port))
if timeout is not None:
yield self.with_timeout(timeout, connect)
else:
yield connect
except (socket.error, IOError, ioloop.TimeoutError) as e:
message = 'could not connect to {}:{} ({})'.format(self.host, self.port, e)
raise TTransportException(
type=TTransportException.NOT_OPEN,
message=message)
raise gen.Return(self)
def set_close_callback(self, callback):
"""
Should be called only after open() returns
"""
self.stream.set_close_callback(callback)
def close(self):
# don't raise if we intend to close
self.stream.set_close_callback(None)
self.stream.close()
def read(self, _):
# The generated code for Tornado shouldn't do individual reads -- only
# frames at a time
assert False, "you're doing it wrong"
@contextmanager
def io_exception_context(self):
try:
yield
except (socket.error, IOError) as e:
raise TTransportException(
type=TTransportException.END_OF_FILE,
message=str(e))
except iostream.StreamBufferFullError as e:
raise TTransportException(
type=TTransportException.UNKNOWN,
message=str(e))
@gen.coroutine
def readFrame(self):
# IOStream processes reads one at a time
with (yield self._read_lock.acquire()):
with self.io_exception_context():
frame_header = yield self.stream.read_bytes(4)
if len(frame_header) == 0:
raise iostream.StreamClosedError('Read zero bytes from stream')
frame_length, = struct.unpack('!i', frame_header)
frame = yield self.stream.read_bytes(frame_length)
raise gen.Return(frame)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
frame = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
frame_length = struct.pack('!i', len(frame))
self.__wbuf = BytesIO()
with self.io_exception_context():
return self.stream.write(frame_length + frame)
class TTornadoServer(tcpserver.TCPServer):
def __init__(self, processor, iprot_factory, oprot_factory=None,
*args, **kwargs):
super(TTornadoServer, self).__init__(*args, **kwargs)
self._processor = processor
self._iprot_factory = iprot_factory
self._oprot_factory = (oprot_factory if oprot_factory is not None
else iprot_factory)
@gen.coroutine
def handle_stream(self, stream, address):
host, port = address
trans = TTornadoStreamTransport(host=host, port=port, stream=stream,
io_loop=self.io_loop)
oprot = self._oprot_factory.getProtocol(trans)
try:
while not trans.stream.closed():
frame = yield trans.readFrame()
tr = TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
yield self._processor.process(iprot, oprot)
except Exception:
logger.exception('thrift exception in handle_stream')
trans.close()
logger.info('client disconnected %s:%d', host, port)
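# Hedged wiring sketch, not part of the original module: serving a Thrift
# handler with this server. MyService and Handler stand in for generated
# service code and an application handler.
#
#   from thrift.protocol import TBinaryProtocol
#   processor = MyService.Processor(Handler())
#   pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#   server = TTornadoServer(processor, pfactory)
#   server.listen(9090)
#   ioloop.IOLoop.current().start()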
|
laperry1/android_external_chromium_org
|
refs/heads/cm-12.1
|
tools/cr/cr/autocomplete.py
|
103
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bash auto completion support.
Contains the special mode that returns lists of possible completions for the
current command line.
"""
import cr
def Complete():
"""Attempts to build a completion list for the current command line.
COMP_WORD contains the word that is being completed, and COMP_CWORD has
the index of that word on the command line.
"""
# TODO(iancottrell): support auto complete of more than just the command
# try to parse the command line using parser
print ' '.join(command.name for command in cr.Command.Plugins())
|
wackywendell/numpy
|
refs/heads/master
|
numpy/polynomial/tests/test_classes.py
|
66
|
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
run_module_suite)
from numpy.compat import long
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE)
def test_class_methods():
for Poly1 in classes:
for Poly2 in classes:
yield check_conversion, Poly1, Poly2
yield check_cast, Poly1, Poly2
for Poly in classes:
yield check_call, Poly
yield check_identity, Poly
yield check_basis, Poly
yield check_fromroots, Poly
yield check_fit, Poly
yield check_equal, Poly
yield check_not_equal, Poly
yield check_add, Poly
yield check_sub, Poly
yield check_mul, Poly
yield check_floordiv, Poly
yield check_truediv, Poly
yield check_mod, Poly
yield check_divmod, Poly
yield check_pow, Poly
yield check_integ, Poly
yield check_deriv, Poly
yield check_roots, Poly
yield check_linspace, Poly
yield check_mapparms, Poly
yield check_degree, Poly
yield check_copy, Poly
yield check_cutdeg, Poly
yield check_truncate, Poly
yield check_trim, Poly
#
# helper functions
#
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
try:
assert_(np.all(p1.domain == p2.domain))
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
msg = "Result: %s\nTarget: %s", (p1, p2)
raise AssertionError(msg)
#
# conversion methods that depend on two classes
#
def check_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
def check_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = Poly2.cast(p1, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
#
# methods that depend on one class
#
def check_identity(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
x = np.linspace(d[0], d[1], 11)
p = Poly.identity(domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_almost_equal(p(x), x)
def check_basis(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_equal(p.coef, [0]*5 + [1])
def check_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
r = random((5,))
p1 = Poly.fromroots(r, domain=d, window=w)
assert_equal(p1.degree(), len(r))
assert_equal(p1.domain, d)
assert_equal(p1.window, w)
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
pdom = Polynomial.domain
pwin = Polynomial.window
p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
def check_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
x = np.linspace(0, 3)
y = f(x)
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
# check with given domains and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
def check_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def check_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def check_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 + p2
assert_poly_almost_equal(p2 + p1, p3)
assert_poly_almost_equal(p1 + c2, p3)
assert_poly_almost_equal(c2 + p1, p3)
assert_poly_almost_equal(p1 + tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) + p1, p3)
assert_poly_almost_equal(p1 + np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) + p1, p3)
assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.add, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.add, p1, Polynomial([0]))
def check_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 - p2
assert_poly_almost_equal(p2 - p1, -p3)
assert_poly_almost_equal(p1 - c2, p3)
assert_poly_almost_equal(c2 - p1, -p3)
assert_poly_almost_equal(p1 - tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) - p1, -p3)
assert_poly_almost_equal(p1 - np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) - p1, -p3)
assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def check_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 * p2
assert_poly_almost_equal(p2 * p1, p3)
assert_poly_almost_equal(p1 * c2, p3)
assert_poly_almost_equal(c2 * p1, p3)
assert_poly_almost_equal(p1 * tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) * p1, p3)
assert_poly_almost_equal(p1 * np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) * p1, p3)
assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
def check_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 // p2, p1)
assert_poly_almost_equal(p4 // c2, p1)
assert_poly_almost_equal(c4 // p2, p1)
assert_poly_almost_equal(p4 // tuple(c2), p1)
assert_poly_almost_equal(tuple(c4) // p2, p1)
assert_poly_almost_equal(p4 // np.array(c2), p1)
assert_poly_almost_equal(np.array(c4) // p2, p1)
assert_poly_almost_equal(2 // p2, Poly([0]))
assert_poly_almost_equal(p2 // 2, 0.5*p2)
assert_raises(
TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(
TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
def check_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1,2,3])
p2 = p1 * 5
for stype in np.ScalarType:
if not issubclass(stype, Number) or issubclass(stype, bool):
continue
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in (int, long, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in [complex]:
s = stype(5, 0)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for s in [tuple(), list(), dict(), bool(), np.array([1])]:
assert_raises(TypeError, op.truediv, p2, s)
assert_raises(TypeError, op.truediv, s, p2)
for ptype in classes:
assert_raises(TypeError, op.truediv, p2, ptype(1))
def check_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 % p2, p3)
assert_poly_almost_equal(p4 % c2, p3)
assert_poly_almost_equal(c4 % p2, p3)
assert_poly_almost_equal(p4 % tuple(c2), p3)
assert_poly_almost_equal(tuple(c4) % p2, p3)
assert_poly_almost_equal(p4 % np.array(c2), p3)
assert_poly_almost_equal(np.array(c4) % p2, p3)
assert_poly_almost_equal(2 % p2, Poly([2]))
assert_poly_almost_equal(p2 % 2, Poly([0]))
assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
def check_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
quo, rem = divmod(p4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, c2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(c4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, tuple(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(tuple(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, np.array(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(np.array(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p2, 2)
assert_poly_almost_equal(quo, 0.5*p2)
assert_poly_almost_equal(rem, Poly([0]))
quo, rem = divmod(2, p2)
assert_poly_almost_equal(quo, Poly([0]))
assert_poly_almost_equal(rem, Poly([2]))
assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, divmod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, divmod, p1, Polynomial([0]))
def check_roots(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = np.sort(random((5,)))
res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
assert_almost_equal(res, tgt)
# default domain and window
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def check_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def check_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def check_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
# Check with k
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(k=1))
p2 = P.cast(p0.integ(2, k=[1, 1]))
assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
# Check with lbnd
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(lbnd=1))
p2 = P.cast(p0.integ(2, lbnd=1))
assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
# Check scaling
d = 2*Poly.domain
p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def check_deriv(Poly):
# Check that the derivative is the inverse of integration. It is
    # assumed that the integration has been checked elsewhere.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p1 = Poly([1, 2, 3], domain=d, window=w)
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
# default domain and window
p1 = Poly([1, 2, 3])
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
def check_linspace(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly([1, 2, 3], domain=d, window=w)
# check default domain
xtgt = np.linspace(d[0], d[1], 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20)
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
# check specified domain
xtgt = np.linspace(0, 2, 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20, domain=[0, 2])
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
def check_pow(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = Poly([1], domain=d, window=w)
tst = Poly([1, 2, 3], domain=d, window=w)
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# default domain and window
tgt = Poly([1])
tst = Poly([1, 2, 3])
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# check error for invalid powers
assert_raises(ValueError, op.pow, tgt, 1.5)
assert_raises(ValueError, op.pow, tgt, -1)
def check_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x*(2 + 3*x)
res = p(x)
assert_almost_equal(res, tgt)
def check_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def check_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def check_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def check_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2*d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
if __name__ == "__main__":
run_module_suite()
|
magudev17/gifsta
|
refs/heads/master
|
gifspool/protection.py
|
2
|
import re
import random
import string
import hashlib
import hmac
USERNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASSWORD_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
USERNAME_ERROR = "That's not a valid username."
PASSWORD_ERROR = "That wasn't a valid password."
V_PASSWORD_ERROR = "Your passwords didn't match."
EMAIL_ERROR = "That's not a valid email."
EXISTS_ERROR = "That user already exists"
LOGIN_ERROR = "Invalid login"
###################
####HASHING#######
###################
class Protection(object):
SECRET = 'imsosecret'
@staticmethod
def hash_str(s, secret = SECRET):
return hmac.new(secret, s).hexdigest()
@staticmethod
def make_secure_val(s):
return "%s|%s" % (s, Protection.hash_str(s))
@staticmethod
def check_secure_val(h):
val = h.split('|')[0]
if h == Protection.make_secure_val(val):
return val
@staticmethod
def make_salt():
return ''.join(random.choice(string.letters) for x in xrange(5))
@staticmethod
def make_pw_hash(name, pw, salt=None):
if not salt:
salt = Protection.make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (h, salt)
@staticmethod
def valid_pw(name, pw, h):
hashed, salt = h.split(',')
return hashlib.sha256(name + pw + salt).hexdigest() == hashed
@staticmethod
    def chek_username(username):
return username and USERNAME_RE.match(username)
@staticmethod
    def chek_password(password):
return password and PASSWORD_RE.match(password)
@staticmethod
    def chek_email(email):
return not email or EMAIL_RE.match(email)
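# Hedged usage sketch, not part of the original module: password and cookie
# value round trips with the helpers above. The literal values are
# placeholders.
#
#   pw_hash = Protection.make_pw_hash('alice', 's3cret')
#   assert Protection.valid_pw('alice', 's3cret', pw_hash)
#
#   cookie_val = Protection.make_secure_val('42')
#   assert Protection.check_secure_val(cookie_val) == '42'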
|