| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python -tt
# vm-mgmt connects to a vCenter/ESX Server to execute tasks
# and generate reports
# Copyright: (c) 2014 Jonar M.
# License: BSD, see LICENSE for more details.
import sys
import os
import re
import inspect
import getpass
import time
import pysphere
from pysphere import VIServer, VIProperty, MORTypes
def get_user_info():
""" Obtain accoount details through user input"""
user = raw_input('Enter username: ')
if len(user) == 0 or user.isspace():
sys.exit('Error: Empty username')
passwd = getpass.getpass('Password: ')
vcenter = raw_input('Enter vCenter/ESX Server: ')
if len(vcenter) == 0 or vcenter.isspace():
sys.exit('Error: Empty vCenter/ESX Server')
return (user, passwd, vcenter)
def get_serverlist():
"""Obtain hostname listed on serverlist file through user input"""
serverlist = raw_input('Enter the serverlist file: ')
try:
f = open(serverlist)
except IOError:
sys.exit('IOError: No such file or directory. Also check the file permissions')
servers = [line.strip() for line in f.readlines() if line.strip()]
f.close()
servercount = len(servers)
return (servers, servercount)
def getserver_type_api(server):
    """Display the server type (vCenter or ESX) and the VMware API version"""
    print 'Server Type: ', server.get_server_type()
    print 'API Version: ', server.get_api_version()
def vm_migrate():
"""Migrate the VMs based on the list given"""
user, passwd, vcenter = get_user_info()
servers, servercount = get_serverlist()
s = VIServer()
s.connect(vcenter, user, passwd)
getserver_type_api(s)
esxhost_input = raw_input('Enter target ESX Host: ')
esxhost = s.get_hosts().items()
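# get_hosts() returns a mapping of managed object reference (MOR) to host
# name, so match the user's input against the name and migrate to the MOR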
for k,v in esxhost:
if v == esxhost_input:
for host in servers:
host = host.strip()
vm = s.get_vm_by_name(host)
vm.migrate(host=k)
time.sleep(10)
s.disconnect()
def menu(options, question):
    """Display the list of options or tasks that the user may choose from"""
    for i, entry in enumerate(options, start=1):
        print '[', i, '] ' + entry
    # input() evaluates the reply, so entering 1 returns the integer 1
    return input(question)
def main():
    items = ['Vmotion servers based on given list',
             'Exit']
    while True:
        choice = menu(items, "Select an option: ")
        if choice == 1:
            vm_migrate()
        elif choice == 2:
            sys.exit('Bye!')
        else:
            print 'Choose from the options only!'
if __name__ == '__main__':
main()
| jonarm/vm-mgmt | vm-mgmt.py | Python | bsd-2-clause | 2,632 |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
from rapid.lib.constants import Constants
class AbstractParser(object):
__metaclass__ = abc.ABCMeta
def __init__(self, workspace='', failures_only=False, failures_count=False):
self.workspace = workspace
self.failures_only = failures_only
self.failures_count = failures_count
@abc.abstractmethod
def _parse_lines(self, lines):
yield
@staticmethod
@abc.abstractmethod
def get_type():
yield
def prepare_summary(self):
return {Constants.STATUS_FAILED: 0, Constants.STATUS_SUCCESS: 0, Constants.STATUS_SKIPPED: 0, Constants.FAILURES_COUNT: self.failures_count}
def parse(self, lines, ignore_type_check=False):
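"""Parse `lines`, optionally verifying the first-line type identifier.
The first line of a payload names the parser type; unless
`ignore_type_check` is True it must equal `get_type()` and is skipped
before the remaining lines are handed to `_parse_lines`.
"""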
if self.get_type() != lines[0].strip() and not ignore_type_check:
raise Exception("Invalid first line identifier")
if ignore_type_check:
return self._parse_lines(lines)
return self._parse_lines(lines[1:])
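# A hedged usage sketch (an editor's addition, not part of BambooHR/rapid):
# a minimal concrete subclass showing how AbstractParser is intended to be
# extended. The '#example#' identifier and the 'status|name' line format
# are hypothetical; the real parsers in this package define their own.
class ExampleParser(AbstractParser):
    @staticmethod
    def get_type():
        # The first line of a payload must equal this identifier (see parse).
        return '#example#'

    def _parse_lines(self, lines):
        # Yield a (status, test name) tuple for each remaining line.
        for line in lines:
            status, _, name = line.strip().partition('|')
            yield status, name

# For example, list(ExampleParser().parse(['#example#', 'SUCCESS|test_one']))
# evaluates to [('SUCCESS', 'test_one')].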
| BambooHR/rapid | rapid/client/parsers/abstract_parser.py | Python | apache-2.0 | 1,564 |
# -*- coding: utf-8 -*-
"""
Dynamic factor model.
Author: Chad Fulton
License: BSD-3
"""
from collections import OrderedDict
from warnings import warn
import numpy as np
import pandas as pd
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.validation import int_like
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.multivariate.pca import PCA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace._quarterly_ar1 import QuarterlyAR1
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.validation import string_like
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tsa.statespace import mlemodel, initialization
from statsmodels.tsa.statespace.tools import (
companion_matrix, is_invertible, constrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_univariate,
unconstrain_stationary_multivariate)
from statsmodels.tsa.statespace.kalman_smoother import (
SMOOTHER_STATE, SMOOTHER_STATE_COV, SMOOTHER_STATE_AUTOCOV)
from statsmodels.base.data import PandasData
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.tableformatting import fmt_params
class FactorBlock(dict):
"""
Helper class for describing and indexing a block of factors.
Parameters
----------
factor_names : tuple of str
Tuple of factor names in the block (in the order that they will appear
in the state vector).
factor_order : int
Order of the vector autoregression governing the factor block dynamics.
endog_factor_map : pd.DataFrame
Mapping from endog variable names to factor names.
state_offset : int
Offset of this factor block in the state vector.
k_endog_Q : int
Number of quarterly variables in the model.
Notes
-----
The goal of this class is, in particular, to make it easier to retrieve
indexes of subsets of the state vector that are associated with a
particular block of factors.
- `factors_ix` is a matrix of indices, with rows corresponding to factors
in the block and columns corresponding to lags
- `factors` is vec(factors_ix) (i.e. it stacks columns, so that it is
`factors_ix.ravel(order='F')`). Thinking about a VAR system, the first
k*p elements correspond to the equation for the first variable. The next
k*p elements correspond to the equation for the second variable, and so
on. It contains all of the lags in the state vector, which is max(5, p)
- `factors_ar` is the subset of `factors` that have nonzero coefficients,
so it contains lags up to p.
- `factors_L1` only contains the first lag of the factors
- `factors_L1_5` contains the first - fifth lags of the factors
"""
def __init__(self, factor_names, factor_order, endog_factor_map,
state_offset, k_endog_Q):
self.factor_names = factor_names
self.k_factors = len(self.factor_names)
self.factor_order = factor_order
self.endog_factor_map = endog_factor_map.loc[:, factor_names]
self.state_offset = state_offset
self.k_endog_Q = k_endog_Q
if self.k_endog_Q > 0:
self._factor_order = max(5, self.factor_order)
else:
self._factor_order = self.factor_order
self.k_states = self.k_factors * self._factor_order
# Save items
self['factors'] = self.factors
self['factors_ar'] = self.factors_ar
self['factors_ix'] = self.factors_ix
self['factors_L1'] = self.factors_L1
self['factors_L1_5'] = self.factors_L1_5
@property
def factors_ix(self):
"""Factor state index array, shaped (k_factors, lags)."""
# i.e. the position in the state vector of the second lag of the third
# factor is factors_ix[2, 1]
# ravel(order='F') gives e.g (f0.L1, f1.L1, f0.L2, f1.L2, f0.L3, ...)
# while
# ravel(order='C') gives e.g (f0.L1, f0.L2, f0.L3, f1.L1, f1.L2, ...)
o = self.state_offset
return np.reshape(o + np.arange(self.k_factors * self._factor_order),
(self._factor_order, self.k_factors)).T
@property
def factors(self):
"""Factors and all lags in the state vector (max(5, p))."""
# Note that this is equivalent to factors_ix with ravel(order='F')
o = self.state_offset
return np.s_[o:o + self.k_factors * self._factor_order]
@property
def factors_ar(self):
"""Factors and all lags used in the factor autoregression (p)."""
o = self.state_offset
return np.s_[o:o + self.k_factors * self.factor_order]
@property
def factors_L1(self):
"""Factors (first block / lag only)."""
o = self.state_offset
return np.s_[o:o + self.k_factors]
@property
def factors_L1_5(self):
"""Factors plus four lags."""
o = self.state_offset
return np.s_[o:o + self.k_factors * 5]
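# Hedged illustrative sketch (an editor's addition, not part of the original
# statsmodels module): demonstrates how the FactorBlock index helpers slice
# a small state vector. The endog/factor names are made up for the example.
def _factor_block_indexing_sketch():
    endog_factor_map = pd.DataFrame(
        True, index=pd.Index(['y1', 'y2'], name='endog'),
        columns=pd.Index(['f1', 'f2'], name='factor'))
    # Two factors, VAR(1) dynamics, no quarterly data, block starting at
    # position 0 of the state vector, so only one lag is kept per factor.
    block = FactorBlock(['f1', 'f2'], 1, endog_factor_map,
                        state_offset=0, k_endog_Q=0)
    # block.k_states == 2, block['factors'] == slice(0, 2, None),
    # block['factors_L1'] is the same slice, and block.factors_ix is
    # array([[0], [1]]) (rows are factors, columns are lags).
    return block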
class DynamicFactorMQStates(dict):
"""
Helper class for describing and indexing the state vector.
Parameters
----------
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
endog_names : list
Names of the endogenous variables.
factors : int, list, or dict
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, ....
factor_orders : int or dict
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
Attributes
----------
k_endog : int
Total number of endogenous variables.
k_states : int
Total number of state variables (those associated with the factors and
those associated with the idiosyncratic disturbances).
k_posdef : int
Total number of state disturbance terms (those associated with the
factors and those associated with the idiosyncratic disturbances).
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
k_factors : int
Total number of factors. Note that factor multiplicities will have
already been expanded.
k_states_factors : int
The number of state variables associated with factors (includes both
factors and lags of factors included in the state vector).
k_posdef_factors : int
The number of state disturbance terms associated with factors.
k_states_idio : int
Total number of state variables associated with idiosyncratic
disturbances.
k_posdef_idio : int
Total number of state disturbance terms associated with idiosyncratic
disturbances.
k_states_idio_M : int
The number of state variables associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_states_idio_Q : int
The number of state variables associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q * 5`, even if the disturbances are not AR(1).
k_posdef_idio_M : int
The number of state disturbance terms associated with idiosyncratic
disturbances for monthly (or non-time-specific if there are no
quarterly variables) variables. If the disturbances are AR(1), then
this will be equal to `k_endog_M`, otherwise it will be equal to zero.
k_posdef_idio_Q : int
The number of state disturbance terms associated with idiosyncratic
disturbances for quarterly variables. This will always be equal to
`k_endog_Q`, even if the disturbances are not AR(1).
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process.
factor_blocks : list of FactorBlock
List of `FactorBlock` helper instances for each factor block.
factor_names : list of str
List of factor names.
factors : dict
Dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
Note that factor multiplicities will have already been expanded.
factor_orders : dict
Dictionary with:
- keys : tuple of factor names
- values : integer describing autoregression order
Note that factor multiplicities will have already been expanded.
max_factor_order : int
Maximum autoregression order across all factor blocks.
factor_block_orders : pd.Series
Series containing lag orders, with the factor block (a tuple of factor
names) as the index.
factor_multiplicities : dict
Dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
endog_factor_map : dict
Dictionary with:
- keys : endog name
- values : list of factor names
loading_counts : pd.Series
Series containing number of endogenous variables loading on each
factor, with the factor name as the index.
block_loading_counts : dict
Dictionary with:
- keys : tuple of factor names
- values : average number of endogenous variables loading on the block
(note that average is over the factors in the block)
Notes
-----
The goal of this class is, in particular, to make it easier to retrieve
indexes of subsets of the state vector.
Note that the ordering of the factor blocks in the state vector is
determined by the `factor_orders` argument if a dictionary. Otherwise,
factors are ordered so that factors that load on more variables come first
(and then alphabetically, to break ties).
- `factors_L1` is an array with the indexes of first lag of the factors
from each block. Ordered first by block, and then by lag.
- `factors_L1_5` is an array with the indexes of the first - fifth
lags of the factors from each block. Ordered first by block, and then by
lag.
- `factors_L1_5_ix` is an array shaped (5, k_factors) with the indexes
of the first - fifth lags of the factors from each block.
- `idio_ar_L1` is an array with the indexes of the first lag of the
idiosyncratic AR states, both monthly (if applicable) and quarterly.
- `idio_ar_M` is a slice with the indexes of the idiosyncratic disturbance
states for the monthly (or non-time-specific if there are no quarterly
variables) variables. It is an empty slice if
`idiosyncratic_ar1 = False`.
- `idio_ar_Q` is a slice with the indexes of the idiosyncratic disturbance
states and all lags, for the quarterly variables. It is an empty slice if
there are no quarterly variables.
- `idio_ar_Q_ix` is an array shaped (k_endog_Q, 5) with the indexes of the
first - fifth lags of the idiosyncratic disturbance states for the
quarterly variables.
- `endog_factor_iloc` is a list of lists, with entries for each endogenous
variable. The entry for variable `i`, `endog_factor_iloc[i]` is a list of
indexes of the factors that variable `i` loads on. This does not include
any lags, but it can be used with e.g. `factors_L1_5_ix` to get lags.
"""
def __init__(self, k_endog_M, k_endog_Q, endog_names, factors,
factor_orders, factor_multiplicities, idiosyncratic_ar1):
# Save model parameterization
self.k_endog_M = k_endog_M
self.k_endog_Q = k_endog_Q
self.k_endog = self.k_endog_M + self.k_endog_Q
self.idiosyncratic_ar1 = idiosyncratic_ar1
# Validate factor-related inputs
factors_is_int = np.issubdtype(type(factors), np.integer)
factors_is_list = isinstance(factors, (list, tuple))
orders_is_int = np.issubdtype(type(factor_orders), np.integer)
if factor_multiplicities is None:
factor_multiplicities = 1
mult_is_int = np.issubdtype(type(factor_multiplicities), np.integer)
if not (factors_is_int or factors_is_list or
isinstance(factors, dict)):
raise ValueError('`factors` argument must be an integer number of'
' factors, a list of global factor names, or a'
' dictionary, mapping observed variables to'
' factors.')
if not (orders_is_int or isinstance(factor_orders, dict)):
raise ValueError('`factor_orders` argument must either be an'
' integer or a dictionary.')
if not (mult_is_int or isinstance(factor_multiplicities, dict)):
raise ValueError('`factor_multiplicities` argument must either be'
' an integer or a dictionary.')
# Expand integers
# If `factors` is an integer, we assume that it denotes the number of
# global factors (factors that load on each variable)
if factors_is_int or factors_is_list:
# Validate this here for a more informative error message
if ((factors_is_int and factors == 0) or
(factors_is_list and len(factors) == 0)):
raise ValueError('The model must contain at least one factor.')
if factors_is_list:
factor_names = list(factors)
else:
factor_names = [f'{i}' for i in range(factors)]
factors = {name: factor_names[:] for name in endog_names}
factor_names = set(np.concatenate(list(factors.values())))
if orders_is_int:
factor_orders = {factor_name: factor_orders
for factor_name in factor_names}
if mult_is_int:
factor_multiplicities = {factor_name: factor_multiplicities
for factor_name in factor_names}
# Apply the factor multiplicities
factors, factor_orders = self._apply_factor_multiplicities(
factors, factor_orders, factor_multiplicities)
# Save the (potentially expanded) variables
self.factors = factors
self.factor_orders = factor_orders
self.factor_multiplicities = factor_multiplicities
# Get the mapping between endog and factors
self.endog_factor_map = self._construct_endog_factor_map(
factors, endog_names)
self.k_factors = self.endog_factor_map.shape[1]
# Validate number of factors
# TODO: could do more extensive validation here.
if self.k_factors > self.k_endog_M:
raise ValueError(f'Number of factors ({self.k_factors}) cannot be'
' greater than the number of monthly endogenous'
f' variables ({self.k_endog_M}).')
# Get `loading_counts`: factor -> # endog loading on the factor
self.loading_counts = (
self.endog_factor_map.sum(axis=0).rename('count')
.reset_index().sort_values(['count', 'factor'],
ascending=[False, True])
.set_index('factor'))
# `block_loading_counts`: block -> average of (# loading on factor)
# across each factor in the block
block_loading_counts = {
block: np.atleast_1d(
self.loading_counts.loc[list(block), 'count']).mean(axis=0)
for block in factor_orders.keys()}
ix = pd.Index(block_loading_counts.keys(), tupleize_cols=False,
name='block')
self.block_loading_counts = pd.Series(
list(block_loading_counts.values()),
index=ix, name='count').to_frame().sort_values(
['count', 'block'], ascending=[False, True])['count']
# Get the mapping between factor blocks and VAR order
# `factor_block_orders`: pd.Series of factor block -> lag order
ix = pd.Index(factor_orders.keys(), tupleize_cols=False, name='block')
self.factor_block_orders = pd.Series(
list(factor_orders.values()), index=ix, name='order')
# If the `factor_orders` variable was an integer, then it did not
# define an ordering for the factor blocks. In this case, we use the
# loading counts to do so. This ensures that e.g. global factors are
# listed first.
if orders_is_int:
keys = self.block_loading_counts.keys()
self.factor_block_orders = self.factor_block_orders.loc[keys]
self.factor_block_orders.index.name = 'block'
# Define factor_names based on factor_block_orders (instead of on those
# from `endog_factor_map`) to (a) make sure that factors are allocated
# to only one block, and (b) order the factor names to be consistent
# with the block definitions.
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
missing = [name for name in self.endog_factor_map.columns
if name not in factor_names.tolist()]
if len(missing):
ix = pd.Index([(factor_name,) for factor_name in missing],
tupleize_cols=False, name='block')
default_block_orders = pd.Series(np.ones(len(ix), dtype=int),
index=ix, name='order')
self.factor_block_orders = (
self.factor_block_orders.append(default_block_orders))
factor_names = pd.Series(
np.concatenate(list(self.factor_block_orders.index)))
duplicates = factor_names.duplicated()
if duplicates.any():
duplicate_names = set(factor_names[duplicates])
raise ValueError('Each factor can be assigned to at most one'
' block of factors in `factor_orders`.'
f' Duplicate entries for {duplicate_names}')
self.factor_names = factor_names.tolist()
self.max_factor_order = np.max(self.factor_block_orders)
# Re-order the columns of the endog factor mapping to reflect the
# orderings of endog_names and factor_names
self.endog_factor_map = (
self.endog_factor_map.loc[endog_names, factor_names])
# Create factor block helpers, and get factor-related state and posdef
# dimensions
self.k_states_factors = 0
self.k_posdef_factors = 0
state_offset = 0
self.factor_blocks = []
for factor_names, factor_order in self.factor_block_orders.items():
block = FactorBlock(factor_names, factor_order,
self.endog_factor_map, state_offset,
self.k_endog_Q)
self.k_states_factors += block.k_states
self.k_posdef_factors += block.k_factors
state_offset += block.k_states
self.factor_blocks.append(block)
# Idiosyncratic state dimensions
self.k_states_idio_M = self.k_endog_M if idiosyncratic_ar1 else 0
self.k_states_idio_Q = self.k_endog_Q * 5
self.k_states_idio = self.k_states_idio_M + self.k_states_idio_Q
# Idiosyncratic posdef dimensions
self.k_posdef_idio_M = self.k_endog_M if self.idiosyncratic_ar1 else 0
self.k_posdef_idio_Q = self.k_endog_Q
self.k_posdef_idio = self.k_posdef_idio_M + self.k_posdef_idio_Q
# Total states, posdef
self.k_states = self.k_states_factors + self.k_states_idio
self.k_posdef = self.k_posdef_factors + self.k_posdef_idio
# Cache
self._endog_factor_iloc = None
def _apply_factor_multiplicities(self, factors, factor_orders,
factor_multiplicities):
"""
Expand `factors` and `factor_orders` to account for factor multiplicity.
For example, if there is a `global` factor with multiplicity 2, then
this method expands that into `global.1` and `global.2` in both the
`factors` and `factor_orders` dictionaries.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
factor_orders : dict
Dictionary of {tuple of factor names: factor order}
factor_multiplicities : dict
Dictionary of {factor name: factor multiplicity}
Returns
-------
new_factors : dict
Dictionary of {endog_name: list of factor names}, with factor names
expanded to incorporate multiplicities.
new_factor_orders : dict
Dictionary of {tuple of factor names: factor order}, with factor
names in each tuple expanded to incorporate multiplicities.
"""
# Expand the factors to account for the multiplicities
new_factors = {}
for endog_name, factors_list in factors.items():
new_factor_list = []
for factor_name in factors_list:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_factor_list += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_factor_list.append(factor_name)
new_factors[endog_name] = new_factor_list
# Expand the factor orders to account for the multiplicities
new_factor_orders = {}
for block, factor_order in factor_orders.items():
if not isinstance(block, tuple):
block = (block,)
new_block = []
for factor_name in block:
n = factor_multiplicities.get(factor_name, 1)
if n > 1:
new_block += [f'{factor_name}.{i + 1}'
for i in range(n)]
else:
new_block += [factor_name]
new_factor_orders[tuple(new_block)] = factor_order
return new_factors, new_factor_orders
def _construct_endog_factor_map(self, factors, endog_names):
"""
Construct mapping of observed variables to factors.
Parameters
----------
factors : dict
Dictionary of {endog_name: list of factor names}
endog_names : list of str
List of the names of the observed variables.
Returns
-------
endog_factor_map : pd.DataFrame
Boolean dataframe with `endog_names` as the index and the factor
names (computed from the `factors` input) as the columns. Each cell
is True if the associated factor is allowed to load on the
associated observed variable.
"""
# Validate that all entries in the factors dictionary have associated
# factors
missing = []
for key, value in factors.items():
if not isinstance(value, (list, tuple)) or len(value) == 0:
missing.append(key)
if len(missing):
raise ValueError('Each observed variable must be mapped to at'
' least one factor in the `factors` dictionary.'
f' Variables missing factors are: {missing}.')
# Validate that we have been told about the factors for each endog
# variable. This is because it doesn't make sense to include an
# observed variable that doesn't load on any factor
missing = set(endog_names).difference(set(factors.keys()))
if len(missing):
raise ValueError('If a `factors` dictionary is provided, then'
' it must include entries for each observed'
f' variable. Missing variables are: {missing}.')
# Figure out the set of factor names
# (0 is just a dummy value for the dict - we just do it this way to
# collect the keys, in order, without duplicates.)
factor_names = {}
for key, value in factors.items():
if isinstance(value, str):
factor_names[value] = 0
else:
factor_names.update({v: 0 for v in value})
factor_names = list(factor_names.keys())
k_factors = len(factor_names)
endog_factor_map = pd.DataFrame(
np.zeros((self.k_endog, k_factors), dtype=bool),
index=pd.Index(endog_names, name='endog'),
columns=pd.Index(factor_names, name='factor'))
for key, value in factors.items():
endog_factor_map.loc[key, value] = True
return endog_factor_map
@property
def factors_L1(self):
"""Factors."""
ix = np.arange(self.k_states_factors)
iloc = tuple(ix[block.factors_L1] for block in self.factor_blocks)
return np.concatenate(iloc)
@property
def factors_L1_5_ix(self):
"""Factors plus any lags, index shaped (5, k_factors)."""
ix = np.arange(self.k_states_factors)
iloc = []
for block in self.factor_blocks:
iloc.append(ix[block.factors_L1_5].reshape(5, block.k_factors))
return np.concatenate(iloc, axis=1)
@property
def idio_ar_L1(self):
"""Idiosyncratic AR states, (first block / lag only)."""
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix2 = ix1 + self.k_endog
else:
ix2 = ix1 + self.k_endog_Q
return np.s_[ix1:ix2]
@property
def idio_ar_M(self):
"""Idiosyncratic AR states for monthly variables."""
ix1 = self.k_states_factors
ix2 = ix1
if self.idiosyncratic_ar1:
ix2 += self.k_endog_M
return np.s_[ix1:ix2]
@property
def idio_ar_Q(self):
"""Idiosyncratic AR states and all lags for quarterly variables."""
# Note that this is equivalent to idio_ar_Q_ix with ravel(order='F')
ix1 = self.k_states_factors
if self.idiosyncratic_ar1:
ix1 += self.k_endog_M
ix2 = ix1 + self.k_endog_Q * 5
return np.s_[ix1:ix2]
@property
def idio_ar_Q_ix(self):
"""Idiosyncratic AR (quarterly) state index, (k_endog_Q, lags)."""
# i.e. the position in the state vector of the second lag of the third
# quarterly variable is idio_ar_Q_ix[2, 1]
# ravel(order='F') gives e.g (y1.L1, y2.L1, y1.L2, y2.L2, y1.L3, ...)
# while
# ravel(order='C') gives e.g (y1.L1, y1.L2, y1.L3, y2.L1, y2.L2, ...)
start = self.k_states_factors
if self.idiosyncratic_ar1:
start += self.k_endog_M
return (start + np.reshape(
np.arange(5 * self.k_endog_Q), (5, self.k_endog_Q)).T)
@property
def endog_factor_iloc(self):
"""List of list of int, factor indexes for each observed variable."""
# i.e. endog_factor_iloc[i] is a list of integer locations of the
# factors that load on the ith observed variable
if self._endog_factor_iloc is None:
ilocs = []
for i in range(self.k_endog):
ilocs.append(np.where(self.endog_factor_map.iloc[i])[0])
self._endog_factor_iloc = ilocs
return self._endog_factor_iloc
def __getitem__(self, key):
"""
Use square brackets to access index / slice elements.
This is convenient in highlighting the indexing / slice quality of
these attributes in the code below.
"""
if key in ['factors_L1', 'factors_L1_5_ix', 'idio_ar_L1', 'idio_ar_M',
'idio_ar_Q', 'idio_ar_Q_ix']:
return getattr(self, key)
else:
raise KeyError(key)
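# Hedged illustrative sketch (an editor's addition, not part of the original
# statsmodels module): builds the state-vector helper for a toy monthly-only
# model with one global factor and AR(1) idiosyncratic terms. The variable
# names are made up for the example.
def _dfmq_states_sketch():
    s = DynamicFactorMQStates(
        k_endog_M=2, k_endog_Q=0, endog_names=['y1', 'y2'], factors=1,
        factor_orders=1, factor_multiplicities=None, idiosyncratic_ar1=True)
    # One factor state plus two idiosyncratic AR(1) states: s.k_states == 3,
    # s['factors_L1'] is array([0]) and s['idio_ar_M'] is slice(1, 3, None).
    return s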
class DynamicFactorMQ(mlemodel.MLEModel):
r"""
Dynamic factor model with EM algorithm; option for monthly/quarterly data.
Implementation of the dynamic factor model of Bańbura and Modugno (2014)
([1]_) and Bańbura, Giannone, and Reichlin (2011) ([2]_). Uses the EM
algorithm for parameter fitting, and so can accommodate a large number of
left-hand-side variables. Specifications can include any collection of
blocks of factors, including different factor autoregression orders, and
can include AR(1) processes for idiosyncratic disturbances. Can
incorporate monthly/quarterly mixed frequency data along the lines of
Mariano and Murasawa (2010) ([4]_). A special case of this model is the
Nowcasting model of Bok et al. (2018) ([3]_). Moreover, this model can be
used to compute the news associated with updated data releases.
Parameters
----------
endog : array_like
Observed time-series process :math:`y`. See the "Notes" section for
details on how to set up a model with monthly/quarterly mixed frequency
data.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which the
provided `endog` dataset contains both the monthly and quarterly data,
this variable should be used to indicate how many of the variables
are monthly. Note that when using the `k_endog_monthly` argument, the
columns with monthly variables in `endog` should be ordered first, and
the columns with quarterly variables should come afterwards. See the
"Notes" section for details on how to set up a model with
monthly/quarterly mixed frequency data.
factors : int, list, or dict, optional
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, .... The
default is a single factor that loads on all variables. Note that there
cannot be more factors specified than there are monthly variables.
factor_orders : int or dict, optional
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict, optional
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
standardize : bool or tuple, optional
If a boolean, whether or not to standardize each endogenous variable to
have mean zero and standard deviation 1 before fitting the model. See
"Notes" for details about how this option works with postestimation
output. If a tuple (usually only used internally), then the tuple must
have length 2, with each element containing a Pandas series with index
equal to the names of the endogenous variables. The first element
should contain the mean values and the second element should contain
the standard deviations. Default is True.
endog_quarterly : pandas.Series or pandas.DataFrame
Observed quarterly variables. If provided, must be a Pandas Series or
DataFrame with a DatetimeIndex or PeriodIndex at the quarterly
frequency. See the "Notes" section for details on how to set up a model
with monthly/quarterly mixed frequency data.
init_t0 : bool, optional
If True, this option initializes the Kalman filter with the
distribution for :math:`\alpha_0` rather than :math:`\alpha_1`. See
the "Notes" section for more details. This option is rarely used except
for testing. Default is False.
obs_cov_diag : bool, optional
If True and if `idiosyncratic_ar1 is True`, then this option puts small
positive values in the observation disturbance covariance matrix. This
is not required for estimation and is rarely used except for testing.
(It is sometimes used to prevent numerical errors, for example those
associated with a positive semi-definite forecast error covariance
matrix at the first time step when using EM initialization, but state
space models in Statsmodels switch to the univariate approach in those
cases, and so do not need to use this trick). Default is False.
Notes
-----
The basic model is:
.. math::
y_t & = \Lambda f_t + \epsilon_t \\
f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + u_t
where:
- :math:`y_t` is observed data at time t
- :math:`\epsilon_t` is idiosyncratic disturbance at time t (see below for
details, including modeling serial correlation in this term)
- :math:`f_t` is the unobserved factor at time t
- :math:`u_t \sim N(0, Q)` is the factor disturbance at time t
and:
- :math:`\Lambda` is referred to as the matrix of factor loadings
- :math:`A_i` are matrices of autoregression coefficients
Furthermore, we allow the idiosyncratic disturbances to be serially
correlated, so that, if `idiosyncratic_ar1=True`,
:math:`\epsilon_{i,t} = \rho_i \epsilon_{i,t-1} + e_{i,t}`, where
:math:`e_{i,t} \sim N(0, \sigma_i^2)`. If `idiosyncratic_ar1=False`,
then we instead have :math:`\epsilon_{i,t} = e_{i,t}`.
This basic setup can be found in [1]_, [2]_, [3]_, and [4]_.
We allow for two generalizations of this model:
1. Following [2]_, we allow multiple "blocks" of factors, which are
independent from the other blocks of factors. Different blocks can be
set to load on different subsets of the observed variables, and can be
specified with different lag orders.
2. Following [4]_ and [2]_, we allow mixed frequency models in which both
monthly and quarterly data are used. See the section on "Mixed frequency
models", below, for more details.
Additional notes:
- The observed data may contain arbitrary patterns of missing entries.
**EM algorithm**
This model contains a potentially very large number of parameters, and it
can be difficult and take a prohibitively long time to numerically optimize
the likelihood function using quasi-Newton methods. Instead, the default
fitting method in this model uses the EM algorithm, as detailed in [1]_.
As a result, the model can accommodate datasets with hundreds of
observed variables.
**Mixed frequency data**
This model can handle mixed frequency data in two ways. In this section,
we only briefly describe this, and refer readers to [2]_ and [4]_ for all
details.
First, because there can be arbitrary patterns of missing data in the
observed vector, one can simply include lower frequency variables as
observed in a particular higher frequency period, and missing otherwise.
For example, in a monthly model, one could include quarterly data as
occurring on the third month of each quarter. To use this method, one
simply needs to combine the data into a single dataset at the higher
frequency that can be passed to this model as the `endog` argument.
However, depending on the type of variables used in the analysis and the
assumptions about the data generating process, this approach may not be
valid.
For example, suppose that we are interested in the growth rate of real GDP,
which is measured at a quarterly frequency. If the basic factor model is
specified at a monthly frequency, then the quarterly growth rate in the
third month of each quarter -- which is what we actually observe -- is
approximated by a particular weighted average of unobserved monthly growth
rates. We need to take this particular weighted average into account
in constructing our model, and this is what the second approach does.
The second approach follows [2]_ and [4]_ in constructing a state space
form to explicitly model the quarterly growth rates in terms of the
unobserved monthly growth rates. To use this approach, there are two
methods:
1. Combine the monthly and quarterly data into a single dataset at the
monthly frequency, with the monthly data in the first columns and the
quarterly data in the last columns. Pass this dataset to the model as
the `endog` argument and give the number of the variables that are
monthly as the `k_endog_monthly` argument.
2. Construct a monthly dataset as a Pandas DataFrame with a DatetimeIndex
or PeriodIndex at the monthly frequency and separately construct a
quarterly dataset as a Pandas DataFrame with a DatetimeIndex or
PeriodIndex at the quarterly frequency. Pass the monthly DataFrame to
the model as the `endog` argument and pass the quarterly DataFrame to
the model as the `endog_quarterly` argument.
Note that this only incorporates one particular type of mixed frequency
data. See also Bańbura et al. (2013), "Now-Casting and the Real-Time Data
Flow," for discussion about other types of mixed frequency data that are
not supported by this framework.
**Nowcasting and the news**
Through its support for monthly/quarterly mixed frequency data, this model
can allow for the nowcasting of quarterly variables based on monthly
observations. In particular, [2]_ and [3]_ use this model to construct
nowcasts of real GDP and analyze the impacts of "the news", derived from
incoming data on a real-time basis. This latter functionality can be
accessed through the `news` method of the results object.
**Standardizing data**
As is often the case in formulating a dynamic factor model, we do not
explicitly account for the mean of each observed variable. Instead, the
default behavior is to standardize each variable prior to estimation. Thus
if :math:`y_t` are the given observed data, the dynamic factor model is
actually estimated on the standardized data defined by:
.. math::
x_{i, t} = (y_{i, t} - \bar y_i) / s_i
where :math:`\bar y_i` is the sample mean and :math:`s_i` is the sample
standard deviation.
By default, if standardization is applied prior to estimation, results such
as in-sample predictions, out-of-sample forecasts, and the computation of
the "news" are reported in the scale of the original data (i.e. the model
output has the reverse transformation applied before it is returned to the
user).
Standardization can be disabled by passing `standardize=False` to the
model constructor.
**Identification of factors and loadings**
The estimated factors and the factor loadings in this model are only
identified up to an invertible transformation. As described in (the working
paper version of) [2]_, while it is possible to impose normalizations to
achieve identification, the EM algorithm will converge regardless.
Moreover, for nowcasting and forecasting purposes, identification is not
required. This model does not impose any normalization to identify the
factors and the factor loadings.
**Miscellaneous**
There are two arguments available in the model constructor that are rarely
used but which deserve a brief mention: `init_t0` and `obs_cov_diag`. These
arguments are provided to allow exactly matching the output of other
packages that have slight differences in how the underlying state space
model is set up / applied.
- `init_t0`: state space models in Statsmodels follow Durbin and Koopman in
initializing the model with :math:`\alpha_1 \sim N(a_1, P_1)`. Other
implementations sometimes initialize instead with
:math:`\alpha_0 \sim N(a_0, P_0)`. We can accommodate this by prepending
a row of NaNs to the observed dataset.
- `obs_cov_diag`: the state space form in [1]_ incorporates non-zero (but
very small) diagonal elements for the observation disturbance covariance
matrix.
Examples
--------
Constructing and fitting a `DynamicFactorMQ` model.
>>> data = sm.datasets.macrodata.load_pandas().data.iloc[-100:]
>>> data.index = pd.period_range(start='1984Q4', end='2009Q3', freq='Q')
>>> endog = data[['infl', 'tbilrate']].resample('M').last()
>>> endog_Q = np.log(data[['realgdp', 'realcons']]).diff().iloc[1:] * 400
**Basic usage**
In the simplest case, passing only the `endog` argument results in a model
with a single factor that follows an AR(1) process. Note that because we
are not also providing an `endog_quarterly` dataset, `endog` can be a numpy
array or Pandas DataFrame with any index (it does not have to be monthly).
The `summary` method can be useful in checking the model specification.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
**Factors**
With `factors=2`, there will be two independent factors that will each
evolve according to separate AR(1) processes.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 1
1 1
=====================
**Factor multiplicities**
By instead specifying `factor_multiplicities=2`, we would still have two
factors, but they would be dependent and would evolve jointly according
to a VAR(1) process.
>>> mod = sm.tsa.DynamicFactorMQ(endog, factor_multiplicities=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 1 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0.1 0.2
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0.1, 0.2 1
=====================
**Factor orders**
In either of the above cases, we could extend the order of the (vector)
autoregressions by using the `factor_orders` argument. For example, the
below model would contain two independent factors that each evolve
according to a separate AR(2) process:
>>> mod = sm.tsa.DynamicFactorMQ(endog, factors=2, factor_orders=2)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of factors: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable 0 1
-----------------------------------
infl X X
tbilrate X X
Factor blocks:
=====================
block order
---------------------
0 2
1 2
=====================
**Serial correlation in the idiosyncratic disturbances**
By default, the model allows each idiosyncratic disturbance term to evolve
according to an AR(1) process. If preferred, they can instead be specified
to be serially independent by passing `idiosyncratic_ar1=False`.
>>> mod = sm.tsa.DynamicFactorMQ(endog, idiosyncratic_ar1=False)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of factors: 1
+ iid idiosyncratic Idiosyncratic disturbances: iid
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
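**Standardization**
As described in the "Standardizing data" section of the Notes, each
observed series is standardized before estimation by default. A minimal
sketch of disabling this behavior (output omitted):
>>> mod = sm.tsa.DynamicFactorMQ(endog, standardize=False)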
*Monthly / Quarterly mixed frequency*
To specify a monthly / quarterly mixed frequency model, pass the quarterly
data via the `endog_quarterly` argument (see the Notes section for more
details about these models):
>>> mod = sm.tsa.DynamicFactorMQ(endog, endog_quarterly=endog_Q)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 1 factors in 1 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factors: 1
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
========================
Dep. variable 0
------------------------
infl X
tbilrate X
realgdp X
realcons X
Factor blocks:
=====================
block order
---------------------
0 1
=====================
*Customize observed variable / factor loadings*
To specify that certain observed variables only load on
certain factors, it is possible to pass a dictionary to the `factors`
argument.
>>> factors = {'infl': ['global'],
...            'tbilrate': ['global'],
...            'realgdp': ['global', 'real'],
...            'realcons': ['global', 'real']}
>>> mod = sm.tsa.DynamicFactorMQ(endog, endog_quarterly=endog_Q,
...                              factors=factors)
>>> print(mod.summary())
Model Specification: Dynamic Factor Model
==========================================================================
Model: Dynamic Factor Model # of monthly variables: 2
+ 2 factors in 2 blocks # of quarterly variables: 2
+ Mixed frequency (M/Q) # of factor blocks: 2
+ AR(1) idiosyncratic Idiosyncratic disturbances: AR(1)
Sample: 1984-10 Standardize variables: True
- 2009-09
Observed variables / factor loadings
===================================
Dep. variable global real
-----------------------------------
infl X
tbilrate X
realgdp X X
realcons X X
Factor blocks:
=====================
block order
---------------------
global 1
real 1
=====================
**Fitting parameters**
To fit the model, use the `fit` method. This method uses the EM algorithm
by default.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.summary())
Dynamic Factor Results
==========================================================================
Dep. Variable: ['infl', 'tbilrate'] No. Observations: 300
Model: Dynamic Factor Model Log Likelihood -127.909
+ 1 factors in 1 blocks AIC 271.817
+ AR(1) idiosyncratic BIC 301.447
Date: Tue, 04 Aug 2020 HQIC 283.675
Time: 15:59:11 EM Iterations 83
Sample: 10-31-1984
- 09-30-2009
Covariance Type: Not computed
Observation equation:
==============================================================
Factor loadings: 0 idiosyncratic: AR(1) var.
--------------------------------------------------------------
infl -0.67 0.39 0.73
tbilrate -0.63 0.99 0.01
Transition: Factor block 0
=======================================
L1.0 error variance
---------------------------------------
0 0.98 0.01
=======================================
Warnings:
[1] Covariance matrix not calculated.
*Displaying iteration progress*
To display information about the EM iterations, use the `disp` argument.
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit(disp=10)
EM start iterations, llf=-291.21
EM iteration 10, llf=-157.17, convergence criterion=0.053801
EM iteration 20, llf=-128.99, convergence criterion=0.0035545
EM iteration 30, llf=-127.97, convergence criterion=0.00010224
EM iteration 40, llf=-127.93, convergence criterion=1.3281e-05
EM iteration 50, llf=-127.92, convergence criterion=5.4725e-06
EM iteration 60, llf=-127.91, convergence criterion=2.8665e-06
EM iteration 70, llf=-127.91, convergence criterion=1.6999e-06
EM iteration 80, llf=-127.91, convergence criterion=1.1085e-06
EM converged at iteration 83, llf=-127.91,
convergence criterion=9.9004e-07 < tolerance=1e-06
**Results: forecasting, impulse responses, and more**
Once the model is fitted, there are a number of methods available from the
results object. Some examples include:
*Forecasting*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.forecast(steps=5))
infl tbilrate
2009-10 1.784169 0.260401
2009-11 1.735848 0.305981
2009-12 1.730674 0.350968
2010-01 1.742110 0.395369
2010-02 1.759786 0.439194
*Impulse responses*
>>> mod = sm.tsa.DynamicFactorMQ(endog)
>>> res = mod.fit()
>>> print(res.impulse_responses(steps=5))
infl tbilrate
0 -1.511956 -1.341498
1 -1.483172 -1.315960
2 -1.454937 -1.290908
3 -1.427240 -1.266333
4 -1.400069 -1.242226
5 -1.373416 -1.218578
For other available methods (including in-sample prediction, simulation of
time series, extending the results to incorporate new data, and the news),
see the documentation for state space models.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bok, Brandyn, Daniele Caratelli, Domenico Giannone,
Argia M. Sbordone, and Andrea Tambalotti. 2018.
"Macroeconomic Nowcasting and Forecasting with Big Data."
Annual Review of Economics 10 (1): 615-43.
https://doi.org/10.1146/annurev-economics-080217-053214.
.. [4] Mariano, Roberto S., and Yasutomo Murasawa.
"A coincident index, common factors, and monthly real GDP."
Oxford Bulletin of Economics and Statistics 72, no. 1 (2010): 27-46.
"""
def __init__(self, endog, k_endog_monthly=None, factors=1, factor_orders=1,
factor_multiplicities=None, idiosyncratic_ar1=True,
standardize=True, endog_quarterly=None, init_t0=False,
obs_cov_diag=False, **kwargs):
# Handle endog variables
if endog_quarterly is not None:
if k_endog_monthly is not None:
raise ValueError('If `endog_quarterly` is specified, then'
' `endog` must contain only monthly'
' variables, and so `k_endog_monthly` cannot'
' be specified since it will be inferred from'
' the shape of `endog`.')
endog, k_endog_monthly = self.construct_endog(
endog, endog_quarterly)
endog_is_pandas = _is_using_pandas(endog, None)
if endog_is_pandas:
if isinstance(endog, pd.Series):
endog = endog.to_frame()
else:
if np.ndim(endog) < 2:
endog = np.atleast_2d(endog).T
if k_endog_monthly is None:
k_endog_monthly = endog.shape[1]
if endog_is_pandas:
endog_names = endog.columns.tolist()
else:
if endog.shape[1] == 1:
endog_names = ['y']
else:
endog_names = [f'y{i + 1}' for i in range(endog.shape[1])]
self.k_endog_M = int_like(k_endog_monthly, 'k_endog_monthly')
self.k_endog_Q = endog.shape[1] - self.k_endog_M
# Compute helper for handling factors / state indexing
s = self._s = DynamicFactorMQStates(
self.k_endog_M, self.k_endog_Q, endog_names, factors,
factor_orders, factor_multiplicities, idiosyncratic_ar1)
# Save parameterization
self.factors = factors
self.factor_orders = factor_orders
self.factor_multiplicities = factor_multiplicities
self.endog_factor_map = self._s.endog_factor_map
self.factor_block_orders = self._s.factor_block_orders
self.factor_names = self._s.factor_names
self.k_factors = self._s.k_factors
self.k_factor_blocks = len(self.factor_block_orders)
self.max_factor_order = self._s.max_factor_order
self.idiosyncratic_ar1 = idiosyncratic_ar1
self.init_t0 = init_t0
self.obs_cov_diag = obs_cov_diag
if self.init_t0:
# TODO: test each of these options
if endog_is_pandas:
ix = pd.period_range(endog.index[0] - 1, endog.index[-1],
freq='M')
endog = endog.reindex(ix)
else:
endog = np.c_[[np.nan] * endog.shape[1], endog.T].T
# Standardize endog, if requested
# Note: endog_mean and endog_std will always each be 1-dimensional with
# length equal to the number of endog variables
if isinstance(standardize, tuple) and len(standardize) == 2:
endog_mean, endog_std = standardize
# Validate the input
n = endog.shape[1]
if (isinstance(endog_mean, pd.Series) and not
endog_mean.index.equals(pd.Index(endog_names))):
raise ValueError('Invalid value passed for `standardize`:'
' if a Pandas Series, must have index'
f' {endog_names}. Got {endog_mean.index}.')
else:
endog_mean = np.atleast_1d(endog_mean)
if (isinstance(endog_std, pd.Series) and not
endog_std.index.equals(pd.Index(endog_names))):
raise ValueError('Invalid value passed for `standardize`:'
' if a Pandas Series, must have index'
f' {endog_names}. Got {endog_std.index}.')
else:
endog_std = np.atleast_1d(endog_std)
if (np.shape(endog_mean) != (n,) or np.shape(endog_std) != (n,)):
raise ValueError('Invalid value passed for `standardize`: each'
f' element must be shaped ({n},).')
standardize = True
# Make sure we have Pandas if endog is Pandas
if endog_is_pandas:
endog_mean = pd.Series(endog_mean, index=endog_names)
endog_std = pd.Series(endog_std, index=endog_names)
elif standardize in [1, True]:
endog_mean = endog.mean(axis=0)
endog_std = endog.std(axis=0)
elif standardize in [0, False]:
endog_mean = np.zeros(endog.shape[1])
endog_std = np.ones(endog.shape[1])
else:
raise ValueError('Invalid value passed for `standardize`.')
self._endog_mean = endog_mean
self._endog_std = endog_std
self.standardize = standardize
if np.any(self._endog_std < 1e-10):
ix = np.where(self._endog_std < 1e-10)
names = np.array(endog_names)[ix[0]].tolist()
raise ValueError('Constant variable(s) found in observed'
' variables, but constants cannot be included'
f' in this model. These variables are: {names}.')
if self.standardize:
endog = (endog - self._endog_mean) / self._endog_std
# Observation / states slices
o = self._o = {
'M': np.s_[:self.k_endog_M],
'Q': np.s_[self.k_endog_M:]}
# Construct the basic state space representation
super().__init__(endog, k_states=s.k_states, k_posdef=s.k_posdef,
**kwargs)
# Revert the standardization for orig_endog
if self.standardize:
self.data.orig_endog = (
self.data.orig_endog * self._endog_std + self._endog_mean)
# State initialization
# Note: we could just initialize the entire thing as stationary, but
# doing each block separately should be faster and avoid numerical
# issues
if 'initialization' not in kwargs:
self.ssm.initialize(self._default_initialization())
# Fixed components of the state space representation
# > design
if self.idiosyncratic_ar1:
self['design', o['M'], s['idio_ar_M']] = np.eye(self.k_endog_M)
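# Quarterly idiosyncratic states enter the observation equation through
# the current state and four lags with weights (1, 2, 3, 2, 1), the
# monthly-to-quarterly aggregation used in the references cited above.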
multipliers = [1, 2, 3, 2, 1]
for i in range(len(multipliers)):
m = multipliers[i]
self['design', o['Q'], s['idio_ar_Q_ix'][:, i]] = (
m * np.eye(self.k_endog_Q))
# > obs cov
if self.obs_cov_diag:
self['obs_cov'] = np.eye(self.k_endog) * 1e-4
# > transition
for block in s.factor_blocks:
if block.k_factors == 1:
tmp = 0
else:
tmp = np.zeros((block.k_factors, block.k_factors))
self['transition', block['factors'], block['factors']] = (
companion_matrix([1] + [tmp] * block._factor_order).T)
if self.k_endog_Q == 1:
tmp = 0
else:
tmp = np.zeros((self.k_endog_Q, self.k_endog_Q))
self['transition', s['idio_ar_Q'], s['idio_ar_Q']] = (
companion_matrix([1] + [tmp] * 5).T)
# > selection
ix1 = ix2 = 0
for block in s.factor_blocks:
ix2 += block.k_factors
self['selection', block['factors_ix'][:, 0], ix1:ix2] = (
np.eye(block.k_factors))
ix1 = ix2
if self.idiosyncratic_ar1:
ix2 = ix1 + self.k_endog_M
self['selection', s['idio_ar_M'], ix1:ix2] = np.eye(self.k_endog_M)
ix1 = ix2
ix2 = ix1 + self.k_endog_Q
self['selection', s['idio_ar_Q_ix'][:, 0], ix1:ix2] = (
np.eye(self.k_endog_Q))
# Parameters
self.params = OrderedDict([
('loadings', np.sum(self.endog_factor_map.values)),
('factor_ar', np.sum([block.k_factors**2 * block.factor_order
for block in s.factor_blocks])),
('factor_cov', np.sum([block.k_factors * (block.k_factors + 1) // 2
for block in s.factor_blocks])),
('idiosyncratic_ar1',
self.k_endog if self.idiosyncratic_ar1 else 0),
('idiosyncratic_var', self.k_endog)])
self.k_params = np.sum(list(self.params.values()))
# Parameter slices
ix = np.split(np.arange(self.k_params),
np.cumsum(list(self.params.values()))[:-1])
self._p = dict(zip(self.params.keys(), ix))
# Cache
self._loading_constraints = {}
# Initialization kwarg keys, e.g. for cloning
self._init_keys += [
'factors', 'factor_orders', 'factor_multiplicities',
'idiosyncratic_ar1', 'standardize', 'init_t0',
'obs_cov_diag'] + list(kwargs.keys())
@classmethod
def construct_endog(cls, endog_monthly, endog_quarterly):
"""
Construct a combined dataset from separate monthly and quarterly data.
Parameters
----------
endog_monthly : array_like
Monthly dataset. If a quarterly dataset is given, then this must
be a Pandas object with a PeriodIndex or DatetimeIndex at a monthly
frequency.
endog_quarterly : array_like or None
Quarterly dataset. If not None, then this must be a Pandas object
with a PeriodIndex or DatetimeIndex at a quarterly frequency.
Returns
-------
endog : array_like
If both endog_monthly and endog_quarterly were given, this is a
Pandas DataFrame with a PeriodIndex at the monthly frequency, with
all of the columns from `endog_monthly` ordered first and the
columns from `endog_quarterly` ordered afterwards. Otherwise it is
simply the input `endog_monthly` dataset.
k_endog_monthly : int
The number of monthly variables (which are ordered first) in the
returned `endog` dataset.
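        Examples
        --------
        A sketch of typical usage, where ``dta_m`` and ``dta_q`` are
        hypothetical monthly and quarterly Pandas DataFrames with
        appropriate date indexes:
        >>> endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
        ...     dta_m, dta_q)
        >>> mod = DynamicFactorMQ(endog, k_endog_monthly=k_endog_monthly)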
"""
# Create combined dataset
if endog_quarterly is not None:
# Validate endog_monthly
base_msg = ('If given both monthly and quarterly data'
' then the monthly dataset must be a Pandas'
' object with a date index at a monthly frequency.')
if not isinstance(endog_monthly, (pd.Series, pd.DataFrame)):
raise ValueError('Given monthly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_monthly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given monthly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_monthly.index, 'freqstr', 'N')[0] == 'M':
freqstr = getattr(endog_monthly.index, 'freqstr', 'None')
raise ValueError('Index of given monthly dataset has a'
' non-monthly frequency (to check this,'
' examine the `freqstr` attribute of the'
' index of the dataset - it should start with'
' M if it is monthly).'
f' Got {freqstr}. ' + base_msg)
# Validate endog_quarterly
base_msg = ('If a quarterly dataset is given, then it must be a'
' Pandas object with a date index at a quarterly'
' frequency.')
if not isinstance(endog_quarterly, (pd.Series, pd.DataFrame)):
raise ValueError('Given quarterly dataset is not a'
' Pandas object. ' + base_msg)
elif endog_quarterly.index.inferred_type not in ("datetime64",
"period"):
raise ValueError('Given quarterly dataset has an'
' index with non-date values. ' + base_msg)
elif not getattr(endog_quarterly.index, 'freqstr', 'N')[0] == 'Q':
freqstr = getattr(endog_quarterly.index, 'freqstr', 'None')
raise ValueError('Index of given quarterly dataset'
' has a non-quarterly frequency (to check'
' this, examine the `freqstr` attribute of'
' the index of the dataset - it should start'
' with Q if it is quarterly).'
f' Got {freqstr}. ' + base_msg)
# Convert to PeriodIndex, if applicable
if hasattr(endog_monthly.index, 'to_period'):
endog_monthly = endog_monthly.to_period('M')
if hasattr(endog_quarterly.index, 'to_period'):
endog_quarterly = endog_quarterly.to_period('Q')
# Combine the datasets
endog = pd.concat([
endog_monthly,
endog_quarterly.resample('M', convention='end').first()],
axis=1)
# Make sure we didn't accidentally get duplicate column names
column_counts = endog.columns.value_counts()
if column_counts.max() > 1:
columns = endog.columns.values.astype(object)
for name in column_counts.index:
count = column_counts.loc[name]
if count == 1:
continue
mask = columns == name
columns[mask] = [f'{name}{i + 1}' for i in range(count)]
endog.columns = columns
else:
endog = endog_monthly.copy()
shape = endog_monthly.shape
k_endog_monthly = shape[1] if len(shape) == 2 else 1
return endog, k_endog_monthly
def clone(self, endog, k_endog_monthly=None, endog_quarterly=None,
retain_standardization=False, **kwargs):
"""
Clone state space model with new data and optionally new specification.
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
Observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
kwargs
Keyword arguments to pass to the new model class to change the
model specification.
Returns
-------
model : DynamicFactorMQ instance
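        Examples
        --------
        A sketch of typical usage, where ``endog_new`` is a hypothetical
        updated dataset with the same variables as the original model and
        ``res`` holds results from fitting the original model:
        >>> mod_new = mod.clone(endog_new, retain_standardization=True)
        >>> res_new = mod_new.smooth(res.params)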
"""
if retain_standardization and self.standardize:
kwargs['standardize'] = (self._endog_mean, self._endog_std)
mod = self._clone_from_init_kwds(
endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly, **kwargs)
return mod
@property
def _res_classes(self):
return {'fit': (DynamicFactorMQResults, mlemodel.MLEResultsWrapper)}
def _default_initialization(self):
s = self._s
init = initialization.Initialization(self.k_states)
for block in s.factor_blocks:
init.set(block['factors'], 'stationary')
if self.idiosyncratic_ar1:
for i in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init.set(i, 'stationary')
init.set(s['idio_ar_Q'], 'stationary')
return init
def _get_endog_names(self, truncate=None, as_string=None):
if truncate is None:
truncate = False if as_string is False or self.k_endog == 1 else 24
if as_string is False and truncate is not False:
raise ValueError('Can only truncate endog names if they'
' are returned as a string.')
if as_string is None:
as_string = truncate is not False
        # The base `endog_names` property is only a list if there are at
        # least two variables; here we always need it to be a list
endog_names = self.endog_names
if not isinstance(endog_names, list):
endog_names = [endog_names]
if as_string:
endog_names = [str(name) for name in endog_names]
if truncate is not False:
n = truncate
endog_names = [name if len(name) <= n else name[:n] + '...'
for name in endog_names]
return endog_names
@property
def _model_name(self):
model_name = [
'Dynamic Factor Model',
f'{self.k_factors} factors in {self.k_factor_blocks} blocks']
if self.k_endog_Q > 0:
model_name.append('Mixed frequency (M/Q)')
error_type = 'AR(1)' if self.idiosyncratic_ar1 else 'iid'
model_name.append(f'{error_type} idiosyncratic')
return model_name
def summary(self, truncate_endog_names=None):
"""
Create a summary table describing the model.
Parameters
----------
truncate_endog_names : int, optional
The number of characters to show for names of observed variables.
Default is 24 if there is more than one observed variable, or
            an unlimited number if there is only one.
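        Examples
        --------
        A sketch of typical usage, where ``mod`` is a constructed
        DynamicFactorMQ instance:
        >>> print(mod.summary())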
"""
# Get endog names
endog_names = self._get_endog_names(truncate=truncate_endog_names,
as_string=True)
title = 'Model Specification: Dynamic Factor Model'
if self._index_dates:
ix = self._index
d = ix[0]
sample = ['%s' % d]
d = ix[-1]
sample += ['- ' + '%s' % d]
else:
sample = [str(0), ' - ' + str(self.nobs)]
# Standardize the model name as a list of str
model_name = self._model_name
# - Top summary table ------------------------------------------------
top_left = []
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Sample:', [sample[0]]),
('', [sample[1]])]
top_right = []
if self.k_endog_Q > 0:
top_right += [
('# of monthly variables:', [self.k_endog_M]),
('# of quarterly variables:', [self.k_endog_Q])]
else:
top_right += [('# of observed variables:', [self.k_endog])]
if self.k_factor_blocks == 1:
top_right += [('# of factors:', [self.k_factors])]
else:
top_right += [('# of factor blocks:', [self.k_factor_blocks])]
top_right += [('Idiosyncratic disturbances:',
['AR(1)' if self.idiosyncratic_ar1 else 'iid']),
('Standardize variables:', [self.standardize])]
summary = Summary()
self.model = self
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
table_ix = 1
del self.model
# - Endog / factor map -----------------------------------------------
data = self.endog_factor_map.replace({True: 'X', False: ''})
data.index = endog_names
        for name, col in data.items():
data[name] = data[name] + (' ' * (len(name) // 2))
data.index.name = 'Dep. variable'
data = data.reset_index()
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Observed variables / factor loadings'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
# - Factor blocks summary table --------------------------------------
data = self.factor_block_orders.reset_index()
data['block'] = data['block'].map(
lambda factor_names: ', '.join(factor_names))
data[['order']] = (
data[['order']].applymap(str))
params_data = data.values
params_header = data.columns.map(str).tolist()
params_stubs = None
title = 'Factor blocks:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
return summary
def __str__(self):
"""Summary tables showing model specification."""
return str(self.summary())
@property
def state_names(self):
"""(list of str) List of human readable names for unobserved states."""
# Factors
state_names = []
for block in self._s.factor_blocks:
state_names += [f'{name}' for name in block.factor_names[:]]
for s in range(1, block._factor_order):
state_names += [f'L{s}.{name}'
for name in block.factor_names]
# Monthly error
endog_names = self._get_endog_names()
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
state_names += [f'eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
# Quarterly error
state_names += [f'eps_Q.{name}' for name in endog_names_Q]
for s in range(1, 5):
state_names += [f'L{s}.eps_Q.{name}' for name in endog_names_Q]
return state_names
@property
def param_names(self):
"""(list of str) List of human readable parameter names."""
param_names = []
# Loadings
# So that Lambda = params[ix].reshape(self.k_endog, self.k_factors)
# (where Lambda stacks Lambda_M and Lambda_Q)
endog_names = self._get_endog_names(as_string=False)
for endog_name in endog_names:
for block in self._s.factor_blocks:
for factor_name in block.factor_names:
if self.endog_factor_map.loc[endog_name, factor_name]:
param_names.append(
f'loading.{factor_name}->{endog_name}')
# Factor VAR
for block in self._s.factor_blocks:
for to_factor in block.factor_names:
param_names += [f'L{i}.{from_factor}->{to_factor}'
for i in range(1, block.factor_order + 1)
for from_factor in block.factor_names]
# Factor covariance
for i in range(len(self._s.factor_blocks)):
block = self._s.factor_blocks[i]
param_names += [f'fb({i}).cov.chol[{j + 1},{k + 1}]'
for j in range(block.k_factors)
for k in range(j + 1)]
# Error AR(1)
if self.idiosyncratic_ar1:
endog_names_M = endog_names[self._o['M']]
param_names += [f'L1.eps_M.{name}' for name in endog_names_M]
endog_names_Q = endog_names[self._o['Q']]
param_names += [f'L1.eps_Q.{name}' for name in endog_names_Q]
# Error innovation variances
param_names += [f'sigma2.{name}' for name in endog_names]
return param_names
@property
def start_params(self):
"""(array) Starting parameters for maximum likelihood estimation."""
params = np.zeros(self.k_params, dtype=np.float64)
# (1) estimate factors one at a time, where the first step uses
# PCA on all `endog` variables that load on the first factor, and
# subsequent steps use residuals from the previous steps.
# TODO: what about factors that only load on quarterly variables?
endog_factor_map_M = self.endog_factor_map.iloc[:self.k_endog_M]
factors = []
        endog = (pd.DataFrame(self.endog).interpolate()
                 .bfill()
                 .values)
for name in self.factor_names:
# Try to retrieve this from monthly variables, which is most
# consistent
endog_ix = np.where(endog_factor_map_M.loc[:, name])[0]
# But fall back to quarterly if necessary
if len(endog_ix) == 0:
endog_ix = np.where(self.endog_factor_map.loc[:, name])[0]
factor_endog = endog[:, endog_ix]
res_pca = PCA(factor_endog, ncomp=1, method='eig', normalize=False)
factors.append(res_pca.factors)
endog[:, endog_ix] -= res_pca.projection
factors = np.concatenate(factors, axis=1)
# (2) Estimate coefficients for each endog, one at a time (OLS for
# monthly variables, restricted OLS for quarterly). Also, compute
# residuals.
loadings = []
resid = []
for i in range(self.k_endog_M):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = factors[:, factor_ix]
mod_ols = OLS(self.endog[:, i], exog=factor_exog, missing='drop')
res_ols = mod_ols.fit()
loadings += res_ols.params.tolist()
resid.append(res_ols.resid)
for i in range(self.k_endog_M, self.k_endog):
factor_ix = self._s.endog_factor_iloc[i]
factor_exog = lagmat(factors[:, factor_ix], 4, original='in')
mod_glm = GLM(self.endog[:, i], factor_exog, missing='drop')
res_glm = mod_glm.fit_constrained(self.loading_constraints(i))
loadings += res_glm.params[:len(factor_ix)].tolist()
resid.append(res_glm.resid_response)
params[self._p['loadings']] = loadings
# (3) For each factor block, use an AR or VAR model to get coefficients
# and covariance estimate
# Factor transitions
stationary = True
factor_ar = []
factor_cov = []
i = 0
for block in self._s.factor_blocks:
factors_endog = factors[:, i:i + block.k_factors]
i += block.k_factors
if block.factor_order == 0:
continue
if block.k_factors == 1:
mod_factors = SARIMAX(factors_endog,
order=(block.factor_order, 0, 0))
sp = mod_factors.start_params
block_factor_ar = sp[:-1]
block_factor_cov = sp[-1:]
coefficient_matrices = mod_factors.start_params[:-1]
elif block.k_factors > 1:
mod_factors = VAR(factors_endog)
res_factors = mod_factors.fit(
maxlags=block.factor_order, ic=None, trend='nc')
block_factor_ar = res_factors.params.T.ravel()
L = np.linalg.cholesky(res_factors.sigma_u)
block_factor_cov = L[np.tril_indices_from(L)]
coefficient_matrices = np.transpose(
np.reshape(block_factor_ar,
(block.k_factors, block.k_factors,
block.factor_order)), (2, 0, 1))
# Test for stationarity
stationary = is_invertible([1] + list(-coefficient_matrices))
# Check for stationarity
if not stationary:
warn('Non-stationary starting factor autoregressive'
' parameters found for factor block'
f' {block.factor_names}. Using zeros as starting'
' parameters.')
block_factor_ar[:] = 0
cov_factor = np.diag(factors_endog.std(axis=0))
block_factor_cov = (
cov_factor[np.tril_indices(block.k_factors)])
factor_ar += block_factor_ar.tolist()
factor_cov += block_factor_cov.tolist()
params[self._p['factor_ar']] = factor_ar
params[self._p['factor_cov']] = factor_cov
# (4) Use residuals from step (2) to estimate the idiosyncratic
# component
# Idiosyncratic component
if self.idiosyncratic_ar1:
idio_ar1 = []
idio_var = []
for i in range(self.k_endog_M):
mod_idio = SARIMAX(resid[i], order=(1, 0, 0), trend='c')
sp = mod_idio.start_params
idio_ar1.append(np.clip(sp[1], -0.99, 0.99))
idio_var.append(np.clip(sp[-1], 1e-5, np.inf))
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(maxiter=10, return_params=True,
disp=False)
res_idio = mod_idio.fit_em(res_idio, maxiter=5,
return_params=True)
idio_ar1.append(np.clip(res_idio[0], -0.99, 0.99))
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_ar1']] = idio_ar1
params[self._p['idiosyncratic_var']] = idio_var
else:
idio_var = [np.var(resid[i]) for i in range(self.k_endog_M)]
for i in range(self.k_endog_M, self.k_endog):
y = self.endog[:, i].copy()
y[~np.isnan(y)] = resid[i]
mod_idio = QuarterlyAR1(y)
res_idio = mod_idio.fit(return_params=True, disp=False)
idio_var.append(np.clip(res_idio[1], 1e-5, np.inf))
params[self._p['idiosyncratic_var']] = idio_var
return params
def transform_params(self, unconstrained):
"""
Transform parameters from optimizer space to model space.
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
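        Examples
        --------
        A sketch of the round trip between the two parameter spaces, where
        ``mod`` is a DynamicFactorMQ instance:
        >>> unconstrained = mod.untransform_params(mod.start_params)
        >>> constrained = mod.transform_params(unconstrained)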
"""
constrained = unconstrained.copy()
# Stationary factor VAR
unconstrained_factor_ar = unconstrained[self._p['factor_ar']]
constrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
unconstrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = constrain_stationary_multivariate(tmp_coeff,
tmp_cov)
constrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
constrained[self._p['factor_ar']] = constrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = unconstrained[self._p['idiosyncratic_ar1']]
constrained[self._p['idiosyncratic_ar1']] = [
constrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
constrained[self._p['idiosyncratic_var']] = (
constrained[self._p['idiosyncratic_var']]**2)
return constrained
def untransform_params(self, constrained):
"""
Transform parameters from model space to optimizer space.
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer.
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
unconstrained = constrained.copy()
# Stationary factor VAR
constrained_factor_ar = constrained[self._p['factor_ar']]
unconstrained_factor_ar = []
i = 0
for block in self._s.factor_blocks:
length = block.k_factors**2 * block.factor_order
tmp_coeff = np.reshape(
constrained_factor_ar[i:i + length],
(block.k_factors, block.k_factors * block.factor_order))
tmp_cov = np.eye(block.k_factors)
tmp_coeff, _ = unconstrain_stationary_multivariate(tmp_coeff,
tmp_cov)
unconstrained_factor_ar += tmp_coeff.ravel().tolist()
i += length
unconstrained[self._p['factor_ar']] = unconstrained_factor_ar
# Stationary idiosyncratic AR(1)
if self.idiosyncratic_ar1:
idio_ar1 = constrained[self._p['idiosyncratic_ar1']]
unconstrained[self._p['idiosyncratic_ar1']] = [
unconstrain_stationary_univariate(idio_ar1[i:i + 1])[0]
for i in range(self.k_endog)]
# Positive idiosyncratic variances
unconstrained[self._p['idiosyncratic_var']] = (
unconstrained[self._p['idiosyncratic_var']]**0.5)
return unconstrained
def update(self, params, **kwargs):
"""
Update the parameters of the model.
Parameters
----------
params : array_like
Array of new parameters.
transformed : bool, optional
Whether or not `params` is already transformed. If set to False,
`transform_params` is called. Default is True.
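        Examples
        --------
        A sketch that fills in the state space matrices at the default
        starting parameters, where ``mod`` is a DynamicFactorMQ instance:
        >>> mod.update(mod.start_params)
        >>> Z = mod['design']  # design matrix implied by the parameters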
"""
params = super().update(params, **kwargs)
# Local copies
o = self._o
s = self._s
p = self._p
# Loadings
loadings = params[p['loadings']]
start = 0
for i in range(self.k_endog_M):
iloc = self._s.endog_factor_iloc[i]
k_factors = len(iloc)
factor_ix = s['factors_L1'][iloc]
self['design', i, factor_ix] = loadings[start:start + k_factors]
start += k_factors
multipliers = np.array([1, 2, 3, 2, 1])[:, None]
for i in range(self.k_endog_M, self.k_endog):
iloc = self._s.endog_factor_iloc[i]
k_factors = len(iloc)
factor_ix = s['factors_L1_5_ix'][:, iloc]
self['design', i, factor_ix.ravel()] = np.ravel(
loadings[start:start + k_factors] * multipliers)
start += k_factors
# Factor VAR
factor_ar = params[p['factor_ar']]
start = 0
for block in s.factor_blocks:
k_params = block.k_factors**2 * block.factor_order
A = np.reshape(
factor_ar[start:start + k_params],
(block.k_factors, block.k_factors * block.factor_order))
start += k_params
self['transition', block['factors_L1'], block['factors_ar']] = A
# Factor covariance
factor_cov = params[p['factor_cov']]
start = 0
ix1 = 0
for block in s.factor_blocks:
k_params = block.k_factors * (block.k_factors + 1) // 2
L = np.zeros((block.k_factors, block.k_factors),
dtype=params.dtype)
L[np.tril_indices_from(L)] = factor_cov[start:start + k_params]
start += k_params
Q = L @ L.T
ix2 = ix1 + block.k_factors
self['state_cov', ix1:ix2, ix1:ix2] = Q
ix1 = ix2
# Error AR(1)
if self.idiosyncratic_ar1:
alpha = np.diag(params[p['idiosyncratic_ar1']])
self['transition', s['idio_ar_L1'], s['idio_ar_L1']] = alpha
# Error variances
if self.idiosyncratic_ar1:
self['state_cov', self.k_factors:, self.k_factors:] = (
np.diag(params[p['idiosyncratic_var']]))
else:
idio_var = params[p['idiosyncratic_var']]
self['obs_cov', o['M'], o['M']] = np.diag(idio_var[o['M']])
self['state_cov', self.k_factors:, self.k_factors:] = (
np.diag(idio_var[o['Q']]))
@property
def loglike_constant(self):
"""
Constant term in the joint log-likelihood function.
Useful in facilitating comparisons to other packages that exclude the
constant from the log-likelihood computation.
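        Examples
        --------
        A sketch, where ``res`` is a results object from this model:
        >>> llf_excluding_constant = res.llf - res.model.loglike_constant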
"""
return -0.5 * (1 - np.isnan(self.endog)).sum() * np.log(2 * np.pi)
def loading_constraints(self, i):
r"""
Matrix formulation of quarterly variables' factor loading constraints.
Parameters
----------
i : int
Index of the `endog` variable to compute constraints for.
Returns
-------
R : array (k_constraints, k_factors * 5)
q : array (k_constraints,)
Notes
-----
If the factors were known, then the factor loadings for the ith
quarterly variable would be computed by a linear regression of the form
y_i = A_i' f + B_i' L1.f + C_i' L2.f + D_i' L3.f + E_i' L4.f
where:
- f is (k_i x 1) and collects all of the factors that load on y_i
- L{j}.f is (k_i x 1) and collects the jth lag of each factor
- A_i, ..., E_i are (k_i x 1) and collect factor loadings
As the observed variable is quarterly while the factors are monthly, we
want to restrict the estimated regression coefficients to be:
y_i = A_i f + 2 A_i L1.f + 3 A_i L2.f + 2 A_i L3.f + A_i L4.f
Stack the unconstrained coefficients: \Lambda_i = [A_i' B_i' ... E_i']'
Then the constraints can be written as follows, for l = 1, ..., k_i
- 2 A_{i,l} - B_{i,l} = 0
- 3 A_{i,l} - C_{i,l} = 0
- 2 A_{i,l} - D_{i,l} = 0
- A_{i,l} - E_{i,l} = 0
So that k_constraints = 4 * k_i. In matrix form the constraints are:
.. math::
R \Lambda_i = q
where :math:`\Lambda_i` is shaped `(k_i * 5,)`, :math:`R` is shaped
`(k_constraints, k_i * 5)`, and :math:`q` is shaped `(k_constraints,)`.
For example, for the case that k_i = 2, we can write:
| 2 0 -1 0 0 0 0 0 0 0 | | A_{i,1} | | 0 |
| 0 2 0 -1 0 0 0 0 0 0 | | A_{i,2} | | 0 |
| 3 0 0 0 -1 0 0 0 0 0 | | B_{i,1} | | 0 |
| 0 3 0 0 0 -1 0 0 0 0 | | B_{i,2} | | 0 |
| 2 0 0 0 0 0 -1 0 0 0 | | C_{i,1} | = | 0 |
| 0 2 0 0 0 0 0 -1 0 0 | | C_{i,2} | | 0 |
| 1 0 0 0 0 0 0 0 -1 0 | | D_{i,1} | | 0 |
| 0 1 0 0 0 0 0 0 0 -1 | | D_{i,2} | | 0 |
| E_{i,1} | | 0 |
| E_{i,2} | | 0 |
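        As a sketch of how these constraints are used in practice (compare
        `start_params`), where ``mod`` is a DynamicFactorMQ instance, ``i``
        indexes a quarterly variable, and ``factor_exog`` stacks the
        relevant factors and four of their lags:
        >>> mod_glm = GLM(mod.endog[:, i], factor_exog, missing='drop')
        >>> res_glm = mod_glm.fit_constrained(mod.loading_constraints(i))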
"""
if i < self.k_endog_M:
raise ValueError('No constraints for monthly variables.')
if i not in self._loading_constraints:
k_factors = self.endog_factor_map.iloc[i].sum()
R = np.zeros((k_factors * 4, k_factors * 5))
q = np.zeros(R.shape[0])
# Let R = [R_1 R_2]
# Then R_1 is multiples of the identity matrix
multipliers = np.array([1, 2, 3, 2, 1])
R[:, :k_factors] = np.reshape(
(multipliers[1:] * np.eye(k_factors)[..., None]).T,
(k_factors * 4, k_factors))
# And R_2 is the identity
R[:, k_factors:] = np.diag([-1] * (k_factors * 4))
self._loading_constraints[i] = (R, q)
return self._loading_constraints[i]
def fit(self, start_params=None, transformed=True, includes_fixed=False,
cov_type='none', cov_kwds=None, method='em', maxiter=500,
tolerance=1e-6, em_initialization=True, mstep_method=None,
full_output=1, disp=False, callback=None, return_params=False,
optim_score=None, optim_complex_step=None, optim_hessian=None,
flags=None, low_memory=False, llf_decrease_action='revert',
llf_decrease_tolerance=1e-4, **kwargs):
"""
Fits the model by maximum likelihood via Kalman filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'none', since computing this matrix can be very slow
when there are a large number of parameters.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'em' for the EM algorithm
- 'newton' for Newton-Raphson
- 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
optim_score : {'harvey', 'approx'} or None, optional
The method by which the score vector is calculated. 'harvey' uses
the method from Harvey (1989), 'approx' uses either finite
difference or complex step differentiation depending upon the
value of `optim_complex_step`, and None uses the built-in gradient
approximation of the optimizer. Default is None. This keyword is
only relevant if the optimization method uses the score.
optim_complex_step : bool, optional
Whether or not to use complex step differentiation when
approximating the score; if False, finite difference approximation
is used. Default is True. This keyword is only relevant if
`optim_score` is set to 'harvey' or 'approx'.
optim_hessian : {'opg','oim','approx'}, optional
The method by which the Hessian is numerically approximated. 'opg'
uses outer product of gradients, 'oim' uses the information
matrix formula from Harvey (1989), and 'approx' uses numerical
approximation. This keyword is only relevant if the
optimization method uses the Hessian matrix.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Note that this option is not available when using the EM algorithm
(which is the default for this model). Default is False.
llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
Action to take if the log-likelihood decreases in an EM iteration.
'ignore' continues the iterations, 'warn' issues a warning but
continues the iterations, while 'revert' ends the iterations and
            returns the result from the last good iteration. Default is
            'revert'.
llf_decrease_tolerance : float, optional
Minimum size of the log-likelihood decrease required to trigger a
warning or to end the EM iterations. Setting this value slightly
larger than zero allows small decreases in the log-likelihood that
may be caused by numerical issues. If set to zero, then any
decrease will trigger the `llf_decrease_action`. Default is 1e-4.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MLEResults
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
statsmodels.tsa.statespace.mlemodel.MLEResults
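        Examples
        --------
        A sketch of a typical call, where ``dta`` is a hypothetical
        DataFrame of monthly observations:
        >>> mod = DynamicFactorMQ(dta, factors=1, factor_orders=2)
        >>> res = mod.fit(disp=10)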
"""
if method == 'em':
return self.fit_em(
start_params=start_params, transformed=transformed,
cov_type=cov_type, cov_kwds=cov_kwds, maxiter=maxiter,
tolerance=tolerance, em_initialization=em_initialization,
mstep_method=mstep_method, full_output=full_output, disp=disp,
return_params=return_params, low_memory=low_memory,
llf_decrease_action=llf_decrease_action,
llf_decrease_tolerance=llf_decrease_tolerance, **kwargs)
else:
return super().fit(
start_params=start_params, transformed=transformed,
includes_fixed=includes_fixed, cov_type=cov_type,
cov_kwds=cov_kwds, method=method, maxiter=maxiter,
tolerance=tolerance, full_output=full_output, disp=disp,
callback=callback, return_params=return_params,
optim_score=optim_score,
optim_complex_step=optim_complex_step,
optim_hessian=optim_hessian, flags=flags,
low_memory=low_memory, **kwargs)
def fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=500, tolerance=1e-6, disp=False,
em_initialization=True, mstep_method=None, full_output=True,
return_params=False, low_memory=False,
llf_decrease_action='revert', llf_decrease_tolerance=1e-4):
"""
Fits the model by maximum likelihood via the EM algorithm.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is to use `DynamicFactorMQ.start_params`.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'none', since computing this matrix can be very slow
when there are a large number of parameters.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
maxiter : int, optional
The maximum number of EM iterations to perform.
tolerance : float, optional
Parameter governing convergence of the EM algorithm. The
`tolerance` is the minimum relative increase in the likelihood
for which convergence will be declared. A smaller value for the
`tolerance` will typically yield more precise parameter estimates,
but will typically require more EM iterations. Default is 1e-6.
disp : int or bool, optional
Controls printing of EM iteration progress. If an integer, progress
is printed at every `disp` iterations. A value of True is
interpreted as the value of 1. Default is False (nothing will be
printed).
em_initialization : bool, optional
Whether or not to also update the Kalman filter initialization
using the EM algorithm. Default is True.
mstep_method : {None, 'missing', 'nonmissing'}, optional
The EM algorithm maximization step. If there are no NaN values
in the dataset, this can be set to "nonmissing" (which is slightly
faster) or "missing", otherwise it must be "missing". Default is
"nonmissing" if there are no NaN values or "missing" if there are.
full_output : bool, optional
Set to True to have all available output from EM iterations in
the Results object's mle_retvals attribute.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
This option cannot be used with the EM algorithm and will raise an
error if set to True. Default is False.
llf_decrease_action : {'ignore', 'warn', 'revert'}, optional
Action to take if the log-likelihood decreases in an EM iteration.
'ignore' continues the iterations, 'warn' issues a warning but
continues the iterations, while 'revert' ends the iterations and
            returns the result from the last good iteration. Default is
            'revert'.
llf_decrease_tolerance : float, optional
Minimum size of the log-likelihood decrease required to trigger a
warning or to end the EM iterations. Setting this value slightly
larger than zero allows small decreases in the log-likelihood that
may be caused by numerical issues. If set to zero, then any
decrease will trigger the `llf_decrease_action`. Default is 1e-4.
Returns
-------
DynamicFactorMQResults
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEModel.fit
statsmodels.tsa.statespace.mlemodel.MLEResults
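        Examples
        --------
        A sketch of a call with a tighter convergence criterion, where
        ``mod`` is a DynamicFactorMQ instance:
        >>> res = mod.fit_em(maxiter=1000, tolerance=1e-7, disp=50)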
"""
if self._has_fixed_params:
raise NotImplementedError('Cannot fit using the EM algorithm while'
' holding some parameters fixed.')
if low_memory:
raise ValueError('Cannot fit using the EM algorithm when using'
' low_memory option.')
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
llf_decrease_action = string_like(
llf_decrease_action, 'llf_decrease_action',
options=['ignore', 'warn', 'revert'])
disp = int(disp)
# Perform expectation-maximization
s = self._s
llf = []
params = [start_params]
init = None
inits = [self.ssm.initialization]
i = 0
delta = 0
terminate = False
# init_stationary = None if em_initialization else True
while i < maxiter and not terminate and (i < 1 or (delta > tolerance)):
out = self._em_iteration(params[-1], init=init,
mstep_method=mstep_method)
new_llf = out[0].llf_obs.sum()
# If we are not using EM initialization, then we need to check for
# non-stationary parameters
if not em_initialization:
self.update(out[1])
switch_init = []
T = self['transition']
init = self.ssm.initialization
iloc = np.arange(self.k_states)
# We may only have global initialization if we have no
# quarterly variables and idiosyncratic_ar1=False
if self.k_endog_Q == 0 and not self.idiosyncratic_ar1:
block = s.factor_blocks[0]
if init.initialization_type == 'stationary':
Tb = T[block['factors'], block['factors']]
                        eigs = np.abs(np.linalg.eigvals(Tb))
                        if not np.all(eigs < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
else:
# Factor blocks
for block in s.factor_blocks:
b = tuple(iloc[block['factors']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[block['factors'], block['factors']]
                            eigs = np.abs(np.linalg.eigvals(Tb))
                            if not np.all(eigs < (1 - 1e-10)):
init.set(block['factors'], 'diffuse')
switch_init.append(
'factor block:'
f' {tuple(block.factor_names)}')
if self.idiosyncratic_ar1:
endog_names = self._get_endog_names(as_string=True)
# Monthly variables
for j in range(s['idio_ar_M'].start, s['idio_ar_M'].stop):
init_type = init.blocks[(j,)].initialization_type
if init_type == 'stationary':
if not np.abs(T[j, j]) < (1 - 1e-10):
init.set(j, 'diffuse')
name = endog_names[j - s['idio_ar_M'].start]
switch_init.append(
'idiosyncratic AR(1) for monthly'
f' variable: {name}')
# Quarterly variables
if self.k_endog_Q > 0:
b = tuple(iloc[s['idio_ar_Q']])
init_type = init.blocks[b].initialization_type
if init_type == 'stationary':
Tb = T[s['idio_ar_Q'], s['idio_ar_Q']]
                            eigs = np.abs(np.linalg.eigvals(Tb))
                            if not np.all(eigs < (1 - 1e-10)):
init.set(s['idio_ar_Q'], 'diffuse')
switch_init.append(
'idiosyncratic AR(1) for the'
' block of quarterly variables')
if len(switch_init) > 0:
warn('Non-stationary parameters found at EM iteration'
f' {i + 1}, which is not compatible with'
' stationary initialization. Initialization was'
' switched to diffuse for the following: '
f' {switch_init}, and fitting was restarted.')
results = self.fit_em(
start_params=params[-1], transformed=transformed,
cov_type=cov_type, cov_kwds=cov_kwds,
maxiter=maxiter, tolerance=tolerance,
em_initialization=em_initialization,
mstep_method=mstep_method, full_output=full_output,
disp=disp, return_params=return_params,
low_memory=low_memory,
llf_decrease_action=llf_decrease_action,
llf_decrease_tolerance=llf_decrease_tolerance)
self.ssm.initialize(self._default_initialization())
return results
# Check for decrease in the log-likelihood
# Note: allow a little numerical error before declaring a decrease
llf_decrease = (
i > 0 and (new_llf - llf[-1]) < -llf_decrease_tolerance)
if llf_decrease_action == 'revert' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1}.'
f' Reverting to the results from EM iteration {i}'
' (prior to the decrease) and returning the solution.')
# Terminated iteration
i -= 1
terminate = True
else:
if llf_decrease_action == 'warn' and llf_decrease:
warn(f'Log-likelihood decreased at EM iteration {i + 1},'
' which can indicate numerical issues.')
llf.append(new_llf)
params.append(out[1])
if em_initialization:
init = initialization.Initialization(
self.k_states, 'known',
constant=out[0].smoothed_state[..., 0],
stationary_cov=out[0].smoothed_state_cov[..., 0])
inits.append(init)
if i > 0:
delta = (2 * np.abs(llf[-1] - llf[-2]) /
(np.abs(llf[-1]) + np.abs(llf[-2])))
else:
delta = np.inf
# If `disp` is not False, display the first iteration
if disp and i == 0:
print(f'EM start iterations, llf={llf[-1]:.5g}')
            # Print output every `disp` iterations
elif disp and ((i + 1) % disp) == 0:
print(f'EM iteration {i + 1}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}')
# Advance the iteration counter
i += 1
# Check for convergence
not_converged = (i == maxiter and delta > tolerance)
# If no convergence without explicit termination, warn users
if not_converged:
warn(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
# If `disp` is not False, display the final iteration
if disp:
if terminate:
print(f'EM terminated at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
elif not_converged:
print(f'EM reached maximum number of iterations ({maxiter}),'
f' without achieving convergence: llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' (while specified tolerance was {tolerance:.5g})')
else:
print(f'EM converged at iteration {i}, llf={llf[-1]:.5g},'
f' convergence criterion={delta:.5g}'
f' < tolerance={tolerance:.5g}')
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
if em_initialization:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Note that because we are using params[-1], we are actually using
# the results from one additional iteration compared to the
# iteration at which we declared convergence.
result = self.smooth(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
if em_initialization:
self.ssm.initialization = base_init
# Save the output
if full_output:
llf.append(result.llf)
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i,
'inits': inits})
em_settings = Bunch(**{'method': 'em',
'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result._results.mle_retvals = em_retvals
result._results.mle_settings = em_settings
return result
def _em_iteration(self, params0, init=None, mstep_method=None):
"""EM iteration."""
# (E)xpectation step
res = self._em_expectation_step(params0, init=init)
# (M)aximization step
params1 = self._em_maximization_step(res, params0,
mstep_method=mstep_method)
return res, params1
def _em_expectation_step(self, params0, init=None):
"""EM expectation step."""
# (E)xpectation step
self.update(params0)
# Re-initialize state, if new initialization is given
if init is not None:
base_init = self.ssm.initialization
self.ssm.initialization = init
# Perform smoothing, only saving what is required
res = self.ssm.smooth(
SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_STATE_AUTOCOV,
update_filter=False)
res.llf_obs = np.array(
self.ssm._kalman_filter.loglikelihood, copy=True)
# Reset initialization
if init is not None:
self.ssm.initialization = base_init
return res
def _em_maximization_step(self, res, params0, mstep_method=None):
"""EM maximization step."""
s = self._s
a = res.smoothed_state.T[..., None]
cov_a = res.smoothed_state_cov.transpose(2, 0, 1)
acov_a = res.smoothed_state_autocov.transpose(2, 0, 1)
# E[a_t a_t'], t = 0, ..., T
Eaa = cov_a.copy() + np.matmul(a, a.transpose(0, 2, 1))
# E[a_t a_{t-1}'], t = 1, ..., T
Eaa1 = acov_a[:-1] + np.matmul(a[1:], a[:-1].transpose(0, 2, 1))
# Observation equation
has_missing = np.any(res.nmissing)
if mstep_method is None:
mstep_method = 'missing' if has_missing else 'nonmissing'
mstep_method = mstep_method.lower()
if mstep_method == 'nonmissing' and has_missing:
raise ValueError('Cannot use EM algorithm option'
' `mstep_method="nonmissing"` with missing data.')
if mstep_method == 'nonmissing':
func = self._em_maximization_obs_nonmissing
elif mstep_method == 'missing':
func = self._em_maximization_obs_missing
else:
raise ValueError('Invalid maximization step method: "%s".'
% mstep_method)
        # TODO: computing H is pretty slow
Lambda, H = func(res, Eaa, a, compute_H=(not self.idiosyncratic_ar1))
# Factor VAR and covariance
factor_ar = []
factor_cov = []
for b in s.factor_blocks:
A = Eaa[:-1, b['factors_ar'], b['factors_ar']].sum(axis=0)
B = Eaa1[:, b['factors_L1'], b['factors_ar']].sum(axis=0)
C = Eaa[1:, b['factors_L1'], b['factors_L1']].sum(axis=0)
nobs = Eaa.shape[0] - 1
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
f_A = cho_solve(cho_factor(A), B.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
f_A = np.linalg.solve(A, B.T).T
f_Q = (C - f_A @ B.T) / nobs
factor_ar += f_A.ravel().tolist()
factor_cov += (
np.linalg.cholesky(f_Q)[np.tril_indices_from(f_Q)].tolist())
# Idiosyncratic AR(1) and variances
if self.idiosyncratic_ar1:
ix = s['idio_ar_L1']
Ad = Eaa[:-1, ix, ix].sum(axis=0).diagonal()
Bd = Eaa1[:, ix, ix].sum(axis=0).diagonal()
Cd = Eaa[1:, ix, ix].sum(axis=0).diagonal()
nobs = Eaa.shape[0] - 1
alpha = Bd / Ad
sigma2 = (Cd - alpha * Bd) / nobs
else:
ix = s['idio_ar_L1']
C = Eaa[:, ix, ix].sum(axis=0)
sigma2 = np.r_[H.diagonal()[self._o['M']],
C.diagonal() / Eaa.shape[0]]
# Save parameters
params1 = np.zeros_like(params0)
loadings = []
for i in range(self.k_endog):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
loadings += Lambda[i, factor_ix].tolist()
params1[self._p['loadings']] = loadings
params1[self._p['factor_ar']] = factor_ar
params1[self._p['factor_cov']] = factor_cov
if self.idiosyncratic_ar1:
params1[self._p['idiosyncratic_ar1']] = alpha
params1[self._p['idiosyncratic_var']] = sigma2
return params1
def _em_maximization_obs_nonmissing(self, res, Eaa, a, compute_H=False):
"""EM maximization step, observation equation without missing data."""
s = self._s
dtype = Eaa.dtype
# Observation equation (non-missing)
# Note: we only compute loadings for monthly variables because
# quarterly variables will always have missing entries, so we would
# never choose this method in that case
k = s.k_states_factors
Lambda = np.zeros((self.k_endog, k), dtype=dtype)
for i in range(self.k_endog):
y = self.endog[:, i:i + 1]
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
ix = (np.s_[:],) + np.ix_(factor_ix, factor_ix)
A = Eaa[ix].sum(axis=0)
B = y.T @ a[:, factor_ix, 0]
if self.idiosyncratic_ar1:
ix1 = s.k_states_factors + i
ix2 = ix1 + 1
B -= Eaa[:, ix1:ix2, factor_ix].sum(axis=0)
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
Lambda[i, factor_ix] = cho_solve(cho_factor(A), B.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
Lambda[i, factor_ix] = np.linalg.solve(A, B.T).T
# Compute new obs cov
# Note: this is unnecessary if `idiosyncratic_ar1=True`.
# This is written in a slightly more general way than
# Banbura and Modugno (2014), equation (7); see instead equation (13)
# of Wu et al. (1996)
# "An algorithm for estimating parameters of state-space models"
if compute_H:
Z = self['design'].copy()
Z[:, :k] = Lambda
BL = self.endog.T @ a[..., 0] @ Z.T
C = self.endog.T @ self.endog
H = (C + -BL - BL.T + Z @ Eaa.sum(axis=0) @ Z.T) / self.nobs
else:
H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
return Lambda, H
def _em_maximization_obs_missing(self, res, Eaa, a, compute_H=False):
"""EM maximization step, observation equation with missing data."""
s = self._s
dtype = Eaa.dtype
# Observation equation (missing)
k = s.k_states_factors
Lambda = np.zeros((self.k_endog, k), dtype=dtype)
W = (1 - res.missing.T)
mask = W.astype(bool)
# Compute design for monthly
# Note: the relevant A changes for each i
for i in range(self.k_endog_M):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1'][iloc]
m = mask[:, i]
yt = self.endog[m, i:i + 1]
ix = np.ix_(m, factor_ix, factor_ix)
Ai = Eaa[ix].sum(axis=0)
Bi = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
if self.idiosyncratic_ar1:
ix1 = s.k_states_factors + i
ix2 = ix1 + 1
Bi -= Eaa[m, ix1:ix2][..., factor_ix].sum(axis=0)
# want: x = B A^{-1}, so solve: x A = B or solve: A' x' = B'
try:
Lambda[i, factor_ix] = cho_solve(cho_factor(Ai), Bi.T).T
except LinAlgError:
# Fall back to general solver if there are problems with
                # positive-definiteness
Lambda[i, factor_ix] = np.linalg.solve(Ai, Bi.T).T
# Compute unrestricted design for quarterly
        # See Banbura et al. (2011), where this is described in Appendix C,
# between equations (13) and (14).
if self.k_endog_Q > 0:
# Note: the relevant A changes for each i
multipliers = np.array([1, 2, 3, 2, 1])[:, None]
for i in range(self.k_endog_M, self.k_endog):
iloc = self._s.endog_factor_iloc[i]
factor_ix = s['factors_L1_5_ix'][:, iloc].ravel().tolist()
R, _ = self.loading_constraints(i)
iQ = i - self.k_endog_M
m = mask[:, i]
yt = self.endog[m, i:i + 1]
ix = np.ix_(m, factor_ix, factor_ix)
Ai = Eaa[ix].sum(axis=0)
BiQ = yt.T @ a[np.ix_(m, factor_ix)][..., 0]
if self.idiosyncratic_ar1:
ix = (np.s_[:],) + np.ix_(s['idio_ar_Q_ix'][iQ], factor_ix)
Eepsf = Eaa[ix]
BiQ -= (multipliers * Eepsf[m].sum(axis=0)).sum(axis=0)
# Note that there was a typo in Banbura et al. (2011) for
# the formula applying the restrictions. In their notation,
# they show (C D C')^{-1} while it should be (C D^{-1} C')^{-1}
# Note: in reality, this is:
# unrestricted - Aii @ R.T @ RARi @ (R @ unrestricted - q)
# where the restrictions are defined as: R @ unrestricted = q
# However, here q = 0, so we can simplify.
try:
L_and_lower = cho_factor(Ai)
# x = BQ A^{-1}, or x A = BQ, so solve A' x' = (BQ)'
unrestricted = cho_solve(L_and_lower, BiQ.T).T[0]
AiiRT = cho_solve(L_and_lower, R.T)
L_and_lower = cho_factor(R @ AiiRT)
RAiiRTiR = cho_solve(L_and_lower, R)
restricted = unrestricted - AiiRT @ RAiiRTiR @ unrestricted
except LinAlgError:
# Fall back to slower method if there are problems with
                    # positive-definiteness
Aii = np.linalg.inv(Ai)
unrestricted = (BiQ @ Aii)[0]
RARi = np.linalg.inv(R @ Aii @ R.T)
restricted = (unrestricted -
Aii @ R.T @ RARi @ R @ unrestricted)
Lambda[i, factor_ix] = restricted
# Compute new obs cov
# Note: this is unnecessary if `idiosyncratic_ar1=True`.
# See Banbura and Modugno (2014), equation (12)
# This does not literally follow their formula, e.g. multiplying by the
# W_t selection matrices, because those formulas require loops that are
# relatively slow. The formulation here is vectorized.
if compute_H:
Z = self['design'].copy()
Z[:, :Lambda.shape[1]] = Lambda
y = np.nan_to_num(self.endog)
C = y.T @ y
W = W[..., None]
IW = 1 - W
WL = W * Z
WLT = WL.transpose(0, 2, 1)
BL = y[..., None] @ a.transpose(0, 2, 1) @ WLT
A = Eaa
BLT = BL.transpose(0, 2, 1)
IWT = IW.transpose(0, 2, 1)
H = (C + (-BL - BLT + WL @ A @ WLT +
IW * self['obs_cov'] * IWT).sum(axis=0)) / self.nobs
else:
H = np.zeros((self.k_endog, self.k_endog), dtype=dtype) * np.nan
return Lambda, H
def smooth(self, params, transformed=True, includes_fixed=False,
complex_step=False, cov_type='none', cov_kwds=None,
return_ssm=False, results_class=None,
results_wrapper_class=None, **kwargs):
"""
Kalman smoothing.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : bool, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
See `MLEResults.fit` for a description of covariance matrix types
            for results object. Default is 'none'.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of
            required keywords for alternative covariance estimators.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
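        Examples
        --------
        A sketch, where ``mod`` is a DynamicFactorMQ instance and ``params``
        is a hypothetical vector of fitted parameter values:
        >>> res = mod.smooth(params)
        >>> print(res.summary())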
"""
return super().smooth(
params, transformed=transformed, includes_fixed=includes_fixed,
complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
return_ssm=return_ssm, results_class=results_class,
results_wrapper_class=results_wrapper_class, **kwargs)
def filter(self, params, transformed=True, includes_fixed=False,
complex_step=False, cov_type='none', cov_kwds=None,
return_ssm=False, results_class=None,
results_wrapper_class=None, low_memory=False, **kwargs):
"""
Kalman filtering.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : bool, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : bool, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
See `MLEResults.fit` for a description of covariance matrix types
for results object. Default is 'none'.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of
            required keywords for alternative covariance estimators.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including in-sample prediction), although
out-of-sample forecasting is possible. Default is False.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
"""
return super().filter(
params, transformed=transformed, includes_fixed=includes_fixed,
complex_step=complex_step, cov_type=cov_type, cov_kwds=cov_kwds,
return_ssm=return_ssm, results_class=results_class,
results_wrapper_class=results_wrapper_class, **kwargs)
def simulate(self, params, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None, anchor=None,
repetitions=None, exog=None, extend_model=None,
extend_kwargs=None, transformed=True, includes_fixed=False,
original_scale=True, **kwargs):
r"""
Simulate a new time series following the state space model.
Parameters
----------
params : array_like
Array of parameters to use in constructing the state space
representation to use when simulating.
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the initial state vector to use in
simulation, which should be shaped (`k_states` x 1), where
`k_states` is the same as in the state space model. If unspecified,
but the model has been initialized, then that initialization is
used. This must be specified if `anchor` is anything other than
"start" or 0 (or else you can use the `simulate` method on a
results object rather than on the model object).
anchor : int, str, or datetime, optional
First period for simulation. The simulation will be conditional on
all existing datapoints prior to the `anchor`. Type depends on the
index of the given `endog` in the model. Two special cases are the
strings 'start' and 'end'. `start` refers to beginning the
simulation at the first period of the sample, and `end` refers to
beginning the simulation at the first period after the sample.
Integer values can run from 0 to `nobs`, or can be negative to
apply negative indexing. Finally, if a date/time index was provided
to the model, then this argument can be a date string to parse or a
datetime type. Default is 'start'.
repetitions : int, optional
Number of simulated paths to generate. Default is 1 simulated path.
exog : array_like, optional
New observations of exogenous regressors, if applicable.
transformed : bool, optional
Whether or not `params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return simulations in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
Returns
-------
simulated_obs : ndarray
An array of simulated observations. If `repetitions=None`, then it
will be shaped (nsimulations x k_endog) or (nsimulations,) if
`k_endog=1`. Otherwise it will be shaped
(nsimulations x k_endog x repetitions). If the model was given
Pandas input then the output will be a Pandas object. If
`k_endog > 1` and `repetitions` is not None, then the output will
be a Pandas DataFrame that has a MultiIndex for the columns, with
the first level containing the names of the `endog` variables and
the second level containing the repetition number.
"""
# Get usual simulations (in the possibly-standardized scale)
sim = super().simulate(
params, nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state,
anchor=anchor, repetitions=repetitions, exog=exog,
extend_model=extend_model, extend_kwargs=extend_kwargs,
transformed=transformed, includes_fixed=includes_fixed, **kwargs)
# If applicable, convert predictions back to original space
if self.standardize and original_scale:
use_pandas = isinstance(self.data, PandasData)
shape = sim.shape
if use_pandas:
# pd.Series (k_endog=1, replications=None)
if len(shape) == 1:
sim = sim * self._endog_std[0] + self._endog_mean[0]
# pd.DataFrame (k_endog > 1, replications=None)
# [or]
# pd.DataFrame with MultiIndex (replications > 0)
elif len(shape) == 2:
sim = (sim.multiply(self._endog_std, axis=1, level=0)
.add(self._endog_mean, axis=1, level=0))
else:
# 1-dim array (k_endog=1, replications=None)
if len(shape) == 1:
sim = sim * self._endog_std + self._endog_mean
# 2-dim array (k_endog > 1, replications=None)
elif len(shape) == 2:
sim = sim * self._endog_std + self._endog_mean
# 3-dim array with MultiIndex (replications > 0)
else:
# Get arrays into the form that can be used for
# broadcasting
std = np.atleast_2d(self._endog_std)[..., None]
mean = np.atleast_2d(self._endog_mean)[..., None]
sim = sim * std + mean
return sim
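    # A minimal usage sketch for `simulate` (not part of the library source;
    # the data and settings below are hypothetical): `endog_m` is assumed to
    # be a monthly pandas DataFrame of indicators.
    #
    #     mod = DynamicFactorMQ(endog_m, factors=1, factor_orders=2)
    #     res = mod.fit()
    #     # 100 simulated observations, 50 repetitions, in the original scale
    #     sims = mod.simulate(res.params, nsimulations=100, repetitions=50)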
def impulse_responses(self, params, steps=1, impulse=0,
orthogonalized=False, cumulative=False, anchor=None,
exog=None, extend_model=None, extend_kwargs=None,
transformed=True, includes_fixed=False,
original_scale=True, **kwargs):
"""
Impulse response function.
Parameters
----------
params : array_like
Array of model parameters.
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 1. Note that for time-invariant models, the initial
impulse is not counted as a step, so if `steps=1`, the output will
have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1`. Alternatively, a custom impulse vector may be
provided; must be shaped `k_posdef x 1`.
orthogonalized : bool, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : bool, optional
Whether or not to return cumulative impulse responses. Default is
False.
anchor : int, str, or datetime, optional
Time point within the sample for the state innovation impulse. Type
depends on the index of the given `endog` in the model. Two special
cases are the strings 'start' and 'end', which refer to setting the
impulse at the first and last points of the sample, respectively.
Integer values can run from 0 to `nobs - 1`, or can be negative to
apply negative indexing. Finally, if a date/time index was provided
to the model, then this argument can be a date string to parse or a
datetime type. Default is 'start'.
exog : array_like, optional
            New observations of exogenous regressors for out-of-sample periods,
if applicable.
transformed : bool, optional
Whether or not `params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return impulse responses in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
**kwargs
If the model has time-varying design or transition matrices and the
combination of `anchor` and `steps` implies creating impulse
responses for the out-of-sample period, then these matrices must
have updated values provided for the out-of-sample steps. For
example, if `design` is a time-varying component, `nobs` is 10,
`anchor=1`, and `steps` is 15, a (`k_endog` x `k_states` x 7)
matrix must be provided with the new design matrix values.
Returns
-------
impulse_responses : ndarray
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. For a time-invariant model, the
impulse responses are given for `steps + 1` elements (this gives
the "initial impulse" followed by `steps` responses for the
important cases of VAR and SARIMAX models), while for time-varying
models the impulse responses are only given for `steps` elements
(to avoid having to unexpectedly provide updated time-varying
matrices).
"""
# Get usual simulations (in the possibly-standardized scale)
irfs = super().impulse_responses(
params, steps=steps, impulse=impulse,
orthogonalized=orthogonalized, cumulative=cumulative,
anchor=anchor, exog=exog, extend_model=extend_model,
extend_kwargs=extend_kwargs, transformed=transformed,
includes_fixed=includes_fixed, original_scale=original_scale,
**kwargs)
# If applicable, convert predictions back to original space
if self.standardize and original_scale:
use_pandas = isinstance(self.data, PandasData)
shape = irfs.shape
if use_pandas:
# pd.Series (k_endog=1, replications=None)
if len(shape) == 1:
irfs = irfs * self._endog_std[0]
# pd.DataFrame (k_endog > 1)
# [or]
# pd.DataFrame with MultiIndex (replications > 0)
elif len(shape) == 2:
irfs = irfs.multiply(self._endog_std, axis=1, level=0)
else:
# 1-dim array (k_endog=1)
if len(shape) == 1:
irfs = irfs * self._endog_std
# 2-dim array (k_endog > 1)
elif len(shape) == 2:
irfs = irfs * self._endog_std
return irfs
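    # A minimal usage sketch for `impulse_responses` (hypothetical objects
    # `mod` and `res` as in the sketch above): 10 responses to an
    # orthogonalized shock to the first factor, in the original data scale.
    #
    #     irfs = mod.impulse_responses(res.params, steps=10, impulse=0,
    #                                  orthogonalized=True)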
class DynamicFactorMQResults(mlemodel.MLEResults):
"""
Results from fitting a dynamic factor model
"""
def __init__(self, model, params, filter_results, cov_type=None, **kwargs):
super(DynamicFactorMQResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
@property
def factors(self):
"""
Estimates of unobserved factors.
Returns
-------
out : Bunch
Has the following attributes shown in Notes.
Notes
-----
The output is a bunch of the following format:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
out = None
if self.model.k_factors > 0:
iloc = self.model._s.factors_L1
ix = np.array(self.model.state_names)[iloc].tolist()
out = Bunch(
filtered=self.states.filtered.loc[:, ix],
filtered_cov=self.states.filtered_cov.loc[np.s_[ix, :], ix],
smoothed=None, smoothed_cov=None)
if self.smoothed_state is not None:
out.smoothed = self.states.smoothed.loc[:, ix]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.states.smoothed_cov.loc[np.s_[ix, :], ix])
return out
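    # Illustrative access pattern for the `factors` bunch (assuming `res` is a
    # fitted DynamicFactorMQResults instance; the plot call needs matplotlib):
    #
    #     estimates = res.factors
    #     estimates.smoothed.plot()     # smoothed factor estimates (DataFrame)
    #     estimates.filtered_cov        # filtered factor covariances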
def get_coefficients_of_determination(self, method='individual',
which=None):
"""
Get coefficients of determination (R-squared) for variables / factors.
Parameters
----------
method : {'individual', 'joint', 'cumulative'}, optional
The type of R-squared values to generate. "individual" plots
the R-squared of each variable on each factor; "joint" plots the
R-squared of each variable on each factor that it loads on;
"cumulative" plots the successive R-squared values as each
additional factor is added to the regression, for each variable.
Default is 'individual'.
which: {None, 'filtered', 'smoothed'}, optional
Whether to compute R-squared values based on filtered or smoothed
estimates of the factors. Default is 'smoothed' if smoothed results
are available and 'filtered' otherwise.
Returns
-------
rsquared : pd.DataFrame or pd.Series
The R-squared values from regressions of observed variables on
one or more of the factors. If method='individual' or
method='cumulative', this will be a Pandas DataFrame with observed
            variables as the index and factors as the columns. If
method='joint', will be a Pandas Series with observed variables as
the index.
See Also
--------
plot_coefficients_of_determination
coefficients_of_determination
"""
from statsmodels.tools import add_constant
method = string_like(method, 'method', options=['individual', 'joint',
'cumulative'])
if which is None:
which = 'filtered' if self.smoothed_state is None else 'smoothed'
k_endog = self.model.k_endog
k_factors = self.model.k_factors
ef_map = self.model._s.endog_factor_map
endog_names = self.model.endog_names
factor_names = self.model.factor_names
if method == 'individual':
coefficients = np.zeros((k_endog, k_factors))
for i in range(k_factors):
exog = add_constant(self.factors[which].iloc[:, i])
for j in range(k_endog):
if ef_map.iloc[j, i]:
endog = self.filter_results.endog[j]
coefficients[j, i] = (
OLS(endog, exog, missing='drop').fit().rsquared)
else:
coefficients[j, i] = np.nan
coefficients = pd.DataFrame(coefficients, index=endog_names,
columns=factor_names)
elif method == 'joint':
coefficients = np.zeros((k_endog,))
exog = add_constant(self.factors[which])
for j in range(k_endog):
endog = self.filter_results.endog[j]
ix = np.r_[True, ef_map.iloc[j]].tolist()
X = exog.loc[:, ix]
coefficients[j] = (
OLS(endog, X, missing='drop').fit().rsquared)
coefficients = pd.Series(coefficients, index=endog_names)
elif method == 'cumulative':
coefficients = np.zeros((k_endog, k_factors))
exog = add_constant(self.factors[which])
for j in range(k_endog):
endog = self.filter_results.endog[j]
for i in range(k_factors):
if self.model._s.endog_factor_map.iloc[j, i]:
ix = np.r_[True, ef_map.iloc[j, :i + 1],
[False] * (k_factors - i - 1)]
X = exog.loc[:, ix.astype(bool).tolist()]
coefficients[j, i] = (
OLS(endog, X, missing='drop').fit().rsquared)
else:
coefficients[j, i] = np.nan
coefficients = pd.DataFrame(coefficients, index=endog_names,
columns=factor_names)
return coefficients
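    # Usage sketch (hypothetical fitted results object `res`): 'joint'
    # regresses each observed series on all of the factors that it loads on.
    #
    #     r2_ind = res.get_coefficients_of_determination(method='individual')
    #     r2_joint = res.get_coefficients_of_determination(method='joint',
    #                                                      which='smoothed')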
@cache_readonly
def coefficients_of_determination(self):
"""
Individual coefficients of determination (:math:`R^2`).
Coefficients of determination (:math:`R^2`) from regressions of
endogenous variables on individual estimated factors.
Returns
-------
coefficients_of_determination : ndarray
A `k_endog` x `k_factors` array, where
`coefficients_of_determination[i, j]` represents the :math:`R^2`
value from a regression of factor `j` and a constant on endogenous
variable `i`.
Notes
-----
Although it can be difficult to interpret the estimated factor loadings
and factors, it is often helpful to use the coefficients of
determination from univariate regressions to assess the importance of
each factor in explaining the variation in each endogenous variable.
In models with many variables and factors, this can sometimes lend
interpretation to the factors (for example sometimes one factor will
load primarily on real variables and another on nominal variables).
See Also
--------
get_coefficients_of_determination
plot_coefficients_of_determination
"""
return self.get_coefficients_of_determination(method='individual')
def plot_coefficients_of_determination(self, method='individual',
which=None, endog_labels=None,
fig=None, figsize=None):
"""
Plot coefficients of determination (R-squared) for variables / factors.
Parameters
----------
method : {'individual', 'joint', 'cumulative'}, optional
The type of R-squared values to generate. "individual" plots
the R-squared of each variable on each factor; "joint" plots the
R-squared of each variable on each factor that it loads on;
"cumulative" plots the successive R-squared values as each
additional factor is added to the regression, for each variable.
Default is 'individual'.
which: {None, 'filtered', 'smoothed'}, optional
Whether to compute R-squared values based on filtered or smoothed
estimates of the factors. Default is 'smoothed' if smoothed results
are available and 'filtered' otherwise.
endog_labels : bool, optional
Whether or not to label the endogenous variables along the x-axis
of the plots. Default is to include labels if there are 5 or fewer
endogenous variables.
fig : Figure, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
The endogenous variables are arranged along the x-axis according to
their position in the model's `endog` array.
See Also
--------
get_coefficients_of_determination
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
method = string_like(method, 'method', options=['individual', 'joint',
'cumulative'])
# Should we label endogenous variables?
if endog_labels is None:
endog_labels = self.model.k_endog <= 5
# Plot the coefficients of determination
rsquared = self.get_coefficients_of_determination(method=method,
which=which)
if method in ['individual', 'cumulative']:
plot_idx = 1
for factor_name, coeffs in rsquared.T.iterrows():
# Create the new axis
ax = fig.add_subplot(self.model.k_factors, 1, plot_idx)
ax.set_ylim((0, 1))
ax.set(title=f'{factor_name}', ylabel=r'$R^2$')
coeffs.plot(ax=ax, kind='bar')
if plot_idx < len(rsquared.columns) or not endog_labels:
ax.xaxis.set_ticklabels([])
plot_idx += 1
elif method == 'joint':
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim((0, 1))
ax.set(title=r'$R^2$ - regression on all loaded factors',
ylabel=r'$R^2$')
rsquared.plot(ax=ax, kind='bar')
if not endog_labels:
ax.xaxis.set_ticklabels([])
return fig
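    # Usage sketch (requires matplotlib; the figure size and file name below
    # are illustrative only):
    #
    #     fig = res.plot_coefficients_of_determination(method='cumulative',
    #                                                  figsize=(10, 6))
    #     fig.savefig('rsquared.png')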
def get_prediction(self, start=None, end=None, dynamic=False,
index=None, exog=None, extend_model=None,
extend_kwargs=None, original_scale=True, **kwargs):
"""
In-sample prediction and out-of-sample forecasting.
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
original_scale : bool, optional
If the model specification standardized the data, whether or not
to return predictions in the original scale of the data (i.e.
before it was standardized by the model). Default is True.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : ndarray
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
# Get usual predictions (in the possibly-standardized scale)
res = super().get_prediction(start=start, end=end, dynamic=dynamic,
index=index, exog=exog,
extend_model=extend_model,
extend_kwargs=extend_kwargs, **kwargs)
# If applicable, convert predictions back to original space
if self.model.standardize and original_scale:
prediction_results = res.prediction_results
k_endog, nobs = prediction_results.endog.shape
mean = np.array(self.model._endog_mean)
std = np.array(self.model._endog_std)
if self.model.k_endog > 1:
mean = mean[None, :]
std = std[None, :]
if not prediction_results.results.memory_no_forecast_mean:
res._results._predicted_mean = (
res._results._predicted_mean * std + mean)
if not prediction_results.results.memory_no_forecast_cov:
if k_endog == 1:
res._results._var_pred_mean *= std**2
else:
res._results._var_pred_mean = (
std * res._results._var_pred_mean * std.T)
return res
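    # Usage sketch for prediction / forecasting in the original data scale
    # (the date labels are hypothetical):
    #
    #     pred = res.get_prediction(start='2019-01', end='2020-12')
    #     mean = pred.predicted_mean
    #     ci = pred.conf_int(alpha=0.05)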
def news(self, comparison, impact_date=None, impacted_variable=None,
start=None, end=None, periods=None, exog=None,
comparison_type=None, return_raw=False, tolerance=1e-10,
endog_quarterly=None, original_scale=True, **kwargs):
"""
Compute impacts from updated data (news and revisions).
Parameters
----------
comparison : array_like or MLEResults
An updated dataset with updated and/or revised data from which the
news can be computed, or an updated or previous results object
to use in computing the news.
impact_date : int, str, or datetime, optional
A single specific period of impacts from news and revisions to
compute. Can also be a date string to parse or a datetime type.
This argument cannot be used in combination with `start`, `end`, or
`periods`. Default is the first out-of-sample observation.
impacted_variable : str, list, array, or slice, optional
Observation variable label or slice of labels specifying that only
specific impacted variables should be shown in the News output. The
impacted variable(s) describe the variables that were *affected* by
the news. If you do not know the labels for the variables, check
the `endog_names` attribute of the model instance.
start : int, str, or datetime, optional
The first period of impacts from news and revisions to compute.
Can also be a date string to parse or a datetime type. Default is
the first out-of-sample observation.
end : int, str, or datetime, optional
The last period of impacts from news and revisions to compute.
Can also be a date string to parse or a datetime type. Default is
the first out-of-sample observation.
periods : int, optional
The number of periods of impacts from news and revisions to
compute.
exog : array_like, optional
Array of exogenous regressors for the out-of-sample period, if
applicable.
comparison_type : {None, 'previous', 'updated'}
This denotes whether the `comparison` argument represents a
*previous* results object or dataset or an *updated* results object
or dataset. If not specified, then an attempt is made to determine
the comparison type.
return_raw : bool, optional
Whether or not to return only the specific output or a full
results object. Default is to return a full results object.
tolerance : float, optional
The numerical threshold for determining zero impact. Default is
that any impact less than 1e-10 is assumed to be zero.
endog_quarterly : array_like, optional
New observations of quarterly variables, if `comparison` was
provided as an updated monthly dataset. If this argument is
provided, it must be a Pandas Series or DataFrame with a
DatetimeIndex or PeriodIndex at the quarterly frequency.
References
----------
.. [1] Bańbura, Marta, and Michele Modugno.
"Maximum likelihood estimation of factor models on datasets with
arbitrary pattern of missing data."
Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
.. [2] Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin.
"Nowcasting."
The Oxford Handbook of Economic Forecasting. July 8, 2011.
.. [3] Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia
Reichlin.
"Now-casting and the real-time data flow."
In Handbook of economic forecasting, vol. 2, pp. 195-237.
Elsevier, 2013.
"""
news_results = super().news(
comparison, impact_date=impact_date,
impacted_variable=impacted_variable, start=start, end=end,
periods=periods, exog=exog, comparison_type=comparison_type,
return_raw=return_raw, tolerance=tolerance,
endog_quarterly=endog_quarterly, **kwargs)
# If we have standardized the data, we may want to report the news in
# the original scale. If so, we need to modify the data to "undo" the
# standardization.
if not return_raw and self.model.standardize and original_scale:
endog_mean = self.model._endog_mean
endog_std = self.model._endog_std
# Don't need to add in the mean for the impacts, since they are
# the difference of two forecasts
news_results.total_impacts = (
news_results.total_impacts * endog_std)
news_results.update_impacts = (
news_results.update_impacts * endog_std)
if news_results.revision_impacts is not None:
news_results.revision_impacts = (
news_results.revision_impacts * endog_std)
# Update forecasts
for name in ['prev_impacted_forecasts', 'news', 'update_realized',
'update_forecasts', 'post_impacted_forecasts']:
dta = getattr(news_results, name)
# for pd.Series, dta.multiply(...) removes the name attribute;
# save it now so that we can add it back in
orig_name = None
if hasattr(dta, 'name'):
orig_name = dta.name
dta = dta.multiply(endog_std, level=1)
# add back in the name attribute if it was removed
if orig_name is not None:
dta.name = orig_name
if name != 'news':
dta = dta.add(endog_mean, level=1)
setattr(news_results, name, dta)
# For the weights: rows correspond to update (date, variable) and
# columns correspond to the impacted variable.
# 1. Because we have modified the updates (realized, forecasts, and
# forecast errors) to be in the scale of the original updated
# variable, we need to essentially reverse that change for each
# row of the weights by dividing by the standard deviation of
# that row's updated variable
# 2. Because we want the impacts to be in the scale of the original
# impacted variable, we need to multiply each column by the
# standard deviation of that column's impacted variable
news_results.weights = (
news_results.weights.divide(endog_std, axis=0, level=1)
.multiply(endog_std, axis=1, level=1))
return news_results
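    # Usage sketch for the news decomposition (`res_prev` is a hypothetical
    # results object fit on an earlier data vintage; labels are illustrative):
    #
    #     news = res.news(res_prev, comparison_type='previous',
    #                     impact_date='2020-06', impacted_variable='gdp')
    #     news.summary()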
def append(self, endog, endog_quarterly=None, refit=False, fit_kwargs=None,
copy_initialization=True, retain_standardization=True,
**kwargs):
"""
Recreate the results object with new data appended to original data.
Creates a new result object applied to a dataset that is created by
appending new data to the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, based on the combined dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is True.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results from both the
original dataset and the new dataset.
Notes
-----
The `endog` and `exog` arguments to this method must be formatted in
the same way (e.g. Pandas Series versus Numpy array) as were the
`endog` and `exog` arrays passed to the original model.
The `endog` (and, if applicable, `endog_quarterly`) arguments to this
method should consist of new observations that occurred directly after
the last element of `endog`. For any other kind of dataset, see the
`apply` method.
This method will apply filtering to all of the original data as well
as to the new data. To apply filtering only to the new data (which
can be much faster if the original dataset is large), see the `extend`
method.
See Also
--------
extend
apply
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
raise ValueError('Cannot append data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().append(
endog, refit=refit, fit_kwargs=fit_kwargs,
copy_initialization=copy_initialization,
retain_standardization=retain_standardization, **kwargs)
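    # Usage sketch for `append` (hypothetical frames `new_m` / `new_q` holding
    # monthly and quarterly observations that directly follow the sample):
    #
    #     res_updated = res.append(new_m, endog_quarterly=new_q)
    #     res_updated.forecast(steps=3)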
def extend(self, endog, endog_quarterly=None, fit_kwargs=None,
retain_standardization=True, **kwargs):
"""
Recreate the results object for new data that extends original data.
Creates a new result object applied to a new dataset that is assumed to
follow directly from the end of the model's original data. The new
results can then be used for analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
fit_kwargs : dict, optional
Keyword arguments to pass to `filter` or `smooth`.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
append
apply
Notes
-----
The `endog` argument to this method should consist of new observations
that occurred directly after the last element of the model's original
`endog` array. For any other kind of dataset, see the `apply` method.
This method will apply filtering only to the new data provided by the
`endog` argument, which can be much faster than re-filtering the entire
dataset. However, the returned results object will only have results
for the new data. To retrieve results for both the new data and the
original data, see the `append` method.
"""
# Construct the combined dataset, if necessary
endog, k_endog_monthly = DynamicFactorMQ.construct_endog(
endog, endog_quarterly)
# Check for compatible dimensions
k_endog = endog.shape[1] if len(endog.shape) == 2 else 1
if (k_endog_monthly != self.model.k_endog_M or
k_endog != self.model.k_endog):
            raise ValueError('Cannot extend data of a different dimension to'
' a model.')
kwargs['k_endog_monthly'] = k_endog_monthly
return super().extend(
endog, fit_kwargs=fit_kwargs,
retain_standardization=retain_standardization, **kwargs)
def apply(self, endog, k_endog_monthly=None, endog_quarterly=None,
refit=False, fit_kwargs=None, copy_initialization=False,
retain_standardization=True, **kwargs):
"""
Apply the fitted parameters to new data unrelated to the original data.
Creates a new result object using the current fitted parameters,
applied to a completely new dataset that is assumed to be unrelated to
the model's original data. The new results can then be used for
analysis or forecasting.
Parameters
----------
endog : array_like
New observations from the modeled time-series process.
k_endog_monthly : int, optional
If specifying a monthly/quarterly mixed frequency model in which
the provided `endog` dataset contains both the monthly and
quarterly data, this variable should be used to indicate how many
of the variables are monthly.
endog_quarterly : array_like, optional
New observations of quarterly variables. If provided, must be a
Pandas Series or DataFrame with a DatetimeIndex or PeriodIndex at
the quarterly frequency.
refit : bool, optional
Whether to re-fit the parameters, using the new dataset.
Default is False (so parameters from the current results object
are used to create the new results object).
fit_kwargs : dict, optional
Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /
`smooth`.
copy_initialization : bool, optional
Whether or not to copy the initialization from the current results
set to the new model. Default is False.
retain_standardization : bool, optional
Whether or not to use the mean and standard deviations that were
used to standardize the data in the current model in the new model.
Default is True.
**kwargs
Keyword arguments may be used to modify model specification
            arguments when creating the new model object.
Returns
-------
results
Updated Results object, that includes results only for the new
dataset.
See Also
--------
statsmodels.tsa.statespace.mlemodel.MLEResults.append
statsmodels.tsa.statespace.mlemodel.MLEResults.apply
Notes
-----
The `endog` argument to this method should consist of new observations
that are not necessarily related to the original model's `endog`
        dataset. For observations that continue the original dataset by following
directly after its last element, see the `append` and `extend` methods.
"""
mod = self.model.clone(endog, k_endog_monthly=k_endog_monthly,
endog_quarterly=endog_quarterly,
retain_standardization=retain_standardization,
**kwargs)
if copy_initialization:
res = self.filter_results
init = initialization.Initialization(
self.model.k_states, 'known', constant=res.initial_state,
stationary_cov=res.initial_state_cov)
mod.ssm.initialization = init
res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)
return res
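    # Usage sketch for `apply` (the dataset `other_m` / `other_q` is
    # hypothetical and unrelated to the original sample):
    #
    #     res_other = res.apply(other_m, endog_quarterly=other_q,
    #                           retain_standardization=True)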
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True, display_diagnostics=False,
display_params_as_list=False, truncate_endog_names=None,
display_max_endog=3):
"""
Summarize the Model.
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title used for the summary table.
model_name : str, optional
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
mod = self.model
# Default title / model name
if title is None:
title = 'Dynamic Factor Results'
if model_name is None:
model_name = self.model._model_name
# Get endog names
endog_names = self.model._get_endog_names(
truncate=truncate_endog_names)
# Get extra elements for top summary table
extra_top_left = None
extra_top_right = []
mle_retvals = getattr(self, 'mle_retvals', None)
mle_settings = getattr(self, 'mle_settings', None)
if mle_settings is not None and mle_settings.method == 'em':
extra_top_right += [('EM Iterations', [f'{mle_retvals.iter}'])]
# Get the basic summary tables
summary = super().summary(
alpha=alpha, start=start, title=title, model_name=model_name,
display_params=(display_params and display_params_as_list),
display_diagnostics=display_diagnostics,
truncate_endog_names=truncate_endog_names,
display_max_endog=display_max_endog,
extra_top_left=extra_top_left, extra_top_right=extra_top_right)
# Get tables of parameters
table_ix = 1
if not display_params_as_list:
# Observation equation table
data = pd.DataFrame(
self.filter_results.design[:, mod._s['factors_L1'], 0],
index=endog_names, columns=mod.factor_names)
data = data.applymap(lambda s: '%.2f' % s)
# Idiosyncratic terms
# data[' '] = ' '
k_idio = 1
if mod.idiosyncratic_ar1:
data[' idiosyncratic: AR(1)'] = (
self.params[mod._p['idiosyncratic_ar1']])
k_idio += 1
data['var.'] = self.params[mod._p['idiosyncratic_var']]
data.iloc[:, -k_idio:] = data.iloc[:, -k_idio:].applymap(
lambda s: '%.2f' % s)
data.index.name = 'Factor loadings:'
# Clear entries for non-loading factors
base_iloc = np.arange(mod.k_factors)
for i in range(mod.k_endog):
iloc = [j for j in base_iloc
if j not in mod._s.endog_factor_iloc[i]]
data.iloc[i, iloc] = '.'
data = data.reset_index()
# Build the table
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = 'Observation equation:'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
# Factor transitions
ix1 = 0
ix2 = 0
for i in range(len(mod._s.factor_blocks)):
block = mod._s.factor_blocks[i]
ix2 += block.k_factors
T = self.filter_results.transition
lag_names = []
for j in range(block.factor_order):
lag_names += [f'L{j + 1}.{name}'
for name in block.factor_names]
data = pd.DataFrame(T[block.factors_L1, block.factors_ar, 0],
index=block.factor_names,
columns=lag_names)
data.index.name = ''
data = data.applymap(lambda s: '%.2f' % s)
Q = self.filter_results.state_cov
# data[' '] = ''
if block.k_factors == 1:
data[' error variance'] = Q[ix1, ix1]
else:
data[' error covariance'] = block.factor_names
for j in range(block.k_factors):
data[block.factor_names[j]] = Q[ix1:ix2, ix1 + j]
data.iloc[:, -block.k_factors:] = (
data.iloc[:, -block.k_factors:].applymap(
lambda s: '%.2f' % s))
data = data.reset_index()
params_data = data.values
params_header = data.columns.tolist()
params_stubs = None
title = f'Transition: Factor block {i}'
table = SimpleTable(
params_data, params_header, params_stubs,
txt_fmt=fmt_params, title=title)
summary.tables.insert(table_ix, table)
table_ix += 1
ix1 = ix2
return summary
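# Usage sketch for `summary` (hypothetical fitted results object `res`; the
# exact tables shown depend on the model specification):
#
#     print(res.summary())         # header, observation equation, transitions
#     res.summary().tables[1]      # e.g. the factor-loading table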
|
jseabold/statsmodels
|
statsmodels/tsa/statespace/dynamic_factor_mq.py
|
Python
|
bsd-3-clause
| 186,145
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module is OBSOLETE.
Most of the functionality in this module has moved to Bio.ExPASy.Prodoc;
please see
Bio.ExPASy.Prodoc.read To read a Prodoc file containing one entry.
Bio.ExPASy.Prodoc.parse Iterates over entries in a Prodoc file.
Bio.ExPASy.Prodoc.Record Holds Prodoc data.
Bio.ExPASy.Prodoc.Reference Holds data from a Prodoc reference.
The other functions and classes in Bio.Prosite.Prodoc (including
Bio.Prosite.Prodoc.index_file and Bio.Prosite.Prodoc.Dictionary) are
considered deprecated, and were not moved to Bio.ExPASy.Prodoc. If you use
this functionality, please contact the Biopython developers at
biopython-dev@biopython.org to avoid permanent removal of this module from
Biopython.
This module provides code to work with the prosite.doc file from
Prosite, available at http://www.expasy.ch/prosite/.
Tested with:
Release 15.0, July 1998
Release 16.0, July 1999
Release 20.22, 13 November 2007
Functions:
parse Iterates over entries in a Prodoc file.
index_file Index a Prodoc file for a Dictionary.
_extract_record Extract Prodoc data from a web page.
Classes:
Record Holds Prodoc data.
Reference Holds data from a Prodoc reference.
Dictionary Accesses a Prodoc file using a dictionary interface.
RecordParser Parses a Prodoc record into a Record object.
_Scanner Scans Prodoc-formatted data.
_RecordConsumer Consumes Prodoc data to a Record object.
"""
import warnings
warnings.warn("This module is OBSOLETE. Most of the functionality in this module has moved to Bio.ExPASy.Prodoc.", PendingDeprecationWarning)
from types import *
import os
import sgmllib
from Bio import File
from Bio import Index
from Bio.ParserSupport import *
def parse(handle):
import cStringIO
parser = RecordParser()
text = ""
for line in handle:
text += line
if line[:5] == '{END}':
handle = cStringIO.StringIO(text)
record = parser.parse(handle)
text = ""
yield record
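# A minimal usage sketch (Python 2 era, matching this module; the path
# 'prosite.doc' is hypothetical):
#
#     handle = open('prosite.doc')
#     for record in parse(handle):
#         print record.accession, len(record.references)
#     handle.close()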
def read(handle):
parser = RecordParser()
record = parser.parse(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one Prodoc record found")
return record
# It may be a good idea to rewrite read(), parse() at some point to avoid
# using the old-style "parser = RecordParser(); parser.parse(handle)" approach.
class Record:
"""Holds information from a Prodoc record.
Members:
accession Accession number of the record.
prosite_refs List of tuples (prosite accession, prosite name).
text Free format text.
references List of reference objects.
"""
def __init__(self):
self.accession = ''
self.prosite_refs = []
self.text = ''
self.references = []
class Reference:
"""Holds information from a Prodoc citation.
Members:
number Number of the reference. (string)
authors Names of the authors.
citation Describes the citation.
"""
def __init__(self):
self.number = ''
self.authors = ''
self.citation = ''
class Dictionary:
"""Accesses a Prodoc file using a dictionary interface.
"""
__filename_key = '__filename'
def __init__(self, indexname, parser=None):
"""__init__(self, indexname, parser=None)
Open a Prodoc Dictionary. indexname is the name of the
index for the dictionary. The index should have been created
using the index_file function. parser is an optional Parser
object to change the results into another form. If set to None,
then the raw contents of the file will be returned.
"""
self._index = Index.Index(indexname)
self._handle = open(self._index[Dictionary.__filename_key])
self._parser = parser
def __len__(self):
return len(self._index)
def __getitem__(self, key):
start, len = self._index[key]
self._handle.seek(start)
data = self._handle.read(len)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __getattr__(self, name):
return getattr(self._index, name)
class RecordParser(AbstractParser):
"""Parses Prodoc data into a Record object.
"""
def __init__(self):
self._scanner = _Scanner()
self._consumer = _RecordConsumer()
def parse(self, handle):
self._scanner.feed(handle, self._consumer)
return self._consumer.data
class _Scanner:
"""Scans Prodoc-formatted data.
Tested with:
Release 15.0, July 1998
"""
def feed(self, handle, consumer):
"""feed(self, handle, consumer)
Feed in Prodoc data for scanning. handle is a file-like
object that contains prosite data. consumer is a
Consumer object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
while 1:
line = uhandle.peekline()
if not line:
break
elif is_blank_line(line):
# Skip blank lines between records
uhandle.readline()
continue
else:
self._scan_record(uhandle, consumer)
def _scan_record(self, uhandle, consumer):
consumer.start_record()
self._scan_accession(uhandle, consumer)
self._scan_prosite_refs(uhandle, consumer)
read_and_call(uhandle, consumer.noevent, start='{BEGIN}')
self._scan_text(uhandle, consumer)
self._scan_refs(uhandle, consumer)
self._scan_copyright(uhandle, consumer)
read_and_call(uhandle, consumer.noevent, start='{END}')
consumer.end_record()
def _scan_accession(self, uhandle, consumer):
read_and_call(uhandle, consumer.accession, start='{PDOC')
def _scan_prosite_refs(self, uhandle, consumer):
while attempt_read_and_call(uhandle, consumer.prosite_reference,
start='{PS'):
pass
def _scan_text(self, uhandle, consumer):
while 1:
line = safe_readline(uhandle)
if (line[0] == '[' and line[3] == ']' and line[4] == ' ') or \
line[:5] == '{END}':
uhandle.saveline(line)
break
consumer.text(line)
def _scan_refs(self, uhandle, consumer):
while 1:
line = safe_readline(uhandle)
if line[:5] == '{END}' or is_blank_line(line):
uhandle.saveline(line)
break
consumer.reference(line)
def _scan_copyright(self, uhandle, consumer):
# Cayte Lindner found some PRODOC records with the copyrights
# appended at the end. We'll try and recognize these.
read_and_call_while(uhandle, consumer.noevent, blank=1)
if attempt_read_and_call(uhandle, consumer.noevent, start='+----'):
read_and_call_until(uhandle, consumer.noevent, start='+----')
read_and_call(uhandle, consumer.noevent, start='+----')
read_and_call_while(uhandle, consumer.noevent, blank=1)
class _RecordConsumer(AbstractConsumer):
"""Consumer that converts a Prodoc record to a Record object.
Members:
data Record with Prodoc data.
"""
def __init__(self):
self.data = None
def start_record(self):
self.data = Record()
def end_record(self):
self._clean_data()
def accession(self, line):
line = line.rstrip()
if line[0] != '{' or line[-1] != '}':
raise ValueError("I don't understand accession line\n%s" % line)
acc = line[1:-1]
if acc[:4] != 'PDOC':
raise ValueError("Invalid accession in line\n%s" % line)
self.data.accession = acc
def prosite_reference(self, line):
line = line.rstrip()
if line[0] != '{' or line[-1] != '}':
raise ValueError("I don't understand accession line\n%s" % line)
acc, name = line[1:-1].split('; ')
self.data.prosite_refs.append((acc, name))
def text(self, line):
self.data.text = self.data.text + line
def reference(self, line):
if line[0] == '[' and line[3] == ']': # new reference
self._ref = Reference()
self._ref.number = line[1:3].strip()
if line[1] == 'E':
# If it's an electronic reference, then the URL is on the
# line, instead of the author.
self._ref.citation = line[4:].strip()
else:
self._ref.authors = line[4:].strip()
self.data.references.append(self._ref)
        elif line[:4] == '    ':
if not self._ref:
raise ValueError("Unnumbered reference lines\n%s" % line)
self._ref.citation = self._ref.citation + line[5:]
else:
raise Exception("I don't understand the reference line\n%s" % line)
def _clean_data(self):
# get rid of trailing newlines
for ref in self.data.references:
ref.citation = ref.citation.rstrip()
ref.authors = ref.authors.rstrip()
def index_file(filename, indexname, rec2key=None):
"""index_file(filename, indexname, rec2key=None)
Index a Prodoc file. filename is the name of the file.
indexname is the name of the dictionary. rec2key is an
optional callback that takes a Record and generates a unique key
(e.g. the accession number) for the record. If not specified,
    the record's accession number will be used.
"""
import os
if not os.path.exists(filename):
raise ValueError("%s does not exist" % filename)
index = Index.Index(indexname, truncate=1)
index[Dictionary._Dictionary__filename_key] = filename
handle = open(filename)
records = parse(handle)
end = 0L
for record in records:
start = end
end = handle.tell()
length = end - start
if rec2key is not None:
key = rec2key(record)
else:
key = record.accession
if not key:
raise KeyError("empty key was produced")
elif key in index:
raise KeyError("duplicate key %s found" % key)
index[key] = start, length
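# A minimal usage sketch for the (deprecated) index/dictionary interface;
# the file names and the accession below are hypothetical:
#
#     index_file('prosite.doc', 'prodoc.idx')
#     d = Dictionary('prodoc.idx', parser=RecordParser())
#     record = d['PDOC00001']       # look up one record by accession
#     print record.text[:60]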
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Prosite/Prodoc.py
|
Python
|
gpl-2.0
| 10,822
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt4 import QtCore, QtGui
import os.path
import sip
from libopensesame.exceptions import osexception
from libopensesame import debug, item
from libqtopensesame.widgets.item_view_button import item_view_button
from libqtopensesame.widgets.tree_item_item import tree_item_item
from libqtopensesame.widgets.qtitem_splitter import qtitem_splitter
from libqtopensesame.widgets import header_widget, user_hint_widget
from libqtopensesame.misc import _
from libqtopensesame.misc.config import cfg
class qtitem(QtCore.QObject):
"""Base class for the GUI controls of other items"""
initial_view = u'controls'
def __init__(self):
"""Constructor"""
QtCore.QObject.__init__(self)
# The auto-widgets are stored in name -> (var, widget) dictionaries
self.auto_line_edit = {}
self.auto_combobox = {}
self.auto_spinbox = {}
self.auto_slider = {}
self.auto_editor = {}
self.auto_checkbox = {}
self.init_edit_widget()
self.lock = False
self.maximized = False
debug.msg(u'created %s' % self.name)
@property
def main_window(self):
return self.experiment.main_window
@property
def theme(self):
return self.experiment.main_window.theme
@property
def tabwidget(self):
return self.experiment.main_window.tabwidget
def open_tab(self, select_in_tree=True):
"""
desc:
Opens the tab if it wasn't yet open, and switches to it.
"""
self.tabwidget.add(self.widget(), self.item_icon(), self.name)
if select_in_tree:
self.experiment.main_window.ui.itemtree.select_item(self.name)
def close_tab(self):
"""
desc:
Closes the tab if it was open.
"""
self.tabwidget.remove(self.widget())
def set_focus(self):
"""
desc:
Gives focus to the most important widget.
"""
if hasattr(self, u'focus_widget') and self.focus_widget != None:
self.focus_widget.setFocus()
def set_focus_widget(self, widget, override=False):
"""
desc:
Sets the widget that receives focus when the tab is opened.
arguments:
widget:
desc: The widget to receive focus or `None` to reset.
type: [QWidget, NoneType]
keywords:
override:
desc: Indicates whether the focus widget should be changed if
there already is a focus widget.
type: bool
"""
if override or not hasattr(self, u'focus_widget') or self.focus_widget == None:
self.focus_widget = widget
def update_item_icon(self):
"""
desc:
Updates the item icon.
"""
self.tabwidget.set_icon(self.widget(), self.item_icon())
self.experiment.items.set_icon(self.name, self.item_icon())
self.header_item_icon.setPixmap(
self.theme.qpixmap(self.item_icon(), 32))
def item_icon(self):
"""
returns:
desc: The name of the item icon.
type: unicode
"""
return self.item_type
def show_tab(self):
"""
desc:
			Is called when the tab becomes visible, and updates the contents.
"""
self.update_script()
self.edit_widget()
self.set_focus()
def widget(self):
"""
returns:
desc: The widget that is added to the tabwidget.
type: QWidget
"""
return self.container_widget
def init_edit_widget(self, stretch=True):
"""
desc:
Builds the UI.
keywords:
stretch:
desc: Indicates whether a vertical stretch should be added to
the bottom of the controls. This is necessary if the
controls don't expand.
type: bool
"""
# Header widget
self.header = header_widget.header_widget(self)
self.user_hint_widget = user_hint_widget.user_hint_widget(
self.experiment.main_window, self)
self.header_hbox = QtGui.QHBoxLayout()
self.header_item_icon = self.experiment.label_image(self.item_icon())
self.header_hbox.addWidget(self.header_item_icon)
self.header_hbox.addWidget(self.header)
self.header_hbox.setContentsMargins(0, 5, 0, 10)
# Maximize button
self.button_toggle_maximize = QtGui.QPushButton(
self.theme.qicon(u'view-fullscreen'), u'')
self.button_toggle_maximize.setToolTip(_(u'Toggle pop-out'))
self.button_toggle_maximize.setIconSize(QtCore.QSize(16, 16))
self.button_toggle_maximize.clicked.connect(self.toggle_maximize)
self.header_hbox.addWidget(self.button_toggle_maximize)
# View button
self.button_view = item_view_button(self)
self.header_hbox.addWidget(self.button_view)
# Help button
self.button_help = QtGui.QPushButton(self.experiment.icon(u"help"), u"")
self.button_help.setToolTip(
_(u"Tell me more about the %s item") % self.item_type)
self.button_help.setIconSize(QtCore.QSize(16, 16))
self.button_help.clicked.connect(self.open_help_tab)
self.header_hbox.addWidget(self.button_help)
self.header_widget = QtGui.QWidget()
self.header_widget.setLayout(self.header_hbox)
# The edit_grid is the layout that contains the actual controls for the
# items.
self.edit_grid = QtGui.QGridLayout()
self.edit_grid.setColumnStretch(2, 2)
self.edit_grid_widget = QtGui.QWidget()
self.edit_grid.setMargin(0)
self.edit_grid_widget.setLayout(self.edit_grid)
# The edit_vbox contains the edit_grid and the header widget
self.edit_vbox = QtGui.QVBoxLayout()
self.edit_vbox.setMargin(5)
self.edit_vbox.addWidget(self.user_hint_widget)
self.edit_vbox.addWidget(self.edit_grid_widget)
if stretch:
self.edit_vbox.addStretch()
self._edit_widget = QtGui.QWidget()
self._edit_widget.setWindowIcon(self.experiment.icon(self.item_type))
self._edit_widget.setLayout(self.edit_vbox)
# The _script_widget contains the script editor
from QProgEdit import QTabManager
self._script_widget = QTabManager(
handlerButtonText=_(u'Apply and close'), cfg=cfg)
self._script_widget.focusLost.connect(self.apply_script_changes)
self._script_widget.handlerButtonClicked.connect(self.set_view_controls)
self._script_widget.addTab(u'Script').setLang(u'OpenSesame')
# The container_widget is the top-level widget that is actually inserted
# into the tab widget.
self.splitter = qtitem_splitter(self)
if self.initial_view == u'controls':
self.set_view_controls()
elif self.initial_view == u'script':
self.set_view_script()
elif self.initial_view == u'split':
self.set_view_split()
else:
debug.msg(u'Invalid initial_view: %s' % self.initial_view,
reason=u'warning')
self.set_view_controls()
self.splitter.splitterMoved.connect(self.splitter_moved)
self.container_vbox = QtGui.QVBoxLayout()
self.container_vbox.addWidget(self.header_widget)
self.container_vbox.addWidget(self.splitter)
self.container_widget = QtGui.QWidget()
self.container_widget.setLayout(self.container_vbox)
self.container_widget.on_activate = self.show_tab
self.container_widget.__item__ = self.name
def splitter_moved(self, pos, index):
"""
desc:
Is called when the splitter handle is manually moved.
arguments:
pos:
desc: The splitter-handle position.
type: int
index:
desc: The index of the splitter handle. Since there is only
one handle, this is always 0.
type: int
"""
sizes = self.splitter.sizes()
self.edit_size = sizes[0]
self.script_size = sizes[1]
if self.script_size == 0:
self.button_view.set_view_icon(u'controls')
elif self.edit_size == 0:
self.button_view.set_view_icon(u'script')
else:
self.button_view.set_view_icon(u'split')
def set_view_controls(self):
"""
desc:
Puts the splitter in control view.
"""
self.splitter.setSizes([self.splitter.width(), 0])
self.button_view.set_view_icon(u'controls')
def set_view_script(self):
"""
desc:
Puts the splitter in script view.
"""
self.splitter.setSizes([0, self.splitter.width()])
self.button_view.set_view_icon(u'script')
def set_view_split(self):
"""
desc:
Puts the splitter in split view.
"""
self.splitter.setSizes([self.splitter.width()/2,
self.splitter.width()/2])
self.button_view.set_view_icon(u'split')
def update(self):
"""
desc:
Updates both the script and the controls.
"""
self.update_script()
self.edit_widget()
def update_script(self):
"""
desc:
Regenerates the script and updates the script widget.
"""
# Normally, the script starts with a 'define' line and is indented by
# a tab. We want to undo this, and present only unindented content.
import textwrap
script = self.to_string()
script = script[script.find(u'\t'):]
script = textwrap.dedent(script)
self._script_widget.setText(script)
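	# For illustration only (hypothetical sketchpad script), the two steps
	# above turn a full item definition into unindented script content:
	#
	#     s = u'define sketchpad welcome\n\tduration keypress\n'
	#     s = s[s.find(u'\t'):]    # u'\tduration keypress\n'
	#     s = textwrap.dedent(s)   # u'duration keypress\n'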
def edit_widget(self, *deprecated, **_deprecated):
"""
desc:
This function updates the controls based on the item state.
"""
debug.msg()
self.auto_edit_widget()
self.header.refresh()
def apply_edit_changes(self, *deprecated, **_deprecated):
"""
desc:
Applies changes to the graphical controls.
"""
debug.msg()
self.auto_apply_edit_changes()
self.update_script()
self.main_window.set_unsaved(True)
return True
def apply_script_changes(self, *deprecated, **_deprecated):
"""
desc:
Applies changes to the script, by re-parsing the item from string.
"""
debug.msg()
old_script = self.to_string()
new_script = self._script_widget.text()
try:
self.from_string(new_script)
except osexception as e:
self.experiment.notify(e.html())
self.main_window.print_debug_window(e)
self.from_string(old_script)
self.edit_widget()
self.main_window.set_unsaved(True)
def rename(self, from_name, to_name):
"""
desc:
			Handles renaming of an item (not necessarily the current item).
arguments:
from_name:
desc: The old item name.
type: unicode
to_name:
desc: The new item name
type: unicode
"""
if self.name != from_name:
return
self.name = to_name
self.container_widget.__item__ = self.name
self.header.set_name(to_name)
index = self.tabwidget.indexOf(self.widget())
if index != None:
self.tabwidget.setTabText(index, to_name)
def open_help_tab(self):
"""
desc:
Opens a help tab.
"""
self.experiment.main_window.ui.tabwidget.open_help(self.item_type)
def toggle_maximize(self):
"""
desc:
Toggles edit-widget maximization.
"""
if not self.maximized:
# Always ignore close events. This is necessary, because otherwise
# the pop-out widget can be closed without re-enabling the main
# window.
self.container_widget.closeEvent = lambda e: e.ignore()
self.container_widget.setParent(None)
self.container_widget.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint|\
QtCore.Qt.WindowMaximizeButtonHint|\
QtCore.Qt.CustomizeWindowHint)
self.container_widget.showMaximized()
self.container_widget.show()
self.button_toggle_maximize.setIcon(
self.theme.qicon(u'view-restore'))
else:
self.container_widget.setParent(self.main_window)
self.open_tab()
self.button_toggle_maximize.setIcon(
self.theme.qicon(u'view-fullscreen'))
self.maximized = not self.maximized
self.user_hint_widget.disable(self.maximized)
self.button_help.setDisabled(self.maximized)
self.main_window.setDisabled(self.maximized)
def delete(self, item_name, item_parent=None, index=None):
"""
Delete an item (not necessarily the current one)
Arguments:
item_name -- the name of the item to be deleted
		Keyword arguments:
item_parent -- the parent item (default=None)
index -- the index of the item in the parent (default=None)
"""
pass
def build_item_tree(self, toplevel=None, items=[], max_depth=-1,
extra_info=None):
"""
Construct an item tree
Keyword arguments:
toplevel -- the toplevel widget (default = None)
items -- a list of items that have been added, to prevent recursion
(default=[])
"""
widget = tree_item_item(self, extra_info=extra_info)
items.append(self.name)
if toplevel != None:
toplevel.addChild(widget)
return widget
def parents(self):
"""
Creates a list of all the items that the current sequences is connected
to upstream
Returns:
A list of item names
"""
l = [self.name]
for item in self.experiment.items:
if self.experiment.items[item].is_child_item(self.name):
l.append(item)
return l
def variable_vars(self, exclude=[]):
"""
Determines if one of the variables of the current item is defined in
terms of another variable
		Keyword arguments:
exclude -- a list of variables that should not be checked
Returns:
True if there are variably defined variables, False otherwise
"""
for var in self.variables:
if var not in exclude:
val = self.variables[var]
if self.experiment.varref(val):
return True
return False
def get_ready(self):
"""
This function should be overridden to do any last-minute stuff that
		an item should do before an experiment is actually run, such as
applying pending script changes.
Returns:
True if some action has been taken, False if nothing was done
"""
return False
def auto_edit_widget(self):
"""Update the GUI controls based on the auto-widgets"""
debug.msg()
for var, edit in self.auto_line_edit.iteritems():
if self.has(var):
edit.setText(self.unistr(self.get(var, _eval=False)))
else:
edit.setText(u'')
for var, combobox in self.auto_combobox.iteritems():
val = self.get_check(var, _eval=False, default=u'')
i = combobox.findText(self.unistr(val))
			# Set the combobox to the selected item
if i >= 0:
combobox.setDisabled(False)
combobox.setCurrentIndex(i)
# If no value was specified, set the combobox to a blank item
elif val == u'':
combobox.setDisabled(False)
combobox.setCurrentIndex(-1)
# If an unknown value has been specified, notify the user
else:
combobox.setDisabled(True)
self.user_hint_widget.add(_(u'"%s" is set to a '
u'variable or unknown value and can only be edited through '
u'the script.' % var))
for var, spinbox in self.auto_spinbox.iteritems():
if self.has(var):
val = self.get(var, _eval=False)
if type(val) in (float, int):
spinbox.setDisabled(False)
try:
spinbox.setValue(val)
except Exception as e:
self.experiment.notify(_( \
u"Failed to set control '%s': %s") % (var, e))
else:
spinbox.setDisabled(True)
self.user_hint_widget.add(_( \
u'"%s" is defined using variables and can only be edited through the script.' \
% var))
for var, slider in self.auto_slider.iteritems():
if self.has(var):
val = self.get(var, _eval=False)
if type(val) in (float, int):
slider.setDisabled(False)
try:
slider.setValue(val)
except Exception as e:
self.experiment.notify(_( \
u"Failed to set control '%s': %s") % (var, e))
else:
slider.setDisabled(True)
self.user_hint_widget.add(_( \
u'"%s" is defined using variables and can only be edited through the script.' \
% var))
for var, checkbox in self.auto_checkbox.iteritems():
if self.has(var):
try:
checkbox.setChecked(self.get(var, _eval=False) == u"yes")
except Exception as e:
self.experiment.notify(_(u"Failed to set control '%s': %s") \
% (var, e))
for var, qprogedit in self.auto_editor.iteritems():
if self.has(var):
try:
qprogedit.setText(self.unistr(self.get(var, _eval=False)))
except Exception as e:
self.experiment.notify(_(u"Failed to set control '%s': %s") \
% (var, e))
def sanitize_check(self, s, strict=False, allow_vars=True, notify=True):
"""
Checks whether a string is sane (i.e. unchanged by sanitize()) and
optionally presents a warning.
Arguments:
s -- The string to check.
Keyword arguments:
strict -- See sanitize().
allow_vars -- See sanitize().
notify -- Indicates whether a notification should be presented if
the string is not sane.
Returns:
True if s is sane, False otherwise.
"""
sane = s == self.sanitize(s, strict=strict, allow_vars=allow_vars)
if not sane and notify:
if strict:
self.experiment.notify(
_(u'All non-alphanumeric characters except underscores have been stripped'))
else:
self.experiment.notify(
_(u'The following characters are not allowed and have been stripped: double-quote ("), backslash (\), and newline'))
return sane
def auto_apply_edit_changes(self, rebuild=True):
"""
Apply the auto-widget controls
Keyword arguments:
rebuild -- deprecated (does nothing) (default=True)
"""
debug.msg()
for var, edit in self.auto_line_edit.iteritems():
if edit.isEnabled() and isinstance(var, basestring):
val = unicode(edit.text()).strip()
if val != u"":
self.set(var, val)
# If the variable has no value, we assign a default value if it
# has been specified, and unset it otherwise.
elif hasattr(edit, u"default"):
self.set(var, edit.default)
else:
self.unset(var)
for var, combobox in self.auto_combobox.iteritems():
if combobox.isEnabled() and isinstance(var, basestring):
self.set(var, unicode(combobox.currentText()))
for var, spinbox in self.auto_spinbox.iteritems():
if spinbox.isEnabled() and isinstance(var, basestring):
self.set(var, spinbox.value())
for var, slider in self.auto_slider.iteritems():
if slider.isEnabled() and isinstance(var, basestring):
self.set(var, slider.value())
for var, checkbox in self.auto_checkbox.iteritems():
if checkbox.isEnabled() and isinstance(var, basestring):
if checkbox.isChecked():
val = u"yes"
else:
val = u"no"
self.set(var, val)
for var, qprogedit in self.auto_editor.iteritems():
if isinstance(var, basestring):
self.set(var, qprogedit.text())
return True
def auto_add_widget(self, widget, var=None):
"""
Add a widget to the list of auto-widgets
Arguments:
widget -- a QWidget
Keyword arguments:
var -- the variable to be linked to the widget (default=None)
"""
# Use the object id as a fallback name
if var == None:
var = id(widget)
debug.msg(var)
self.set_focus_widget(widget)
if isinstance(widget, QtGui.QSpinBox) or isinstance(widget,
QtGui.QDoubleSpinBox):
widget.editingFinished.connect(self.apply_edit_changes)
self.auto_spinbox[var] = widget
elif isinstance(widget, QtGui.QComboBox):
widget.activated.connect(self.apply_edit_changes)
self.auto_combobox[var] = widget
elif isinstance(widget, QtGui.QSlider):
widget.valueChanged.connect(self.apply_edit_changes)  # QSlider has no editingFinished signal
self.auto_slider[var] = widget
elif isinstance(widget, QtGui.QLineEdit):
widget.editingFinished.connect(self.apply_edit_changes)
self.auto_line_edit[var] = widget
elif isinstance(widget, QtGui.QCheckBox):
widget.clicked.connect(self.apply_edit_changes)
self.auto_checkbox[var] = widget
else:
raise Exception(u"Cannot auto-add widget of type %s" % widget)
def clean_cond(self, cond, default=u'always'):
"""
Cleans a conditional statement. May raise a dialog box if problems are
encountered.
Arguments:
cond -- A (potentially filthy) conditional statement.
Keyword arguments:
default -- A default value to use for empty conditional statements.
(default=u'always')
Returns:
cond -- A clean conditional statement.
"""
cond = self.unistr(cond)
if not self.sanitize_check(cond):
cond = self.sanitize(cond)
if cond.strip() == u'':
cond = default
try:
self.compile_cond(cond)
except osexception as e:
self.experiment.notify( \
u'Failed to compile conditional statement "%s": %s' % (cond, e))
return default
return cond
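# Behaviour sketch (illustrative, assuming u'[correct] = 1' is a typical run-if
# expression and that the default is left at u'always'):
#   self.clean_cond(u'')              -> u'always' (empty statements fall back to the default)
#   self.clean_cond(u'[correct] = 1') -> u'[correct] = 1'
#   A statement that fails to compile triggers a notification and returns the default.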
def children(self):
"""
returns:
desc: A list of children, including grand children, and so on.
type: list
"""
return []
def is_child_item(self, item_name):
"""
desc:
Checks if an item is somewhere downstream from the current item
in the experimental hierarchy.
arguments:
item_name:
desc: The name of the child item.
type: unicode
returns:
desc: True if the current item is offspring of the item, False
otherwise.
type: bool
"""
return False
def insert_child_item(self, item_name, index=0):
"""
desc:
Inserts a child item, if applicable to the item type.
arguments:
item_name:
desc: The name of the child item.
type: unicode
keywords:
index:
desc: The index of the child item.
type: int
"""
pass
def remove_child_item(self, item_name, index=0):
"""
desc:
Removes a child item, if applicable to the item type.
arguments:
item_name:
desc: The name of the child item.
type: unicode
keywords:
index:
desc: The index of the child item, if applicable. A negative
value indicates all instances.
type: int
"""
pass
| amazinger2013/OpenSesame | libqtopensesame/items/qtitem.py | Python | gpl-3.0 | 21,218 |
from __future__ import print_function
import nose.tools as nt
import numpy as np
import numpy.testing.decorators as dec
from ..gaussian import truncated_gaussian, truncated_gaussian_old
from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue
from ...tests.flags import SMALL_SAMPLES, SET_SEED
intervals = [(-np.inf,-4.),(3.,np.inf)]
tg = truncated_gaussian(intervals)
X = np.linspace(-5,5,101)
F = [tg.cdf(x) for x in X]
def test_sigma():
tg2 = truncated_gaussian_old(intervals, scale=2.)
tg1 = truncated_gaussian_old(np.array(intervals)/2., scale=1.)
Z = 3.5
nt.assert_equal(np.around(float(tg1.cdf(Z/2.)), 3),
np.around(float(tg2.cdf(Z)), 3))
np.testing.assert_equal(np.around(np.array(2 * tg1.equal_tailed_interval(Z/2,0.05)), 4),
np.around(np.array(tg2.equal_tailed_interval(Z,0.05)), 4))
@set_seed_iftrue(SET_SEED)
@dec.skipif(True, 'checking coverage: this is random with highish failure rate')
@set_sampling_params_iftrue(SMALL_SAMPLES, nsim=100)
def test_equal_tailed_coverage(nsim=1000):
alpha = 0.25
tg = truncated_gaussian_old([(2.3,np.inf)], scale=2)
coverage = 0
for i in range(nsim):
while True:
Z = np.random.standard_normal() * 2
if Z > 2.3:
break
L, U = tg.equal_tailed_interval(Z, alpha)
coverage += (U > 0) * (L < 0)
SE = np.sqrt(alpha*(1-alpha)*nsim)
print(coverage)
nt.assert_true(np.fabs(coverage - (1-alpha)*nsim) < 2*SE)
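# Note (illustrative): `coverage` is Binomial(nsim, 1 - alpha) if the intervals are
# exact, so its standard deviation is sqrt(alpha * (1 - alpha) * nsim); the assertion
# above checks that the observed count lies within roughly two such standard errors
# of the expected (1 - alpha) * nsim.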
@set_seed_iftrue(SET_SEED)
@dec.skipif(True, 'really slow')
@set_sampling_params_iftrue(SMALL_SAMPLES, nsim=100)
def test_UMAU_coverage(nsim=1000):
alpha = 0.25
tg = truncated_gaussian_old([(2.3,np.inf)], scale=2)
coverage = 0
for i in range(nsim):
while True:
Z = np.random.standard_normal()*2
if Z > 2.3:
break
L, U = tg.UMAU_interval(Z, alpha)
coverage += (U > 0) * (L < 0)
SE = np.sqrt(alpha*(1-alpha)*nsim)
print(coverage)
nt.assert_true(np.fabs(coverage - (1-alpha)*nsim) < 2.1*SE)
| selective-inference/selective-inference | selectinf/truncated/tests/test_truncated.py | Python | bsd-3-clause | 2,115 |
# -*- coding: utf-8 -*-
import gc
import inspect
import json
import logging
from django.contrib.auth.models import User as AuthUser
from django.db import models
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from ficuspumila.settings import (
get as settings_get,
ficuspumila as settings,
)
from .auth.sso import Authenticator
from .crypto import Transcoder
from .exceptions import ModelException
from .utils import (
generate_hmac_digest,
get_default_language_code,
)
logger = logging.getLogger(__name__)
transcoder = Transcoder()
def iterator(queryset, chunksize=1000, reverse=False):
"""
Iterate over a Django Queryset ordered by the primary key
This method loads at most chunksize (default: 1000) rows into memory at a
time, whereas Django would normally load the entire result set into memory.
Using the queryset's own iterator() method only prevents Django from caching
the results; depending on the database driver, all rows may still be fetched at once.
Note that this implementation does not support custom-ordered querysets: the
ordering is always by primary key.
"""
ordering = '-' if reverse else ''
queryset = queryset.order_by(ordering + 'pk')
last_pk = None
new_items = True
while new_items:
new_items = False
chunk = queryset
if last_pk is not None:
func = 'lt' if reverse else 'gt'
chunk = chunk.filter(**{'pk__' + func: last_pk})
chunk = chunk[:chunksize]
row = None
for row in chunk:
yield row
if row is not None:
last_pk = row.pk
new_items = True
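# Usage sketch (illustrative only; `Track` is a hypothetical model that is not part
# of this module): walk a large queryset in primary-key order, keeping at most
# 500 rows in memory at a time. The helper is never called here.
def _example_iterate_tracks():
    from myapp.models import Track  # hypothetical app and model
    for track in iterator(Track.objects.all(), chunksize=500):
        logger.debug(u'processing track %s' % track.pk)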
class Choice(object):
class __metaclass__(type):
def __init__(self, *args, **kwargs):
self._data = []
for name, value in inspect.getmembers(self):
if not name.startswith('_') and not inspect.ismethod(value):
if isinstance(value, tuple) and len(value) > 1:
data = value
else:
pieces = [x.capitalize() for x in name.split('_')]
data = (value, ' '.join(pieces))
self._data.append(data)
setattr(self, name, data[0])
self._hash = dict(self._data)
def __iter__(self):
for value, data in self._data:
yield (value, data)
@classmethod
def get_value(cls, key):
return cls._hash[key]
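# Illustrative sketch (not part of the original module): subclassing Choice turns
# plain class attributes into (value, label) pairs, e.g. for use as field choices.
class _ExampleStatus(Choice):
    DRAFT = 0                       # label is auto-generated: (0, 'Draft')
    PUBLISHED = (1, u'Published')   # explicit (value, label) tuple
# list(_ExampleStatus()) yields [(0, 'Draft'), (1, u'Published')] and
# _ExampleStatus.get_value(0) returns 'Draft'.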
class CsvField(models.CharField):
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if isinstance(value, basestring):
splited = value.split(',')
if len(splited) == 1 and len(splited[0]) < 1:
return []
try:
return [element.strip() for element in splited]
except:
logger.exception(u'failed to convert CSV field '
u'to python: %s' % value)
raise ModelException(_(u'Could not evaluate CSV field.'))
else:
return value
def get_prep_value(self, value):
try:
if len(value) < 1:
return ''
return ', '.join(value)
except:
logger.exception(u'failed to prep data for CSV field: '
u'%s' % value)
raise ModelException(_(u'Invalid value detected for CSV field.'))
class JsonField(models.TextField):
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if isinstance(value, basestring):
if len(value) < 1:
return {}
try:
return json.loads(value)
except:
logger.exception(u'failed to convert JSON field '
u'to python: %s' % value)
raise ModelException(_(u'Could not evaluate JSON field.'))
else:
return value
def get_prep_value(self, value):
try:
if len(value.keys()) < 1:
return ''
return json.dumps(value)
except:
logger.exception(u'failed to prep data for JSON field: '
u'%s' % value)
raise ModelException(_(u'Invalid value detected for JSON field.'))
class Model(models.Model):
class Meta:
abstract = True
objects = import_module('settings.database.managers').Manager()
ctime = models.DateTimeField(auto_now_add=True)
utime = models.DateTimeField(auto_now=True)
def __iter__(self):
for field in self._meta.get_all_field_names():
yield(field, getattr(self, field))
def __getitem__(self, field):
if field in self._meta.get_all_field_names():
return getattr(self, field)
# fixed typo: was __getaitem__, which would raise AttributeError
return super(Model, self).__getitem__(field)
def __setitem__(self, field, value):
# accept the assigned value (it was missing from the signature)
if field in self._meta.get_all_field_names():
return setattr(self, field, value)
return super(Model, self).__setitem__(field, value)
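# Usage sketch (hypothetical Name instance): Model adds dict-style and iterator
# access on top of django.db.models.Model, e.g.
#   name_obj['name']            is equivalent to  name_obj.name
#   name_obj['name'] = u'foo'   is equivalent to  name_obj.name = u'foo'
#   dict(name_obj)              builds a {field: value} mapping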
class Name(Model):
class Meta:
abstract = True
ordering = ('name',)
name = models.CharField(max_length=64)
def __unicode__(self):
return self.name
class Attributable(models.Model):
class Meta:
abstract = True
class Attribute(Model):
class Meta:
abstract = True
ordering = ('name__name',)
# name field must be specified as a foreign key to the Name model
# related name to the attributable must be "attributes"
value = models.CharField(max_length=512,
blank=False,
null=False)
def __unicode__(self):
return u'%s: %s' % (self.name.name,
self.value,)
class Logger(Attributable):
class Meta:
abstract = True
class Event(Model):
class Meta:
abstract = True
ordering = ('-id',)
# name field must be specified as a foreign key to the Name model
# related name to the logger must be "events"
message = JsonField(blank=True,
null=False,
verbose_name=_(u'Message'))
def __unicode__(self):
return u'%s: %s' % (self.name.name,
self.utime,)
class Stateful(Logger):
class Meta:
abstract = True
class Notifier(Logger):
class Meta:
abstract = True
hmac_key = models.CharField(max_length=64,
default=generate_hmac_digest)
notification_urls = CsvField(max_length=1024,
help_text=_('URLs can be specified as comma-separated values.'))
class Notification(Model):
class Meta:
abstract = True
# foreign key to the event must be specified
url = models.CharField(max_length=255,
blank=True,
null=False)
status_code = models.IntegerField(default=200)
content = models.TextField(blank=True,
null=False)
def __unicode__(self):
return '%s: Event(%s)' % (self.ctime, self.event,)
class Localizable(Model):
class Meta:
abstract = True
class Localization(Model):
class Meta:
abstract = True
language_code = models.CharField(max_length=2,
choices=settings_get('LANGUAGES'),
blank=False,
null=False)
class User(Model):
class Meta:
abstract = True
user = models.OneToOneField(AuthUser,
primary_key=True)
class Service(User, Notifier, Attributable):
class Meta:
abstract = True
token_key = models.CharField(max_length=255,
default=transcoder.algorithm.generate_key)
token_iv = models.CharField(max_length=255,
default=transcoder.algorithm.generate_iv)
| nk113/django-ficuspumila | ficuspumila/core/models.py | Python | bsd-3-clause | 7,951 |
"""The tests for the MQTT component."""
import asyncio
from datetime import datetime, timedelta
import json
import ssl
import pytest
import voluptuous as vol
from homeassistant.components import mqtt, websocket_api
from homeassistant.components.mqtt import debug_info
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, MagicMock, call, mock_open, patch
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_fire_time_changed,
mock_device_registry,
mock_registry,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture(autouse=True)
def mock_storage(hass_storage):
"""Autouse hass_storage for the TestCase tests."""
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def mock_mqtt():
"""Make sure connection is established."""
with patch("homeassistant.components.mqtt.MQTT") as mock_mqtt:
mock_mqtt.return_value.async_connect = AsyncMock(return_value=True)
mock_mqtt.return_value.async_disconnect = AsyncMock(return_value=True)
yield mock_mqtt
@pytest.fixture
def calls():
"""Fixture to record calls."""
return []
@pytest.fixture
def record_calls(calls):
"""Fixture to record calls."""
@callback
def record_calls(*args):
"""Record calls."""
calls.append(args)
return record_calls
async def test_mqtt_connects_on_home_assistant_mqtt_setup(
hass, mqtt_client_mock, mqtt_mock
):
"""Test if client is connected after mqtt init on bootstrap."""
assert mqtt_client_mock.connect.call_count == 1
async def test_mqtt_disconnects_on_home_assistant_stop(hass, mqtt_mock):
"""Test if client stops on HA stop."""
hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert mqtt_mock.async_disconnect.called
async def test_publish_calls_service(hass, mqtt_mock, calls, record_calls):
"""Test the publishing of call to services."""
hass.bus.async_listen_once(EVENT_CALL_SERVICE, record_calls)
mqtt.async_publish(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].data["service_data"][mqtt.ATTR_TOPIC] == "test-topic"
assert calls[0][0].data["service_data"][mqtt.ATTR_PAYLOAD] == "test-payload"
async def test_service_call_without_topic_does_not_publish(hass, mqtt_mock):
"""Test the service call if topic is missing."""
hass.bus.fire(
EVENT_CALL_SERVICE,
{ATTR_DOMAIN: mqtt.DOMAIN, ATTR_SERVICE: mqtt.SERVICE_PUBLISH},
)
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_service_call_with_template_payload_renders_template(hass, mqtt_mock):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.async_publish_template(hass, "test/topic", "{{ 1+1 }}")
await hass.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == "2"
async def test_service_call_with_payload_doesnt_render_template(hass, mqtt_mock):
"""Test the service call with unrendered template.
If both 'payload' and 'payload_template' are provided then fail.
"""
payload = "not a template"
payload_template = "a template"
with pytest.raises(vol.Invalid):
await hass.services.async_call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template,
},
blocking=True,
)
assert not mqtt_mock.async_publish.called
async def test_service_call_with_ascii_qos_retain_flags(hass, mqtt_mock):
"""Test the service call with args that can be misinterpreted.
Empty payload message and ascii formatted qos and retain flags.
"""
await hass.services.async_call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: "",
mqtt.ATTR_QOS: "2",
mqtt.ATTR_RETAIN: "no",
},
blocking=True,
)
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][2] == 2
assert not mqtt_mock.async_publish.call_args[0][3]
def test_validate_topic():
"""Test topic name/filter validation."""
# Invalid UTF-8, must not contain U+D800 to U+DFFF.
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("\ud800")
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("\udfff")
# Topic MUST NOT be empty
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("")
# Topic MUST NOT be longer than 65535 encoded bytes.
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("ü" * 32768)
# UTF-8 MUST NOT include null character
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("bad\0one")
# Topics "SHOULD NOT" include these special characters
# (not MUST NOT, RFC2119). The receiver MAY close the connection.
mqtt.util.valid_topic("\u0001")
mqtt.util.valid_topic("\u001F")
mqtt.util.valid_topic("\u009F")
mqtt.util.valid_topic("\u009F")
mqtt.util.valid_topic("\uffff")
def test_validate_subscribe_topic():
"""Test invalid subscribe topics."""
mqtt.valid_subscribe_topic("#")
mqtt.valid_subscribe_topic("sport/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/#/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/bar#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/#/bar")
mqtt.valid_subscribe_topic("+")
mqtt.valid_subscribe_topic("+/tennis/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+1")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("bad+topic")
mqtt.valid_subscribe_topic("sport/+/player1")
mqtt.valid_subscribe_topic("/finance")
mqtt.valid_subscribe_topic("+/+")
mqtt.valid_subscribe_topic("$SYS/#")
def test_validate_publish_topic():
"""Test invalid publish topics."""
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub/+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("1#")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("bad+topic")
mqtt.valid_publish_topic("//")
# Topic names beginning with $ SHOULD NOT be used, but can
mqtt.valid_publish_topic("$SYS/")
def test_entity_device_info_schema():
"""Test MQTT entity device info validation."""
# just identifier
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": ["abcd"]})
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": "abcd"})
# just connection
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({"connections": [["mac", "02:5b:26:a8:dc:12"]]})
# full device info
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [["mac", "02:5b:26:a8:dc:12"], ["zigbee", "zigbee_id"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# full device info with via_device
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [["mac", "02:5b:26:a8:dc:12"], ["zigbee", "zigbee_id"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"via_device": "test-hub",
}
)
# no identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# empty identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{"identifiers": [], "connections": [], "name": "Beer"}
)
async def test_receiving_non_utf8_message_gets_logged(
hass, mqtt_mock, calls, record_calls, caplog
):
"""Test receiving a non utf8 encoded message."""
await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "test-topic", b"\x9a")
await hass.async_block_till_done()
assert (
"Can't decode payload b'\\x9a' on test-topic with encoding utf-8" in caplog.text
)
async def test_all_subscriptions_run_when_decode_fails(
hass, mqtt_mock, calls, record_calls
):
"""Test all other subscriptions still run when decode fails for one."""
await mqtt.async_subscribe(hass, "test-topic", record_calls, encoding="ascii")
await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "test-topic", TEMP_CELSIUS)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_topic(hass, mqtt_mock, calls, record_calls):
"""Test the subscription of a topic."""
unsub = await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic"
assert calls[0][0].payload == "test-payload"
unsub()
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_deprecated(hass, mqtt_mock):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
@callback
def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_deprecated_async(hass, mqtt_mock):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
async def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_topic_not_match(hass, mqtt_mock, calls, record_calls):
"""Test if subscribed topic is not a match."""
await mqtt.async_subscribe(hass, "test-topic", record_calls)
async_fire_mqtt_message(hass, "another-test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard(hass, mqtt_mock, calls, record_calls):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/+/on", record_calls)
async_fire_mqtt_message(hass, "test-topic/bier/on", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic/bier/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_no_subtree_match(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/+/on", record_calls)
async_fire_mqtt_message(hass, "test-topic/bier", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_root_topic_no_subtree_match(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/#", record_calls)
async_fire_mqtt_message(hass, "test-topic-123", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_subtree_wildcard_subtree_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/#", record_calls)
async_fire_mqtt_message(hass, "test-topic/bier/on", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic/bier/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_subtree_wildcard_root_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/#", record_calls)
async_fire_mqtt_message(hass, "test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_subtree_wildcard_no_match(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "test-topic/#", record_calls)
async_fire_mqtt_message(hass, "another-test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_and_wildcard_root_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "+/test-topic/#", record_calls)
async_fire_mqtt_message(hass, "hi/test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "hi/test-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_and_wildcard_subtree_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "+/test-topic/#", record_calls)
async_fire_mqtt_message(hass, "hi/test-topic/here-iam", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "hi/test-topic/here-iam"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_and_wildcard_level_no_match(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "+/test-topic/#", record_calls)
async_fire_mqtt_message(hass, "hi/here-iam/test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_and_wildcard_no_match(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(hass, "+/test-topic/#", record_calls)
async_fire_mqtt_message(hass, "hi/another-test-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_sys_root(hass, mqtt_mock, calls, record_calls):
"""Test the subscription of $ root topics."""
await mqtt.async_subscribe(hass, "$test-topic/subtree/on", record_calls)
async_fire_mqtt_message(hass, "$test-topic/subtree/on", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/subtree/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_sys_root_and_wildcard_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of $ root and wildcard topics."""
await mqtt.async_subscribe(hass, "$test-topic/#", record_calls)
async_fire_mqtt_message(hass, "$test-topic/some-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/some-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_sys_root_and_wildcard_subtree_topic(
hass, mqtt_mock, calls, record_calls
):
"""Test the subscription of $ root and wildcard subtree topics."""
await mqtt.async_subscribe(hass, "$test-topic/subtree/#", record_calls)
async_fire_mqtt_message(hass, "$test-topic/subtree/some-topic", "test-payload")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/subtree/some-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_special_characters(hass, mqtt_mock, calls, record_calls):
"""Test the subscription to topics with special characters."""
topic = "/test-topic/$(.)[^]{-}"
payload = "p4y.l[]a|> ?"
await mqtt.async_subscribe(hass, topic, record_calls)
async_fire_mqtt_message(hass, topic, payload)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == topic
assert calls[0][0].payload == payload
async def test_subscribe_same_topic(hass, mqtt_client_mock, mqtt_mock):
"""
Test subscribing to the same topic twice and simulate retained messages.
When subscribing to the same topic again, SUBSCRIBE must be sent to the broker again
for it to resend any retained messages.
"""
# Fake that the client is connected
mqtt_mock().connected = True
calls_a = MagicMock()
await mqtt.async_subscribe(hass, "test/state", calls_a)
async_fire_mqtt_message(
hass, "test/state", "online"
) # Simulate a (retained) message
await hass.async_block_till_done()
assert calls_a.called
mqtt_client_mock.subscribe.assert_called()
calls_a.reset_mock()
mqtt_client_mock.reset_mock()
calls_b = MagicMock()
await mqtt.async_subscribe(hass, "test/state", calls_b)
async_fire_mqtt_message(
hass, "test/state", "online"
) # Simulate a (retained) message
await hass.async_block_till_done()
assert calls_a.called
assert calls_b.called
mqtt_client_mock.subscribe.assert_called()
async def test_not_calling_unsubscribe_with_active_subscribers(
hass, mqtt_client_mock, mqtt_mock
):
"""Test not calling unsubscribe() when other subscribers are active."""
# Fake that the client is connected
mqtt_mock().connected = True
unsub = await mqtt.async_subscribe(hass, "test/state", None)
await mqtt.async_subscribe(hass, "test/state", None)
await hass.async_block_till_done()
assert mqtt_client_mock.subscribe.called
unsub()
await hass.async_block_till_done()
assert not mqtt_client_mock.unsubscribe.called
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_DISCOVERY: False}],
)
async def test_restore_subscriptions_on_reconnect(hass, mqtt_client_mock, mqtt_mock):
"""Test subscriptions are restored on reconnect."""
# Fake that the client is connected
mqtt_mock().connected = True
await mqtt.async_subscribe(hass, "test/state", None)
await hass.async_block_till_done()
assert mqtt_client_mock.subscribe.call_count == 1
mqtt_mock._mqtt_on_disconnect(None, None, 0)
with patch("homeassistant.components.mqtt.DISCOVERY_COOLDOWN", 0):
mqtt_mock._mqtt_on_connect(None, None, None, 0)
await hass.async_block_till_done()
assert mqtt_client_mock.subscribe.call_count == 2
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_DISCOVERY: False}],
)
async def test_restore_all_active_subscriptions_on_reconnect(
hass, mqtt_client_mock, mqtt_mock
):
"""Test active subscriptions are restored correctly on reconnect."""
# Fake that the client is connected
mqtt_mock().connected = True
unsub = await mqtt.async_subscribe(hass, "test/state", None, qos=2)
await mqtt.async_subscribe(hass, "test/state", None)
await mqtt.async_subscribe(hass, "test/state", None, qos=1)
await hass.async_block_till_done()
expected = [
call("test/state", 2),
call("test/state", 0),
call("test/state", 1),
]
assert mqtt_client_mock.subscribe.mock_calls == expected
unsub()
await hass.async_block_till_done()
assert mqtt_client_mock.unsubscribe.call_count == 0
mqtt_mock._mqtt_on_disconnect(None, None, 0)
with patch("homeassistant.components.mqtt.DISCOVERY_COOLDOWN", 0):
mqtt_mock._mqtt_on_connect(None, None, None, 0)
await hass.async_block_till_done()
expected.append(call("test/state", 1))
assert mqtt_client_mock.subscribe.mock_calls == expected
async def test_setup_logs_error_if_no_connect_broker(hass, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = lambda *args: 1
assert await mqtt.async_setup_entry(hass, entry)
assert "Failed to connect to MQTT server:" in caplog.text
async def test_setup_raises_ConfigEntryNotReady_if_no_connect_broker(hass, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = MagicMock(side_effect=OSError("Connection error"))
assert await mqtt.async_setup_entry(hass, entry)
assert "Failed to connect to MQTT server due to exception:" in caplog.text
async def test_setup_uses_certificate_on_certificate_set_to_auto(hass):
"""Test setup uses bundled certs when certificate is set to auto."""
calls = []
def mock_tls_set(certificate, certfile=None, keyfile=None, tls_version=None):
calls.append((certificate, certfile, keyfile, tls_version))
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().tls_set = mock_tls_set
entry = MockConfigEntry(
domain=mqtt.DOMAIN,
data={mqtt.CONF_BROKER: "test-broker", "certificate": "auto"},
)
assert await mqtt.async_setup_entry(hass, entry)
assert calls
import certifi
expectedCertificate = certifi.where()
# assert mock_mqtt.mock_calls[0][1][2]["certificate"] == expectedCertificate
assert calls[0][0] == expectedCertificate
async def test_setup_without_tls_config_uses_tlsv1_under_python36(hass):
"""Test setup defaults to TLSv1 under python3.6."""
calls = []
def mock_tls_set(certificate, certfile=None, keyfile=None, tls_version=None):
calls.append((certificate, certfile, keyfile, tls_version))
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().tls_set = mock_tls_set
entry = MockConfigEntry(
domain=mqtt.DOMAIN,
data={"certificate": "auto", mqtt.CONF_BROKER: "test-broker"},
)
assert await mqtt.async_setup_entry(hass, entry)
assert calls
import sys
if sys.hexversion >= 0x03060000:
expectedTlsVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
expectedTlsVersion = ssl.PROTOCOL_TLSv1
assert calls[0][3] == expectedTlsVersion
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "birth",
mqtt.ATTR_PAYLOAD: "birth",
},
}
],
)
async def test_custom_birth_message(hass, mqtt_client_mock, mqtt_mock):
"""Test sending birth message."""
birth = asyncio.Event()
async def wait_birth(topic, payload, qos):
"""Handle birth message."""
birth.set()
with patch("homeassistant.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
await mqtt.async_subscribe(hass, "birth", wait_birth)
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
await birth.wait()
mqtt_client_mock.publish.assert_called_with("birth", "birth", 0, False)
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "homeassistant/status",
mqtt.ATTR_PAYLOAD: "online",
},
}
],
)
async def test_default_birth_message(hass, mqtt_client_mock, mqtt_mock):
"""Test sending birth message."""
birth = asyncio.Event()
async def wait_birth(topic, payload, qos):
"""Handle birth message."""
birth.set()
with patch("homeassistant.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
await mqtt.async_subscribe(hass, "homeassistant/status", wait_birth)
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
await birth.wait()
mqtt_client_mock.publish.assert_called_with(
"homeassistant/status", "online", 0, False
)
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_BIRTH_MESSAGE: {}}],
)
async def test_no_birth_message(hass, mqtt_client_mock, mqtt_mock):
"""Test disabling birth message."""
with patch("homeassistant.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
await asyncio.sleep(0.2)
mqtt_client_mock.publish.assert_not_called()
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "death",
mqtt.ATTR_PAYLOAD: "death",
},
}
],
)
async def test_custom_will_message(hass, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_called_with(
topic="death", payload="death", qos=0, retain=False
)
async def test_default_will_message(hass, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_called_with(
topic="homeassistant/status", payload="offline", qos=0, retain=False
)
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_WILL_MESSAGE: {}}],
)
async def test_no_will_message(hass, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_not_called()
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {},
mqtt.CONF_DISCOVERY: False,
}
],
)
async def test_mqtt_subscribes_topics_on_connect(hass, mqtt_client_mock, mqtt_mock):
"""Test subscription to topic on connect."""
await mqtt.async_subscribe(hass, "topic/test", None)
await mqtt.async_subscribe(hass, "home/sensor", None, 2)
await mqtt.async_subscribe(hass, "still/pending", None)
await mqtt.async_subscribe(hass, "still/pending", None, 1)
hass.add_job = MagicMock()
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
assert mqtt_client_mock.disconnect.call_count == 0
expected = {"topic/test": 0, "home/sensor": 2, "still/pending": 1}
calls = {call[1][1]: call[1][2] for call in hass.add_job.mock_calls}
assert calls == expected
async def test_setup_fails_without_config(hass):
"""Test if the MQTT component fails to load with no config."""
assert not await async_setup_component(hass, mqtt.DOMAIN, {})
@pytest.mark.no_fail_on_log_exception
async def test_message_callback_exception_gets_logged(hass, caplog, mqtt_mock):
"""Test exception raised by message handler."""
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception("This is a bad message callback")
await mqtt.async_subscribe(hass, "test-topic", bad_handler)
async_fire_mqtt_message(hass, "test-topic", "test")
await hass.async_block_till_done()
assert (
"Exception in bad_handler when handling msg on 'test-topic':"
" 'test'" in caplog.text
)
async def test_mqtt_ws_subscription(hass, hass_ws_client, mqtt_mock):
"""Test MQTT websocket subscription."""
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "mqtt/subscribe", "topic": "test-topic"})
response = await client.receive_json()
assert response["success"]
async_fire_mqtt_message(hass, "test-topic", "test1")
async_fire_mqtt_message(hass, "test-topic", "test2")
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test1"
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test2"
# Unsubscribe
await client.send_json({"id": 8, "type": "unsubscribe_events", "subscription": 5})
response = await client.receive_json()
assert response["success"]
async def test_dump_service(hass, mqtt_mock):
"""Test that we can dump a topic."""
mopen = mock_open()
await hass.services.async_call(
"mqtt", "dump", {"topic": "bla/#", "duration": 3}, blocking=True
)
async_fire_mqtt_message(hass, "bla/1", "test1")
async_fire_mqtt_message(hass, "bla/2", "test2")
with patch("homeassistant.components.mqtt.open", mopen):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=3))
await hass.async_block_till_done()
writes = mopen.return_value.write.mock_calls
assert len(writes) == 2
assert writes[0][1][0] == "bla/1,test1\n"
assert writes[1][1][0] == "bla/2,test2\n"
async def test_mqtt_ws_remove_discovered_device(
hass, device_reg, entity_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
# Verify device entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is None
async def test_mqtt_ws_remove_discovered_device_twice(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_discovered_device_same_topic(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "availability_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_non_mqtt_device(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal of device belonging to other domain."""
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_get_device_debug_info(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device debug info."""
config = {
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"state_topic": "foobar/sensor",
"unique_id": "unique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/debug_info", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
expected_result = {
"entities": [
{
"entity_id": "sensor.mqtt_sensor",
"subscriptions": [{"topic": "foobar/sensor", "messages": []}],
"discovery_data": {
"payload": config,
"topic": "homeassistant/sensor/bla/config",
},
}
],
"triggers": [],
}
assert response["result"] == expected_result
async def test_debug_info_multiple_devices(hass, mqtt_mock):
"""Test we get correct debug_info when multiple devices are present."""
devices = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD1"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD3"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
registry = await hass.helpers.device_registry.async_get_registry()
for d in devices:
data = json.dumps(d["config"])
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{id}/config", data)
await hass.async_block_till_done()
for d in devices:
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", id)}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
if d["domain"] != "device_automation":
assert len(debug_info_data["entities"]) == 1
assert len(debug_info_data["triggers"]) == 0
discovery_data = debug_info_data["entities"][0]["discovery_data"]
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
topic = d["config"]["state_topic"]
assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
else:
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 1
discovery_data = debug_info_data["triggers"][0]["discovery_data"]
assert discovery_data["topic"] == f"homeassistant/{domain}/{id}/config"
assert discovery_data["payload"] == d["config"]
async def test_debug_info_multiple_entities_triggers(hass, mqtt_mock):
"""Test we get correct debug_info for a device with multiple entities and triggers."""
config = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
registry = await hass.helpers.device_registry.async_get_registry()
for c in config:
data = json.dumps(c["config"])
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{id}/config", data)
await hass.async_block_till_done()
device_id = config[0]["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", device_id)}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 2
assert len(debug_info_data["triggers"]) == 2
for c in config:
# Test we get debug info for each entity and trigger
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
if c["domain"] != "device_automation":
discovery_data = [e["discovery_data"] for e in debug_info_data["entities"]]
topic = c["config"]["state_topic"]
assert {"topic": topic, "messages": []} in [
t for e in debug_info_data["entities"] for t in e["subscriptions"]
]
else:
discovery_data = [e["discovery_data"] for e in debug_info_data["triggers"]]
assert {
"topic": f"homeassistant/{domain}/{id}/config",
"payload": c["config"],
} in discovery_data
async def test_debug_info_non_mqtt(hass, device_reg, entity_reg):
"""Test we get empty debug_info for a device with non MQTT entities."""
DOMAIN = "sensor"
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {"platform": "test"}})
debug_info_data = await debug_info.info_for_device(hass, device_entry.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 0
async def test_debug_info_wildcard(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
}
],
} in debug_info_data["entities"][0]["subscriptions"]
async def test_debug_info_filter_same(hass, mqtt_mock):
"""Test debug info removes messages with same timestamp."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
dt1 = datetime(2019, 1, 1, 0, 0, 0)
dt2 = datetime(2019, 1, 1, 0, 0, 1)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = dt1
async_fire_mqtt_message(hass, "sensor/abc", "123")
async_fire_mqtt_message(hass, "sensor/abc", "123")
dt_utcnow.return_value = dt2
async_fire_mqtt_message(hass, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert len(debug_info_data["entities"][0]["subscriptions"][0]["messages"]) == 2
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt1,
"topic": "sensor/abc",
},
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt2,
"topic": "sensor/abc",
},
],
} == debug_info_data["entities"][0]["subscriptions"][0]
async def test_debug_info_same_topic(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/status",
"availability_topic": "sensor/status",
"unique_id": "veryunique",
}
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/status", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/status", "123", qos=0, retain=False)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/status",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
config["availability_topic"] = "sensor/availability"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/status", "123", qos=0, retain=False)
async def test_debug_info_qos_retain(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=0, retain=False)
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=1, retain=True)
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=2, retain=False)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 1,
"retain": True,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 2,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
async def test_publish_json_from_template(hass, mqtt_mock):
"""Test the publishing of call to services."""
test_str = "{'valid': 'python', 'invalid': 'json'}"
test_str_tpl = "{'valid': '{{ \"python\" }}', 'invalid': 'json'}"
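    # Both script calls below are expected to publish test_str: the Jinja2
    # expression in test_str_tpl renders to "python", whether the value is
    # passed as "payload" or as "payload_template".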
await async_setup_component(
hass,
"script",
{
"script": {
"test_script_payload": {
"sequence": {
"service": "mqtt.publish",
"data": {"topic": "test-topic", "payload": test_str_tpl},
}
},
"test_script_payload_template": {
"sequence": {
"service": "mqtt.publish",
"data": {
"topic": "test-topic",
"payload_template": test_str_tpl,
},
}
},
}
},
)
await hass.services.async_call("script", "test_script_payload", blocking=True)
await hass.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == test_str
mqtt_mock.async_publish.reset_mock()
assert not mqtt_mock.async_publish.called
await hass.services.async_call(
"script", "test_script_payload_template", blocking=True
)
await hass.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == test_str
|
tboyce021/home-assistant
|
tests/components/mqtt/test_init.py
|
Python
|
apache-2.0
| 52,384
|
import string
from nltk.corpus import stopwords as sw
from nltk.corpus import wordnet as wn
from nltk import wordpunct_tokenize
from nltk import WordNetLemmatizer
from nltk import sent_tokenize
from nltk import pos_tag
from sklearn.base import BaseEstimator, TransformerMixin
class NLTKPreprocessor(BaseEstimator, TransformerMixin):
def __init__(self, stopwords=None, punct=None,
lower=True, strip=True):
self.lower = lower
self.strip = strip
self.stopwords = stopwords or set(sw.words('english'))
self.punct = punct or set(string.punctuation)
self.lemmatizer = WordNetLemmatizer()
def fit(self, X, y=None):
return self
def inverse_transform(self, X):
return [" ".join(doc) for doc in X]
def transform(self, X):
return [
list(self.tokenize(doc)) for doc in X
]
def tokenize(self, document):
# Break the document into sentences
for sent in sent_tokenize(document):
# Break the sentence into part of speech tagged tokens
for token, tag in pos_tag(wordpunct_tokenize(sent)):
# Apply preprocessing to the token
token = token.lower() if self.lower else token
token = token.strip() if self.strip else token
token = token.strip('_') if self.strip else token
token = token.strip('*') if self.strip else token
# If stopword, ignore token and continue
if token in self.stopwords:
continue
# If punctuation, ignore token and continue
if all(char in self.punct for char in token):
continue
# Lemmatize the token and yield
lemma = self.lemmatize(token, tag)
yield lemma
def lemmatize(self, token, tag):
tag = {
'N': wn.NOUN,
'V': wn.VERB,
'R': wn.ADV,
'J': wn.ADJ
}.get(tag[0], wn.NOUN)
return self.lemmatizer.lemmatize(token, tag)
|
JFriel/honours_project
|
app/parser/NLTKPreprocessor.py
|
Python
|
gpl-3.0
| 2,120
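A minimal usage sketch for the transformer above, assuming it is dropped into a scikit-learn Pipeline; the identity tokenizer handed to TfidfVectorizer and the classifier choice are illustrative assumptions, not part of the original file.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

# NLTKPreprocessor.transform() already returns lists of lemmas, so the
# vectorizer is told not to tokenize, preprocess, or lowercase again.
model = Pipeline([
    ("preprocess", NLTKPreprocessor()),
    ("vectorize", TfidfVectorizer(tokenizer=lambda tokens: tokens,
                                  preprocessor=None, lowercase=False)),
    ("classify", LogisticRegression()),
])
# model.fit(documents, labels); model.predict(new_documents)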
|
#!/usr/bin/python
import pylab
import glob
import time
import os
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import trapz
from scipy.optimize import minimize_scalar
from csv import DictReader, QUOTE_NONNUMERIC
from collections import defaultdict
from matplotlib import pyplot
from datetime import datetime, timedelta
#Minimum and maximum elevations for each reservoir
FC_MAX_EL = 833.3
FC_MIN_EL = 682.2
HC_MAX_EL = 1545.3
HC_MIN_EL = 1246.7
LP_MAX_EL = 931.7
LP_MIN_EL = 715.2
def scruffy(path,return_path,name): #Scruffy's the janitor. Kills any output files older than one hour
os.chdir(path)
hour_ago = datetime.now() - timedelta(hours=1)
for file in glob.glob("{}*".format(name)):
age = datetime.fromtimestamp(os.path.getctime(file))
if age < hour_ago:
os.remove(file)
os.chdir(return_path)
def get_sustain_est(elevation, total_daphnia, consumed, site):
bath_data = '{}Bath.csv'.format(site)
bath = {}
with open(bath_data) as file:
reader = DictReader(file)
for row in reader:
bath.update({int(row['elevation (m)']): float(row[' 2d_area (m2)'])})
if site == 'Fall Creek':
elev = min(max((elevation/3.281), (FC_MIN_EL/3.281)), (FC_MAX_EL/3.281))
elif site == 'Hills Creek':
elev = min(max((elevation/3.281), (HC_MIN_EL/3.281)), (HC_MAX_EL/3.281))
elif site == 'Lookout Point':
elev = min(max((elevation/3.281), (LP_MIN_EL/3.281)), (LP_MAX_EL/3.281))
area = bath[int(elev)]
consumable = (area*total_daphnia*0.58)
pop_est = consumable/(consumed*4)
return pop_est
def get_vals(light_in, total_daphnia_in, daphnia_size_in, site, month, year):
#k represents the light extinction coefficient
lights = {('Fall Creek', '2016'): {'April': 0.758, 'May': 0.466, 'June': 0.435,
'July': 0.451, 'August': 0.444, 'September': 0.406},
('Hills Creek', '2016'): {'April': 0.399, 'May': 0.321, 'June': 0.440,
'July': 0.257, 'August': 0.384, 'September': 0.340},
('Lookout Point', '2016'): {'April': 0.514, 'May': 0.373, 'June': 0.368,
'July': 0.311, 'August': 0.389, 'September': 0.343},
('Fall Creek', '2015'): {'March': 0.834, 'April': 0.596, 'May': 0.58, 'June': 0.72,
'July': 0.521, 'August': 0.509},
('Hills Creek', '2015'): {'March': 0.583, 'April': 0.503, 'May': 0.467,
'June': 0.441, 'July': 0.32, 'August': 0.368},
('Lookout Point', '2015'): {'March': 0.532, 'April': 0.565, 'May': 0.373,
'June': 0.374, 'July': 0.396, 'August': 0.39},
('Fall Creek', '2014'): {'June': 0.404, 'July': 0.274, 'August': 0.295},
('Hills Creek', '2014'): {'June': 0.298, 'July': 0.274, 'August': 0.274},
('Lookout Point', '2014'): {'June': 0.315, 'July': 0.271, 'August': 0.282}
}
# Daphnia totals weighted by subsample - only from C and Z sites
# July 2013 and July 2014 are not currently available
daphnias = {('Fall Creek', '2016'): {'April': 367, 'May': 22328, 'June': 48240, 'July': 8801,
'August': 5378, 'September': 3626},
('Hills Creek', '2016'): {'April': 163, 'May': 7456, 'June': 88658, 'July': 9045,
'August': 13527, 'September': 13853},
('Lookout Point', '2016'): {'April': 20, 'May': 448, 'June': 9290, 'July': 11693,
'August': 6926, 'September': 1854},
('Fall Creek', '2015'): {'March': 815, 'April': 17357, 'May': 24446, 'June':3993,
'July': 2363, 'August': 407},
('Hills Creek', '2015'): {'March': 204, 'April': 453, 'May': 11408, 'June': 20535,
'July': 9126, 'August': 3178},
('Lookout Point', '2015'): {'March': 61, 'April': 127, 'May': 14016, 'June': 44981,
'July': 5949, 'August': 581},
('Fall Creek', '2014'): {'June': 25280, 'July': 0, 'August': 7752},
('Hills Creek', '2014'): {'June': 6040, 'July': 0, 'August': 2249},
('Lookout Point', '2014'): {'June': 16863, 'July': 0, 'August': 1061},
('Fall Creek', '2013'): {'June': 18416, 'July': 0, 'August': 4563},
('Hills Creek', '2013'): {'June': 127772, 'July': 0, 'August': 18559},
('Blue River', '2013'): {'June': 68449, 'July': 0, 'August': 41233}
}
#Weighted for proportion D.mendotae, D.pulex, and D.rosea/ambigua averaged across available years
sizes = {('Fall Creek', '2016'): {'April': 0.56, 'May': 1.01, 'June': 1.13, 'July': 1.48,
'August': 1.78, 'September': 1.10},
('Hills Creek', '2016'): {'April': 1.22, 'May': 1.08, 'June': 1.16, 'July': 1.54,
'August': 1.18, 'September': 1.51},
('Lookout Point', '2016'): {'April': 0.53, 'May': 0.68, 'June': 1.14, 'July': 1.31,
'August': 1.64, 'September': 1.20},
('Blue River', '2016'): {'July': 1.27},
('Fall Creek', '2015'): {'March': 1.21, 'April': 1.25, 'May': 1.13, 'June': 1.26,
'July': 1.49, 'August': 1.18},
('Hills Creek', '2015'): {'March': 1.24, 'April': 1.09, 'May': 1.03, 'June': 1.20,
'July': 1.84, 'August': 2.21},
('Lookout Point', '2015'): {'March': 1.46, 'April': 0.96, 'May': 1.06, 'June': 1.35,
'July': 1.97, 'August': 2.07},
('Blue River', '2015'): {'March': 0.63, 'April': 0.73, 'May': 0.83, 'June': 1.50,
'July': 1.48, 'August': 1.25},
('Fall Creek', '2014'): {'March': 1.207, 'April': 0.90375, 'May': 1.073, 'June': 1.262,
'July': 1.485, 'August': 1.633},
('Hills Creek', '2014'): {'March': 1.238, 'April': 1.152, 'May': 1.058, 'June': 1.232,
'July': 1.687, 'August': 2.005},
('Lookout Point', '2014'): {'March': 1.457, 'April': 0.745, 'May': 0.871,
'June': 1.237, 'July': 1.642, 'August': 2.033},
('Blue River', '2014'): {'March': 0.628, 'April': 0.780, 'May': 0.827, 'June': 1.321,
'July': 1.377, 'August': 1.282}
}
if light_in == float(123456):
light = lights[(site, year)][month]
else:
light = light_in
print(site,month,year)
if total_daphnia_in == float(123456):
total_daphnia = daphnias[(site, year)][month]
else:
total_daphnia = total_daphnia_in
if daphnia_size_in == float(123456):
daphnia_size = sizes[(site, year)][month]
else:
daphnia_size = daphnia_size_in
return light, total_daphnia, daphnia_size
def sensitivity_expand(form):
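    # Builds 15 multipliers for the chosen sensitivity parameter: four reduced
    # values (1/50, 1/40, 1/30, 1/20), the baseline 1.0, and ten evenly spaced
    # increases up to Sparam_Range percent of the baseline (200% by default).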
sparam_exp = []
if form.getvalue('Sparam_Range') != None:
sparam_range = float(form.getvalue('Sparam_Range'))
else:
sparam_range = 200
step_size = (sparam_range-100)/1000
for i in range(5, 1, -1):
sparam_exp.append(float(1)/(i*10))
sparam_exp.append(1)
for i in range(1, 11):
sparam_exp.append(float(1)+(step_size*i))
return sparam_exp
def run_sensitivity(sens_factors, sparam, site_data, starting_mass, daph_data, max_temp, min_temp, cust_temp, elev, pop_site, base_val, base_set_flag):
batches = []
results = []
sens_inputs = []
growths = []
growths1 = []
csvheaders = [[] for i in range(20)]
SHORT_RESULTS = {'Elevation': [], 'Reservoir(used for elevation)': [],
'Daphnia Density': [], 'Light': [], 'Daphnia Size': [],
'Min Depth': [], 'Max Depth': [], 'Min Temp': [], 'Max Temp': [],
'Daphnia Year': [], 'Daphnia Month': [], 'Daphnia Site': [],
'Temperature File': [], 'Starting Mass': [], 'Ending Mass': [],
'Day Depth': [], 'Day Temperature': [], 'Night Depth': [],
'Night Temperature': [], 'Day 1 Growth': [], 'Day 30 Growth': [],
'Daphnia Consumed': [], 'Sustainable Estimate': [],
'Estimated Condition Change': []}
if base_set_flag == 0:
if sparam == 'Starting Mass':
base_input = starting_mass
elif sparam == 'Total Daphnia':
base_input = daph_data.total_daph
elif sparam == 'Daphnia Size':
base_input = daph_data.daph_size
else:
base_input = site_data.light
print("base input: ", base_input, "<br>")
base_set_flag = 1
base_val = base_input
else:
base_input = base_val
for i in range(15):
print("base input: ", base_input, "<br>")
if (base_input * sens_factors[i]) > 0.00001:
sens_inputs.append(base_input * sens_factors[i])
else:
sens_inputs.append(.00001)
sens_factors[i] = sens_factors[i] * 100
csvheaders[i] = [site_data.site, site_data.month, site_data.year, ("%s: %f" % (sparam, sens_inputs[i]))]
if sparam == 'Starting Mass':
batches.append(Batch(site_data, sens_inputs[i], daph_data, max_temp, min_temp, cust_temp, elev, pop_site))
elif sparam == 'Total Daphnia':
daph_data.total_daph = sens_inputs[i]
batches.append(Batch(site_data, starting_mass, daph_data, max_temp, min_temp, cust_temp, elev, pop_site))
elif sparam == 'Daphnia Size':
daph_data.daph_size = sens_inputs[i]
batches.append(Batch(site_data, starting_mass, daph_data, max_temp, min_temp, cust_temp, elev, pop_site))
else:
site_data.light = sens_inputs[i]
batches.append(Batch(site_data, starting_mass, daph_data, max_temp, min_temp, cust_temp, elev, pop_site))
res, taway, condition, condition1, dt, nt, taway2 = batches[i].Run_Batch()
results.append(res)
#SHORT_RESULTS['Tab Name'].append(vals.title)
SHORT_RESULTS['Elevation'].append(elev)
SHORT_RESULTS['Reservoir(used for elevation)'].append(pop_site)
SHORT_RESULTS['Daphnia Density'].append(daph_data.total_daph)
SHORT_RESULTS['Light'].append(site_data.light)
SHORT_RESULTS['Daphnia Size'].append(daph_data.daph_size)
SHORT_RESULTS['Min Depth'].append(site_data.min_depth)
SHORT_RESULTS['Max Depth'].append(site_data.max_depth)
SHORT_RESULTS['Min Temp'].append(min_temp)
SHORT_RESULTS['Max Temp'].append(max_temp)
SHORT_RESULTS['Daphnia Year'].append(daph_data.d_year)
SHORT_RESULTS['Daphnia Month'].append(daph_data.d_month)
SHORT_RESULTS['Daphnia Site'].append(daph_data.d_site)
SHORT_RESULTS['Temperature File'].append(cust_temp)
SHORT_RESULTS['Starting Mass'].append(starting_mass)
SHORT_RESULTS['Ending Mass'].append(results[i]['StartingMass'][29])
SHORT_RESULTS['Day Depth'].append(results[i]['day_depth'][29])
SHORT_RESULTS['Day Temperature'].append(dt)
SHORT_RESULTS['Night Depth'].append(results[i]['night_depth'][29])
SHORT_RESULTS['Night Temperature'].append(nt)
SHORT_RESULTS['Day 1 Growth'].append(results[i]['growth'][0])
SHORT_RESULTS['Day 30 Growth'].append(results[i]['growth'][29])
SHORT_RESULTS['Daphnia Consumed'].append(taway)
SHORT_RESULTS['Sustainable Estimate'].append(taway2)
SHORT_RESULTS['Estimated Condition Change'].append(condition)
growths.append(results[i]['growth'][29])
growths1.append(results[i]['growth'][0])
return results, growths, growths1, csvheaders, sens_inputs, SHORT_RESULTS, base_val, base_set_flag
class Daph_Data:
def __init__(self, abundance, size, year, site, month):
self.total_daph = abundance
self.daph_size = size
self.d_year = year
self.d_site = site
self.d_month = month
class Form_Data_Packager:
def __init__(self, form):
self.title = form.getvalue('TabName') or 'GrowChinook Results'
self.starting_mass = float(form.getvalue('Starting_Mass_In') or 20)
if self.starting_mass == 0:
self.starting_mass = 0.1
self.total_daphnnia = float(form.getvalue('Total_Daphnia_Input_Name') or form.getvalue('TotDDef') or 123456)
self.daphnia_size = float(form.getvalue('Daphnia Size') or form.getvalue('DaphSDef') or 123456)
self.light = float(form.getvalue('Light') or form.getvalue('LightDef') or 123456)
self.year = form.getvalue('Year') or '2015'
self.month = form.getvalue('Month1') or 'June'
self.site = form.getvalue('Site') or 'Fall Creek'
self.max_dep = float(form.getvalue('DmaxIn') or 10000)
self.min_dep = float(form.getvalue('DminIn') or -1)
self.max_temp = float(form.getvalue('TmaxIn') or 10000)
self.min_temp = float(form.getvalue('TminIn') or -1)
if self.min_temp == self.max_temp:
self.max_temp = self.max_temp + 1
self.pop_site = form.getvalue('ESite') or self.site
self.elev = float(form.getvalue('Elev') or 100000)
if self.pop_site == 'Fall Creek':
self.elev = max(self.elev, 690)
self.max_dep = min(((self.elev - FC_MIN_EL) / 3.281), self.max_dep)
elif self.pop_site == 'Lookout Point':
self.elev = max(self.elev, 725)
self.max_dep = min(((self.elev - LP_MIN_EL) / 3.281), self.max_dep)
elif self.pop_site == 'Hills Creek':
self.max_dep = min(((self.elev - HC_MIN_EL) / 3.281), self.max_dep)
if self.max_dep <= 0:
self.max_dep = 1
self.dmaxday = 1
self.daph_year = form.getvalue('DYear') or self.year
self.daph_month = form.getvalue('DMonth') or self.month
self.daph_site = form.getvalue('DSite') or self.site
self.temp_year = form.getvalue('TYear') or self.year
self.temp_month = form.getvalue('TMonth') or self.month
self.temp_site = form.getvalue('TSite') or self.site
if form.getvalue('CustTemp') is None:
self.cust_temp = '{0}_T_{1}_{2}.csv'.format(self.temp_site, self.temp_month, self.temp_year)
else:
self.cust_temp = 'uploads/temp/{}'.format(form.getvalue('CustTemp'))
        self.light, self.total_daphnnia, self.daphnia_size = get_vals(self.light, self.total_daphnnia, self.daphnia_size, self.site, self.month, self.year)
self.site_data = Site_Data(self.year, self.site, self.month, self.light, self.max_dep, self.min_dep)
self.daph_data = Daph_Data(self.total_daphnnia, self.daphnia_size, self.daph_year, self.daph_site, self.daph_month)
class Adv_Sens_Form_Data_Packager:
def __init__(self, form):
self.title = form.getvalue('TabName') or 'GrowChinook Results'
self.starting_mass = float(form.getvalue('Starting_Mass_In') or 20)
if self.starting_mass == 0:
self.starting_mass = 0.1
self.total_daphnnia = float(form.getvalue('Total_Daphnia_Input_Name') or form.getvalue('TotDDef') or 123456)
self.daphnia_size = float(form.getvalue('Daphnia Size') or form.getvalue('DaphSDef') or 123456)
self.light = float(form.getvalue('Light') or form.getvalue('LightDef') or 123456)
self.year = form.getvalue('Year') or '2015'
self.site = form.getvalue('Site') or 'Fall Creek'
self.max_dep = float(form.getvalue('DmaxIn') or 10000)
self.min_dep = float(form.getvalue('DminIn') or -1)
self.max_temp = float(form.getvalue('TmaxIn') or 10000)
self.min_temp = float(form.getvalue('TminIn') or -1)
if self.min_temp == self.max_temp:
self.max_temp = self.max_temp + 1
self.site_data = Site_Data(self.year, self.site, None, self.light, self.max_dep, self.min_dep)
self.daph_data = Daph_Data(self.total_daphnnia, self.daphnia_size, self.year, self.site, None)
class Site_Data:
def __init__(self, year, site, month, light, max_depth, min_depth):
self.year = year
self.site = site
self.month = month
self.light = light
self.max_depth = max_depth
self.min_depth = min_depth
class Batch:
def __init__(self, site_data, starting_mass, daphnia_data, temp_max, temp_min, temp_file, elevation, PSite):
self.site = site_data.site
self.month = site_data.month
self.year = site_data.year
self.light = site_data.light
self.daphnia_size = daphnia_data.daph_size
self.total_daphnia = daphnia_data.total_daph
self.temp_file = temp_file
self.DYear = daphnia_data.d_year
self.DMonth = daphnia_data.d_month
self.daphnia_site = daphnia_data.d_site
self.starting_mass = starting_mass
self.starting_mass_initial = starting_mass
self.depth_max = site_data.max_depth
self.depth_min = site_data.min_depth
self.temp_max = temp_max
self.temp_min = temp_min
self.dtfinal = 0
self.ntfinal = 0
self.depths = []
self.elevation = elevation
self.PSite = PSite
self.SparamExp = []
# Body lengths (from grey lit((())))
self.SwimSpeed = 2
self.params = {}
# J/gram of O2 in respiration conversions (Elliot and Davidson 1975).
self.O2Conv = 13560
# lux http://sustainabilityworkshop.autodesk.com/buildings/measuring-light-levels
self.DayLight = 39350
self.NightLight = 0.10
self.out = {}
# Based of Cornell equation (g) #WetDaphWeight <- DaphWeight*(8.7/0.322)
self.daphnia_dry_weight = (np.exp(1.468 + 2.83 * np.log(self.daphnia_size))) /\
1000000 #From Ghazy, others use ~10%
self.daphnia_weight = self.daphnia_dry_weight * 8.7 / 0.322
if elevation is None:
self.elevation = 100000
else:
self.elevation = int(float(elevation)/3.281)
self.PSite = PSite
self.PSite = self.PSite or self.site
self.DYear = self.DYear or self.year
self.DMonth = self.DMonth or self.month
self.daphnia_site = self.daphnia_site or self.site
# From Luecke 22.7 kJ/g
DaphEnergy = 22700
self.prey = [1]
# Noue and Choubert 1985 suggest Daphnia are 82.6% digestible by Rainbow Trout
self.digestibility = [0.174]
self.preyenergy = [DaphEnergy]
with open('Daphnia VD.csv') as fid:
reader = DictReader(fid)
zooplankton_data = [r for r in reader]
(self.daphline, self.daph_auc) = self.compute_daphniabydepth(zooplankton_data)
# From Lookout Point and Fall Creek downstream screw trap data (R2 = 0.9933)
self.StartingLength = (self.starting_mass / 0.000004) ** (1 / 3.1776)
#self.StartingLength = (self.starting_mass/0.0003)**(1/2.217) #see note below
self.temp_max = self.temp_max or 1000
self.temp_min = self.temp_min or -1
f = 'ChinookAppendixA.csv'
with open(f) as fid:
reader = DictReader(fid, quoting=QUOTE_NONNUMERIC)
self.params = next(reader)
if self.temp_file == "None_T_None_None.csv":
temperature_file = '{0}_T_{1}_{2}.csv'\
.format(self.site, self.month, self.year)
else:
temperature_file = temp_file
with open(temperature_file) as fid:
reader = DictReader(fid)
self.temperatures = []
for row in reader:
if (float(row['temp']) <= self.temp_max) and (float(row['temp']) >= self.temp_min):
self.temperatures.append(float(row['temp']))
self.depths.append(float(row['depth']))
if self.temperatures == [] or self.depths == []:
print("ALL DEPTHS EXCLUDED BY TEMPERATURE AND DEPTH RESTRICTIONS!!!!!!!!!")
self.predatorenergy = self.predatorenergy(self.starting_mass)
self.depth_from_temp = interp1d(self.temperatures, self.depths,
fill_value=0, bounds_error=False)
self.temp_from_depth = interp1d(self.depths, self.temperatures,
fill_value=0, bounds_error=False)
day_depth = 5
night_depth = 10
self.day_temp = self.temp_from_depth(day_depth)
self.day_depth = 5
self.night_temp = self.temp_from_depth(night_depth)
self.night_depth = 10
def compute_daphniabydepth(self, zooplankton_data):
# get rows for site, season, depth
if self.year == '2016':
rows = [r for r in zooplankton_data if (r['Site'] == self.daphnia_site
and r['Month'] == self.DMonth
and r['Year'] == '2016')]
else:
rows = [r for r in zooplankton_data if (r['Site'] == self.daphnia_site
and r['Month'] == self.DMonth
and r['Year'] == '2015')]
x = [float(r['Depth']) for r in rows]
y = [float(r['Total Daphnia']) for r in rows]
surface_count = y[np.argmin(x)]
auc = trapz(y, x)
y = y / auc * self.total_daphnia
return (interp1d(x, y, bounds_error=False, fill_value=surface_count), trapz(y, x))
# Foraging from Beauchamps paper, prey per hour
def compute_foragingbydepth(self, StartingLength, starting_mass, surface_light,
daphline, daph_auc, depth):
light = surface_light * np.exp((-self.light) * depth)
depth = depth
daphnia = daphline(depth) / 10000
#reactiondistance = 3.787 * (light ** 0.4747) * ((self.daphnia_size / 10) ** 0.9463)
lightenergy = light/51.2
suspendedsediment = -((np.log(lightenergy) - 1.045)/(.0108))
turbidity = .96*np.log(suspendedsediment+1) - .002
reactiondistance = 31.64-13.31*turbidity
if reactiondistance < .08 or np.isnan(reactiondistance):
reactiondistance = .08
swim_speed = self.SwimSpeed * StartingLength/10
searchvolume = np.pi * (reactiondistance ** 2) * swim_speed
EncounterRate = searchvolume * daphnia
gramsER = EncounterRate * self.daphnia_weight
return gramsER / starting_mass
def compute_ft(self, temperature):
CQ = self.params['CQ']
CTL = self.params['CTL']
CTM = self.params['CTM']
CTO = self.params['CTO']
CK1 = self.params['CK1']
CK4 = self.params['CK4']
eq = self.params['c_eq']
if eq == 1:
return np.exp(CQ * temperature)
elif eq == 2:
V = (CTM - temperature) / (CTM - CTO)
Z = np.log(CQ) * (CTM - CTO)
Y = np.log(CQ) * (CTM - CTO + 2)
X = (Z ** 2 * (1 + (1 + 40 / Y) ** 0.5) ** 2) / 400
return (V ** X) * np.exp(X * (1 - V))
elif eq == 3:
G1 = (1 / (CTO - CQ)) * np.log((0.98 * (1 - CK1)) / (CK1 * 0.002))
G2 = (1 / (CTL - CTM)) * np.log((0.98 * (1 - CK4)) / (CK4 * 0.02))
L1 = np.exp(G1 * (temperature - CQ))
L2 = np.exp(G2 * (CTL - temperature))
K_A = (CK1 * L1) / (1 + CK1 * (L1 - 1))
K_B = (CK4 * L2) / (1 + CK4 * (L2 - 1))
return K_A * K_B
else:
raise ValueError("Unknown consumption equation type: " + eq)
def compute_cmax(self, W):
CA = self.params['CA']
CB = self.params['CB']
return CA * (W ** CB)
def compute_consumption(self, cmax, P, ft):
return cmax * P * ft
def compute_waste(self, consumption, P, temperature, prey, digestibility):
# Units are g/g/d
FA = self.params['FA']
FB = self.params['FB']
FG = self.params['FG']
UA = self.params['UA']
UB = self.params['UB']
UG = self.params['UG']
eq = self.params['egexeq']
if eq == 1:
egestion = FA * consumption
excretion = UA * (consumption - egestion)
return (egestion, excretion)
elif eq == 2:
egestion = FA * (temperature ** FB) * np.exp(FG * P) * consumption
excretion = UA * (temperature ** UB) * np.exp(UG * P) * (consumption - egestion)
return (egestion, excretion)
elif eq == 3:
if prey is None or digestibility is None:
raise ValueError("Prey or digestibility not defined")
PFF = np.inner(prey, digestibility)
PE = FA * (temperature ** FB) * np.exp(FG * P)
PF = ((PE - 0.1) / 0.9) * (1 - PFF) + PFF
egestion = PF * consumption
excretion = UA * (temperature ** UB) * np.exp(UG * P) * (consumption - egestion)
return (egestion, excretion)
else:
raise ValueError("Unknown egestion/excretion equation type: " + eq)
def compute_respiration(self, W0, temperature, egestion, consumption):
RA = self.params['RA']
RB = self.params['RB']
RQ = self.params['RQ']
RTO = self.params['RTO']
RTM = self.params['RTM']
RTL = self.params['RTL']
RK1 = self.params['RK1']
RK4 = self.params['RK4']
ACT = self.params['ACT']
BACT = self.params['BACT']
SDA = self.params['SDA']
eq = self.params['respeq']
if eq == 1:
if temperature > RTL:
VEL = RK1 * W0 ** RK4
print("SOME OF THE INCLUDED TEMPERATURES ARE LETHAL,"
"PLEASE MODIFY THE TEMPERATURE TO EXCLUDE TEMPERATURES OVER 25C!")
else:
VEL = ACT * (W0 ** RK4) * np.exp(BACT * temperature)
FTmetabolism = np.exp(RQ * temperature)
activity = np.exp(RTO * VEL)
elif eq == 2:
Vresp = (RTM - temperature) / (RTM - RTO)
Zresp = np.log(RQ) * (RTM - RTO)
Yresp = np.log(RQ) * (RTM - RTO + 2)
Xresp = (((Zresp ** 2) * (1 + (1 + 40 / Yresp) ** 0.5)) ** 2) / 400
FTmetabolism = (Vresp ** Xresp) * np.exp(Xresp * (1 - Vresp))
activity = ACT
else:
raise ValueError("Unknown respiration equation type: " + eq)
respiration = RA * (W0 ** RB) * FTmetabolism * activity
SDAction = SDA * (consumption - egestion)
return (respiration, SDAction)
def predatorenergy(self, W0):
AlphaI = self.params['AlphaI']
AlphaII = self.params['AlphaII']
BetaI = self.params['BetaI']
BetaII = self.params['BetaII']
energydensity = self.params['energydensity']
cutoff = self.params['cutoff']
eq = self.params['prededeq']
if eq == 1:
predatorenergy = energydensity
if eq == 2:
if W0 < cutoff:
predatorenergy = AlphaI + (BetaI * W0)
elif W0 >= cutoff:
predatorenergy = AlphaII + (BetaII * W0)
else:
raise ValueError("Unknown predator energy density equation type: " + eq)
return predatorenergy
def compute_bioenergetics(self, W, temp, P, prey, digestibility):
cmax = self.compute_cmax(W)
ft = self.compute_ft(temp)
consumption = self.compute_consumption(cmax, P, ft)
(egestion, excretion) = self.compute_waste(consumption, P, temp, prey, digestibility)
(respiration, SDAction) = self.compute_respiration(W, temp, egestion, consumption)
return (consumption, egestion, excretion, respiration, SDAction)
def compute_growth(self, consumption, prey, preyenergy, egestion, excretion,
SDAction, respiration, predatorenergy, W):
consumptionjoules = consumption * np.inner(prey, preyenergy)
return (consumptionjoules - ((egestion + excretion + SDAction) * np.inner(prey, preyenergy)
+ respiration * self.O2Conv)) / predatorenergy * W
def best_depth(self, StartingLength, starting_mass, hours, light, depths):
if self.depth_min > min(max(depths), self.depth_max):
self.depth_min = min(max(depths), self.depth_max)
if self.depth_max < max(min(depths), self.depth_min):
self.depth_max = max(min(depths), self.depth_min)
if self.depth_max == self.depth_min:
self.depth_max = self.depth_max + 0.2
depth_arr = np.arange(max(min(depths), self.depth_min), min(max(depths), self.depth_max), 0.1)
growths = [self.growth_fn(d, StartingLength, starting_mass, hours, light, self.prey)[0]
for d in depth_arr]
idx = np.argmax(growths)
d = depth_arr[idx]
best_growth, best_consumption = self.growth_fn(d, StartingLength, starting_mass,
hours, light, self.prey)
return depth_arr[idx], best_growth, best_consumption
def growth_fn(self, depth, StartingLength, starting_mass, hours, light, prey):
temp = self.temp_from_depth(depth)
foraging = self.compute_foragingbydepth(StartingLength, starting_mass, light,
self.daphline, self.daph_auc, depth) * hours
ft = self.compute_ft(temp)
cmax = self.compute_cmax(starting_mass)
P = min(foraging / cmax, 1)
(consumption, egestion, excretion, respiration, SDAction) = \
self.compute_bioenergetics(starting_mass, temp, P, self.prey, self.digestibility)
day_proportion = hours / 24.0
consumption *= day_proportion
respiration *= day_proportion
growth = self.compute_growth(consumption, prey, self.preyenergy, egestion, excretion,
SDAction, respiration, self.predatorenergy, starting_mass)
return (growth, consumption)
def Run_Batch(self):
daylength = {'March':11.83, 'April':13.4, 'May':14.73, 'June':15.42,
'July':15.12, 'August':13.97, 'September':12.45}
# March 11:50 (11.83), April 13:24 (13.4), May 14:44 (14.73), June 15:25 (15.42),
# July 15:07 (15.12), August 13:58 (13.97), September 12:27 (12.45)
ndays = 30
day_hours = daylength[self.month]
night_hours = 24 - day_hours
self.out = {'Year':[], 'Site':[], 'Month':[], 'Fish Starting Mass':[],
'Light Extinction Coefficient':[], 'Daphnia Size':[], 'Daphnia Density':[],
'StartingLength':[], 'StartingMass':[], 'growth':[], 'day_depth':[],
'night_depth':[]}
condition1 = float(100*self.starting_mass*((self.StartingLength/10)**(-3.0)))
for d in range(ndays):
(day_depth, day_growth, day_consumption) =\
self.best_depth(self.StartingLength, self.starting_mass,
day_hours, self.DayLight, self.depths)
(night_depth, night_growth, night_consumption) =\
self.best_depth(self.StartingLength, self.starting_mass,
night_hours, self.NightLight, self.depths)
self.day_temp = self.temp_from_depth(day_depth)
self.night_temp = self.temp_from_depth(night_depth)
growth = day_growth + night_growth
dailyconsume = ((day_consumption + night_consumption)*self.starting_mass)\
/self.daphnia_weight
self.starting_mass += growth
if growth > 0:
# From LP and FC screw trap data (R2 = 0.9933)
self.StartingLength = (self.starting_mass / 0.000004) ** (1 / 3.1776)
#self.StartingLength = (self.starting_mass / 0.0003) ** (1 / 2.217)
#weight to fork length (MacFarlane and Norton 2008)
#Checked fish lengths against this and by end of summer
# fish weigh much less than they 'should' based on their length
self.out['Year'].append(self.year)
self.out['Site'].append(self.site)
self.out['Month'].append(self.month)
self.out['Fish Starting Mass'].append(self.starting_mass)
self.out['Light Extinction Coefficient'].append(self.light)
self.out['Daphnia Size'].append(self.daphnia_size)
self.out['Daphnia Density'].append(self.total_daphnia)
self.out['day_depth'].append(day_depth)
self.out['night_depth'].append(night_depth)
self.out['growth'].append(growth)
self.out['StartingMass'].append(self.starting_mass)
self.out['StartingLength'].append(self.StartingLength)
dtfinal = self.day_temp
ntfinal = self.night_temp
ele = self.elevation-int(day_depth)
daph = self.daphline(day_depth)
PopEst = get_sustain_est(ele, daph, dailyconsume, self.PSite)
condition = float(100*(self.starting_mass-self.starting_mass_initial)*((self.StartingLength/10)**(-3.0)))
return self.out, dailyconsume, condition, condition1, dtfinal, ntfinal, PopEst
|
ReservoirWebs/GrowChinook
|
Bioenergetics_advsens_new.py
|
Python
|
gpl-3.0
| 33,775
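As a quick numeric check of the Daphnia weight conversion used in Batch.__init__ above (dry weight from body length, then the 8.7/0.322 dry-to-wet scaling), here is a self-contained sketch; the 1.4 mm body length is an arbitrary illustrative value, not one of the monthly means tabulated in this file.

import numpy as np

daphnia_size = 1.4  # body length in mm (illustrative value)
dry_weight_g = np.exp(1.468 + 2.83 * np.log(daphnia_size)) / 1000000
wet_weight_g = dry_weight_g * 8.7 / 0.322
print(dry_weight_g, wet_weight_g)  # roughly 1.1e-05 g dry, 3.0e-04 g wet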
|
import sys, os
import cog
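# These helpers are meant to run under cog's inline code-generation markers in
# a C/C++ source file: they read an OpenCL kernel file and emit it as a
# `const char *` string constant, and (for write_kernel2/write_kernel3) also
# emit a buildKernelFromString(...) call that compiles it at runtime.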
def write_kernel( var_name, kernel_filename ):
cog.outl( 'string kernelFilename = "' + kernel_filename + '";' )
f = open( kernel_filename, 'r')
line = f.readline()
cog.outl( 'const char * ' + var_name + ' = ' )
while( line != '' ):
cog.outl( '"' + line.strip().replace('\\','\\\\') + '\\n" ' )
line = f.readline()
cog.outl( '"";')
f.close()
def write_file2( filepath ):
f = open( filepath, 'r')
line = f.readline()
while( line != '' ):
line = process_includes( line )
cog.outl( '"' + line.rstrip().replace('\\','\\\\').replace('"', '\\"') + '\\n" ' )
line = f.readline()
f.close()
def process_includes( line ):
if line.strip().find('#include') != 0:
return line
line = line.replace('<','"').replace('>','"') # standardize quotes a bit...
targetpath = line.split('"')[1]
line = ''
cog.outl('// including ' + targetpath + ':')
write_file2( '../' + targetpath )
return line
def write_kernel2( kernelVarName, kernel_filename, kernelName, options ):
# cog.outl( 'string kernelFilename = "' + kernel_filename + '";' )
cog.outl( '// generated using cog:' )
cog.outl( 'const char * ' + kernelVarName + 'Source = ' )
write_file2( '../' + kernel_filename )
cog.outl( '"";')
cog.outl( kernelVarName + ' = cl->buildKernelFromString( ' + kernelVarName + 'Source, "' + kernelName + '", ' + options + ', "' + kernel_filename + '" );' )
def write_kernel3( kernelVarName, kernel_filename, kernelName, options ):
# cog.outl( 'string kernelFilename = "' + kernel_filename + '";' )
cog.outl( '// generated using cog:' )
f = open( '../' + kernel_filename, 'r')
line = f.readline()
cog.outl( 'const char * ' + kernelVarName + 'Source = R"DELIM(\n' )
while( line != '' ):
cog.outl( '' + line.rstrip() )
line = f.readline()
cog.outl( ')DELIM";')
f.close()
cog.outl( kernelVarName + ' = cl->buildKernelFromString( ' + kernelVarName + 'Source, "' + kernelName + '", ' + options + ', "' + kernel_filename + '" );' )
|
hughperkins/Jinja2CppLight
|
cog-batteries/stringify.py
|
Python
|
mpl-2.0
| 2,124
|
from django.shortcuts import redirect
from django.core.urlresolvers import reverse_lazy
from django.views.generic import TemplateView, FormView
from .forms import ContactForm
class HomeView(TemplateView):
template_name = 'pages/home.html'
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('questions:list')
return super(TemplateView, self).dispatch(request, *args, **kwargs)
class ThanksView(TemplateView):
template_name = 'pages/thanks.html'
class AboutView(TemplateView):
template_name = 'pages/about.html'
class ContactView(FormView):
template_name = 'pages/contact.html'
form_class = ContactForm
success_url = reverse_lazy("thanks")
def form_valid(self, form):
form.send_email()
return super(FormView, self).form_valid(form)
|
Uran198/med
|
med/pages/views.py
|
Python
|
bsd-3-clause
| 864
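For context on ContactView above, here is a minimal sketch of the kind of ContactForm it assumes; the real form lives in pages/forms.py and is not shown in this file, so the field names and recipient address below are purely illustrative.

from django import forms
from django.core.mail import send_mail

class ContactForm(forms.Form):
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea)

    def send_email(self):
        # Called from ContactView.form_valid() once the form validates.
        send_mail(
            subject="Contact form submission",
            message=self.cleaned_data["message"],
            from_email=self.cleaned_data["email"],
            recipient_list=["admin@example.com"],
        )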
|
# -*- coding: utf-8 -*-
###############################################################################
#
# FriendshipsShow
# Returns detailed information about the relationship between two users.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FriendshipsShow(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FriendshipsShow Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FriendshipsShow, self).__init__(temboo_session, '/Library/Twitter/FriendsAndFollowers/FriendshipsShow')
def new_input_set(self):
return FriendshipsShowInputSet()
def _make_result_set(self, result, path):
return FriendshipsShowResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FriendshipsShowChoreographyExecution(session, exec_id, path)
class FriendshipsShowInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FriendshipsShow
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(FriendshipsShowInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(FriendshipsShowInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(FriendshipsShowInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(FriendshipsShowInputSet, self)._set_input('ConsumerSecret', value)
def set_SourceScreenName(self, value):
"""
Set the value of the SourceScreenName input for this Choreo. ((conditional, string) The screen_name of the subject user. Required unless specifying the SourceUserID instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('SourceScreenName', value)
def set_SourceUserID(self, value):
"""
Set the value of the SourceUserID input for this Choreo. ((conditional, string) The ID of the subject user. Required unless specifying the SourceScreenName instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('SourceUserID', value)
def set_TargetScreenName(self, value):
"""
Set the value of the TargetScreenName input for this Choreo. ((conditional, string) The screen_name of the target user. Required unless specifying the TargetUserID instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('TargetScreenName', value)
def set_TargetUserID(self, value):
"""
Set the value of the TargetUserID input for this Choreo. ((conditional, string) The ID of the target user. Required unless specifying the TargetScreenName instead.)
"""
super(FriendshipsShowInputSet, self)._set_input('TargetUserID', value)
class FriendshipsShowResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FriendshipsShow Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
class FriendshipsShowChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FriendshipsShowResultSet(response, path)
|
jordanemedlock/psychtruths
|
temboo/core/Library/Twitter/FriendsAndFollowers/FriendshipsShow.py
|
Python
|
apache-2.0
| 5,218
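A hedged usage sketch for the Choreo above, following the pattern described in its docstrings; the TembooSession import path, its constructor arguments, and execute_with_results() reflect general Temboo SDK conventions and are assumptions rather than anything defined in this file.

from temboo.core.session import TembooSession

session = TembooSession("ACCOUNT_NAME", "APP_NAME", "APP_KEY_VALUE")
choreo = FriendshipsShow(session)
inputs = choreo.new_input_set()
inputs.set_ConsumerKey("...")
inputs.set_ConsumerSecret("...")
inputs.set_AccessToken("...")
inputs.set_AccessTokenSecret("...")
inputs.set_SourceScreenName("alice")
inputs.set_TargetScreenName("bob")
results = choreo.execute_with_results(inputs)
print(results.get_Response())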
|
#coding=utf-8
__author__ = 'rocky'
import time
from selenium import webdriver
# using chrome driver to start chrome and do price_crawl
CHROME_DRIVER_PATH = 'XXX/google/chromedriver.exe'
# using phantomjs driver to do price_crawl(does not need any explorer to be opened)
PHANTOMJS_DRIVER_PATH = 'XXX/phantomjs-2.0.0/bin/phantomjs.exe'
URL_FILE_PATH = 'urls.txt'
URL_RESULT_PATH = 'result.txt'
WAIT_TIME = 3
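# urls.txt is expected to hold one product URL per line; only jd.com and
# tmall.com pages are handled below. Placeholder examples (not from the repo):
#   http://item.jd.com/1234567.html
#   https://detail.tmall.com/item.htm?id=7654321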
def read_file2dict():
origin_url_list = list()
with open(URL_FILE_PATH, 'r') as url_file:
for line in url_file:
line = line.strip(' ').strip('\n')
origin_url_list.append(line)
return origin_url_list
def write_dict2file(_url_result_list):
with open(URL_RESULT_PATH, 'w') as result_file:
result_file.writelines(_url_result_list)
def get_price_by_selenium(_driver, _url_list):
result_list = list()
for url_line in _url_list:
_driver.get(url_line)
time.sleep(WAIT_TIME) # Let the user actually see something!
try:
if 'jd.com' in url_line:
kws = _driver.find_elements_by_id('jd-price')
elif 'tmall.com' in url_line:
kws = _driver.find_elements_by_class_name('tm-price')
else:
print 'URL not supported.'
result_list.append(url_line + ',' + 'URL not supported.' + '\n')
continue
print url_line + ',' + unicode(kws[0].text).replace(u'¥', u'')
result_list.append(url_line + ',' + unicode(kws[0].text).replace(u'¥', u'') + '\n')
except Exception, e:
print "Can't find price element."
result_list.append(url_line + ',' + "Can't find price element." + '\n')
continue
return result_list
if __name__ == '__main__':
url_list = read_file2dict()
# driver = webdriver.Chrome(CHROME_DRIVER_PATH)
# driver = webdriver.PhantomJS(PHANTOMJS_DRIVER_PATH)
# url_result_list = get_price_by_selenium(driver, url_list)
# driver.quit()
# service = service.Service('D:/software/google/chromedriver.exe')
# service.start()
# capabilities = {'chrome.binary': 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}
# driver = webdriver.Remote(service.service_url, capabilities)
# get_price_by_selenium(driver)
# driver.quit()
# write_dict2file(url_result_list)
|
rocky1001/UrlCrawler
|
price_crawler/selenium_price_crawler.py
|
Python
|
gpl-2.0
| 2,385
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import xml.dom.minidom
from topology import *
""" v5 topology parser and serializer """
def parseFile(filename):
return parseTree(ET.parse(filename))
def parseString(data):
return parseTree(ET.fromstring(data))
def parseTree(tree):
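    # Expected document layout (inferred from the lookups below):
    # <topology>
    #   <edges><edge id="0"><band altitude="1" rank="0"/>...</edge>...</edges>
    #   <vertices><vertex index="0">
    #     <collector><sink order="0" edge="0"/>...</collector>
    #     <emitter><source order="0" edge="0"/>...</emitter>
    #   </vertex>...</vertices>
    # </topology>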
# Get XML Tree root and initialize topology
root = tree.getroot()
t = Topology()
# Populate Edges
edges = root.find("edges").findall("edge")
# print "Num Edges Detected:",len(edges)
# Keep track of edges for reference later
edgeList = dict()
for edge in edges:
e = Edge(t)
eid = int(edge.attrib['id'].strip())
edgeList[eid] = e
for band in edge.findall("band"):
altitude = int(band.attrib["altitude"].strip())
rank = int(band.attrib["rank"].strip())
matches = filter(lambda x: x.rank==rank,t.bands.values())
b = e.posBand if altitude > 0 else e.negBand
b.altitude = altitude
b.rank = rank
# Populate Vertices
vertices = root.find("vertices").findall("vertex")
# print "Num Vertices Detected: %d"%len(vertices)
for vertex in vertices:
index = int(vertex.attrib['index'].strip())
v = Vertex(t)
# print "Creating Vertex with index=",index,v
v.block.index = index
# Make edge connections to this vertex
for sink in vertex.find("collector").findall("sink"):
order = int(sink.attrib["order"].strip())
edgeid = int(sink.attrib["edge"].strip())
e = edgeList[edgeid]
if v in [s.vertex for s in e.sinks]:
pass
# print "Existing Vertex found!"
else:
tmp = Sink(t,v,e)
tmp.snap.order = order
# print "Creating sink with order=",order,"altitude=",altitude,tmp
for source in vertex.find("emitter").findall("source"):
order = int(source.attrib["order"].strip())
edgeid = int(source.attrib["edge"].strip())
e = edgeList[edgeid]
if v in [src.vertex for src in e.sources]:
pass
# print "Existing Vertex found"
else:
tmp = Source(t,v,e)
tmp.snap.order = order
# print "Creating source with order=",order,"altitude=",altitude,tmp
return t
def serialize(topology):
""" Generate xml from topology """
xmlRoot = ET.Element('topology')
xmlVertices = ET.SubElement(xmlRoot,'vertices')
xmlEdges = ET.SubElement(xmlRoot,'edges')
nextEdgeId = 0
revEdgeList = dict()
# Serialize Edges
for edge in topology.edges:
xmlEdge = ET.SubElement(xmlEdges,'edge')
eid = nextEdgeId
nextEdgeId+=1
        xmlEdge.attrib["id"] = str(eid)  # ElementTree attribute values must be strings
for band in [edge.posBand,edge.negBand]:
b = ET.SubElement(xmlEdge,'band')
b.attrib["altitude"] = str(band.altitude)
b.attrib["rank"] = str(band.rank)
revEdgeList[band.altitude] = eid
# Serialize Vertices
for vertex in topology.vertices:
xmlVertex = ET.SubElement(xmlVertices,'vertex')
# for source in vertex.sources:
#
#
#
# for sink in vertex.sinks:
return xmlify(xmlRoot)
# Search children of ETree
def find_element_by_attribute(root,elementname,attribname,attribval):
element_list = root.findall(elementname)
if element_list is None:
raise Exception("No Elements of name %s found"%elementname)
for tmp in element_list:
try:
if tmp.attrib[attribname] == attribval:
return tmp
except:
raise Exception("Element %s has not attribute %s"%(elementname,attribname))
raise Exception("Could not find %s with %s=%s"%(elementname,attribname,attribval))
def xmlify(root):
# Split continuous string by newlines
content = xml.dom.minidom.parseString(ET.tostring(root)).toprettyxml().split("\n")
# Remove right hand whitespace
content = [str(l).rstrip() for l in content]
# Filter Blank lines
content = filter(lambda x: not x == "",content)
# Repack as a single string
content = "\n".join(content)
return content
|
viccro/diarc
|
diarc/parser.py
|
Python
|
apache-2.0
| 4,865
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="streamtube.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/streamtube/hoverlabel/font/_sizesrc.py
|
Python
|
mit
| 471
|
import os
from django import VERSION as DJANGO_VERSION
BASE_DIR = os.path.dirname(__file__)
DATABASES = {
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3',
}
}
SECRET_KEY = 'fake-key'
INSTALLED_APPS = (
'linaro_django_pagination',
)
if DJANGO_VERSION >= (1, 8):
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
},
]
else:
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
|
atugushev/django-pagination
|
linaro_django_pagination/tests/settings.py
|
Python
|
bsd-3-clause
| 636
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1024,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
include_power=True,
# clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
500: 1e-3,
1500: 1e-4
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 80,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
}
]
)
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=False),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=True),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abc')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=2000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
JackKelly/neuralnilm_prototype
|
scripts/e374.py
|
Python
|
mit
| 7,070
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.plugins import *
from ansible.utils import template
from ansible.callbacks import display
import ansible.constants as C
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import textwrap
VERBOSITY=0
# list of all deprecation messages to prevent duplicate display
deprecations = {}
warns = {}
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
try:
import json
except ImportError:
import simplejson as json
try:
from hashlib import md5 as _md5
except ImportError:
from md5 import md5 as _md5
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
KEYCZAR_AVAILABLE=False
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser("~/.fireball.keys")
if not os.path.exists(key_path):
os.makedirs(key_path)
key_path = os.path.expanduser("~/.fireball.keys/%s" % hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
fh = open(key_path, "w")
fh.write(str(key))
fh.close()
return key
else:
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg)
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
    ''' format JSON output (pretty-printed or compact) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
if format:
return json.dumps(result2, sort_keys=True, indent=4)
else:
return json.dumps(result2, sort_keys=True)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
fd = open(path, "w+")
fd.write(buf)
fd.close()
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
def check_conditional(conditional, basedir, inject, fail_on_undefined=False, jinja2=False):
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined, jinja2=jinja2):
return False
return True
if jinja2:
conditional = "jinja2_compare %s" % conditional
if conditional.startswith("jinja2_compare"):
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and str(inject[conditional]).find('-') == -1:
conditional = inject[conditional]
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = str(conditional).replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if conditional.find("is undefined") != -1:
return True
elif conditional.find("is defined") != -1:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
if not isinstance(conditional, basestring):
return conditional
try:
conditional = conditional.replace("\n", "\\n")
result = safe_eval(conditional)
if result not in [ True, False ]:
raise errors.AnsibleError("Conditional expression must evaluate to True or False: %s" % conditional)
return result
except (NameError, SyntaxError):
raise errors.AnsibleError("Could not evaluate the expression: (%s)" % conditional)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
return os.path.abspath(os.path.join(basedir, given))
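# Illustrative usage (not part of the upstream module): path_dwim resolves paths
# the way users expect: relative to basedir unless absolute or ~-prefixed.
# The paths below are hypothetical.
#
#   path_dwim('/etc/ansible', 'files/foo.conf')  # -> '/etc/ansible/files/foo.conf'
#   path_dwim('/etc/ansible', '/tmp/foo.conf')   # -> '/tmp/foo.conf'
#   path_dwim('/etc/ansible', '~/foo.conf')      # -> expanded under the caller's home directory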
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
''' find one file in a directory one level up in a dir named dirname relative to current '''
# (used by roles code)
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def json_loads(data):
''' parse a JSON string and return a data structure '''
return json.loads(data)
def parse_json(raw_data):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
return json.loads(data)
except:
# not JSON, but try "Baby JSON" which allows many of our modules to not
# require JSON and makes writing modules in bash much simpler
results = {}
try:
tokens = shlex.split(data)
except:
print "failed to parse json: "+ data
raise
for t in tokens:
if t.find("=") == -1:
raise errors.AnsibleError("failed to parse: %s" % orig_data)
(key,value) = t.split("=", 1)
            if key == 'changed' or key == 'failed':
if value.lower() in [ 'true', '1' ]:
value = True
elif value.lower() in [ 'false', '0' ]:
value = False
if key == 'rc':
value = int(value)
results[key] = value
if len(results.keys()) == 0:
return { "failed" : True, "parsed" : False, "msg" : orig_data }
return results
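# Illustrative usage (not part of the upstream module): parse_json falls back to
# "Baby JSON" key=value parsing when module output is not real JSON.
#
#   parse_json('{"rc": 0, "changed": true}')    # -> {'rc': 0, 'changed': True}
#   parse_json('rc=0 changed=true msg=done')    # -> {'rc': 0, 'changed': True, 'msg': 'done'}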
def smush_braces(data):
    ''' smush Jinja2 braces so unresolved templates like {{ foo }} don't get parsed weird by key=value code '''
while data.find('{{ ') != -1:
data = data.replace('{{ ', '{{')
while data.find(' }}') != -1:
data = data.replace(' }}', '}}')
return data
def smush_ds(data):
# things like key={{ foo }} are not handled by shlex.split well, so preprocess any YAML we load
# so we do not have to call smush elsewhere
if type(data) == list:
return [ smush_ds(x) for x in data ]
elif type(data) == dict:
for (k,v) in data.items():
data[k] = smush_ds(v)
return data
elif isinstance(data, basestring):
return smush_braces(data)
else:
return data
def parse_yaml(data):
''' convert a yaml string to a data structure '''
return smush_ds(yaml.safe_load(data))
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if replaced.find(":{{") != -1 and replaced.find("}}") != -1:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
    elif len(probline) and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if mark.line -1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
msg = process_common_errors(msg, probline, mark.column)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path):
''' convert a yaml file to a data structure '''
try:
data = file(path).read()
return parse_yaml(data)
except IOError:
raise errors.AnsibleError("file not found: %s" % path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path)
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
# attempting to split a unicode here does bad things
args = args.encode('utf-8')
vargs = [x.decode('utf-8') for x in shlex.split(args, posix=True)]
#vargs = shlex.split(str(args), posix=True)
for x in vargs:
if x.find("=") != -1:
k, v = x.split("=",1)
options[k]=v
return options
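# Illustrative usage (not part of the upstream module): parse_kv splits a
# key=value argument string with shlex, so quoted values may contain spaces.
#
#   parse_kv('src=/tmp/a dest="/tmp/b c" mode=0644')
#   # -> {'src': '/tmp/a', 'dest': '/tmp/b c', 'mode': '0644'}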
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = copy.deepcopy(a)
# next, iterate over b keys and values
for k, v in b.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
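# Illustrative usage (not part of the upstream module): merge_hash overlays b on
# top of a, recursing into nested dicts instead of replacing them wholesale.
#
#   merge_hash({'db': {'host': 'x', 'port': 1}}, {'db': {'port': 2}})
#   # -> {'db': {'host': 'x', 'port': 2}}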
def md5s(data):
''' Return MD5 hex digest of data. '''
digest = _md5()
try:
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
return digest.hexdigest()
def md5(filename):
''' Return MD5 hex digest of local file, or None if file is not present. '''
if not os.path.exists(filename):
return None
digest = _md5()
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest.update(block)
block = infile.read(blocksize)
infile.close()
return digest.hexdigest()
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _gitinfo():
''' returns a string containing git branch, commit id and commit date '''
result = None
repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git')
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                # There is a possibility that the .git file has an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
return result
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password')
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true",
dest='sudo', help="run operations with sudo (nopasswd)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
default=None) # Can't default to root because we need to detect when this option was given
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def ask_passwords(ask_pass=False, ask_sudo_pass=False):
sshpass = None
sudopass = None
sudo_prompt = "sudo password: "
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
sudo_prompt = "sudo password [defaults to SSH password]: "
if ask_sudo_pass:
sudopass = getpass.getpass(prompt=sudo_prompt)
if ask_pass and sudopass == '':
sudopass = sshpass
return (sshpass, sudopass)
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
    tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter any leading line that does not contain '=' and does not start with '{' or '['.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def compile_when_to_only_if(expression):
'''
when is a shorthand for writing only_if conditionals. It requires less quoting
magic. only_if is retained for backwards compatibility.
'''
# when: set $variable
# when: unset $variable
# when: failed $json_result
# when: changed $json_result
# when: int $x >= $z and $y < 3
# when: int $x in $alist
# when: float $x > 2 and $y <= $z
# when: str $x != $y
# when: jinja2_compare asdf # implies {{ asdf }}
if type(expression) not in [ str, unicode ]:
raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression)
tokens = expression.split()
if len(tokens) < 2:
raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression)
# when_set / when_unset
if tokens[0] in [ 'set', 'unset' ]:
tcopy = tokens[1:]
for (i,t) in enumerate(tokens[1:]):
if t.find("$") != -1:
tcopy[i] = "is_%s('''%s''')" % (tokens[0], t)
else:
tcopy[i] = t
return " ".join(tcopy)
# when_failed / when_changed
elif tokens[0] in [ 'failed', 'changed' ]:
tcopy = tokens[1:]
for (i,t) in enumerate(tokens[1:]):
if t.find("$") != -1:
tcopy[i] = "is_%s(%s)" % (tokens[0], t)
else:
tcopy[i] = t
return " ".join(tcopy)
# when_integer / when_float / when_string
elif tokens[0] in [ 'integer', 'float', 'string' ]:
cast = None
if tokens[0] == 'integer':
cast = 'int'
elif tokens[0] == 'string':
cast = 'str'
elif tokens[0] == 'float':
cast = 'float'
tcopy = tokens[1:]
for (i,t) in enumerate(tokens[1:]):
#if re.search(t, r"^\w"):
# bare word will turn into Jinja2 so all the above
# casting is really not needed
#tcopy[i] = "%s('''%s''')" % (cast, t)
t2 = t.strip()
if (t2[0].isalpha() or t2[0] == '$') and cast == 'str' and t2 != 'in':
tcopy[i] = "'%s'" % (t)
else:
tcopy[i] = t
result = " ".join(tcopy)
return result
# when_boolean
elif tokens[0] in [ 'bool', 'boolean' ]:
tcopy = tokens[1:]
for (i, t) in enumerate(tcopy):
if t.find("$") != -1:
tcopy[i] = "(is_set('''%s''') and '''%s'''.lower() not in ('false', 'no', 'n', 'none', '0', ''))" % (t, t)
return " ".join(tcopy)
# the stock 'when' without qualification (new in 1.2), assumes Jinja2 terms
elif tokens[0] == 'jinja2_compare':
return " ".join(tokens)
else:
raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression)
def make_sudo_cmd(sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required.
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
prompt = '[sudo via ansible, key=%s] password: ' % randbits
success_key = 'SUDO-SUCCESS-%s' % randbits
sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS,
prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)))
return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
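# Illustrative sketch (not part of the upstream module): make_sudo_cmd returns a
# (shell command, randomized sudo prompt, success marker) tuple; connection
# plugins read output until they see the prompt or the marker.
#
#   cmd, prompt, success_key = make_sudo_cmd('root', None, 'whoami')
#   # cmd is roughly: /bin/sh -c 'sudo -k && sudo <flags> -S -p "<prompt>" -u root $SHELL -c <quoted echo+command>'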
_TO_UNICODE_TYPES = (unicode, type(None))
def to_unicode(value):
if isinstance(value, _TO_UNICODE_TYPES):
return value
return value.decode("utf-8")
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def safe_eval(str, locals=None, include_exceptions=False):
'''
this is intended for allowing things like:
with_items: a_list_variable
where Jinja2 would return a string
but we do not want to allow it to call functions (outside of Jinja2, where
the env is constrained)
'''
# FIXME: is there a more native way to do this?
def is_set(var):
return not var.startswith("$") and not '{{' in var
def is_unset(var):
return var.startswith("$") or '{{' in var
# do not allow method calls to modules
if not isinstance(str, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (str, None)
return str
if re.search(r'\w\.\w+\(', str):
if include_exceptions:
return (str, None)
return str
# do not allow imports
if re.search(r'import \w+', str):
if include_exceptions:
return (str, None)
return str
try:
result = None
if not locals:
result = eval(str)
else:
result = eval(str, None, locals)
if include_exceptions:
return (result, None)
else:
return result
except Exception, e:
if include_exceptions:
return (str, e)
return str
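# Illustrative usage (not part of the upstream module): safe_eval turns a
# templated literal back into a data structure while refusing method calls
# and imports.
#
#   safe_eval("['a', 'b']")   # -> ['a', 'b']
#   safe_eval("foo.bar(1)")   # -> "foo.bar(1)" (method calls are returned unevaluated)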
def listify_lookup_plugin_terms(terms, basedir, inject):
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/"):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and new_terms.find("{{") != -1:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
def deprecated(msg, version):
''' used to print out a deprecation message.'''
if not C.DEPRECATION_WARNINGS:
return
new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in deprecations:
display(new_msg, color='purple', stderr=True)
deprecations[new_msg] = 1
def warning(msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in warns:
display(new_msg, color='bright purple', stderr=True)
warns[new_msg] = 1
def combine_vars(a, b):
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
|
JensRantil/ansible
|
lib/ansible/utils/__init__.py
|
Python
|
gpl-3.0
| 35,532
|
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from django.db import connections
from django.utils.encoding import smart_unicode, force_unicode
from django.utils.datastructures import SortedDict
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, \
FieldError
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from forms import BaseForm, get_declared_fields
from fields import Field, ChoiceField
from widgets import SelectMultiple, HiddenInput, MultipleHiddenInput
from widgets import media_property
from formsets import BaseFormSet, formset_factory, DELETION_FIELD_NAME
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'form_for_fields', 'ModelChoiceField',
'ModelMultipleChoiceField',
)
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or not f.name in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
for f in opts.many_to_many:
if fields and f.name not in fields:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
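# Illustrative usage (not part of the original module): with commit=False the
# caller saves the instance and the m2m data later via the save_m2m hook
# attached above. The model and view names below are hypothetical.
#
#   obj = save_instance(form, MyModel(), commit=False)
#   obj.owner = request.user
#   obj.save()
#   form.save_m2m()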
def make_model_save(model, fields, fail_message):
"""Returns the save() method for a Form."""
def save(self, commit=True):
return save_instance(self, model(), fields, fail_message, commit)
return save
def make_instance_save(instance, fields, fail_message):
"""Returns the save() method for a Form."""
def save(self, commit=True):
return save_instance(self, instance, fields, fail_message, commit)
return save
def form_for_fields(field_list):
"""
Returns a Form class for the given list of Django database field instances.
"""
fields = SortedDict([(f.name, f.formfield())
for f in field_list if f.editable])
return type('FormForFields', (BaseForm,), {'base_fields': fields})
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in opts.fields + opts.many_to_many:
if not f.editable:
continue
if fields and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
            # If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
data[f.name] = [obj.pk for obj in f.value_from_object(instance)]
else:
data[f.name] = f.value_from_object(instance)
return data
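# Illustrative usage (not part of the original module): model_to_dict supplies a
# ModelForm's initial data. The model instance and values are hypothetical.
#
#   model_to_dict(article, fields=['title', 'tags'])
#   # -> {'title': u'Hello', 'tags': [1, 3]}   (m2m values come back as lists of pks)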
def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None):
"""
Returns a ``SortedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
"""
field_list = []
ignored = []
opts = model._meta
for f in opts.fields + opts.many_to_many:
if not f.editable:
continue
if fields is not None and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
if widgets and f.name in widgets:
kwargs = {'widget': widgets[f.name]}
else:
kwargs = {}
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = SortedDict(field_list)
if fields:
field_dict = SortedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
class ModelFormMetaclass(type):
def __new__(cls, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
try:
parents = [b for b in bases if issubclass(b, ModelForm)]
except NameError:
# We are defining ModelForm itself.
parents = None
declared_fields = get_declared_fields(bases, attrs, False)
new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
attrs)
if not parents:
return new_class
if 'media' not in attrs:
new_class.media = media_property(new_class)
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
if opts.model:
# If a model is defined, extract form fields from it.
fields = fields_for_model(opts.model, opts.fields,
opts.exclude, opts.widgets, formfield_callback)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in fields.iteritems() if not v]
missing_fields = set(none_model_fields) - \
set(declared_fields.keys())
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(declared_fields)
else:
fields = declared_fields
new_class.declared_fields = declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
opts = self._meta
if instance is None:
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
def _update_errors(self, message_dict):
for k, v in message_dict.items():
if k != NON_FIELD_ERRORS:
self._errors.setdefault(k, self.error_class()).extend(v)
# Remove the data from the cleaned_data dict since it was invalid
if k in self.cleaned_data:
del self.cleaned_data[k]
if NON_FIELD_ERRORS in message_dict:
messages = message_dict[NON_FIELD_ERRORS]
self._errors.setdefault(NON_FIELD_ERRORS, self.error_class()).extend(messages)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in EMPTY_VALUES:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _post_clean(self):
opts = self._meta
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for f_name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(f_name)
# Clean the model instance's fields.
try:
self.instance.clean_fields(exclude=exclude)
except ValidationError, e:
self._update_errors(e.message_dict)
# Call the model instance's clean method.
try:
self.instance.clean()
except ValidationError, e:
self._update_errors({NON_FIELD_ERRORS: e.messages})
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError, e:
self._update_errors(e.message_dict)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, construct=False)
save.alters_data = True
class ModelForm(BaseModelForm):
__metaclass__ = ModelFormMetaclass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None):
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type('Meta', parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + 'Form'
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
return ModelFormMetaclass(class_name, (form,), form_class_attrs)
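# Illustrative usage (not part of the original module); the Author model and the
# surrounding view code are hypothetical.
#
#   AuthorForm = modelform_factory(Author, fields=['name', 'birth_date'])
#   form = AuthorForm(request.POST, instance=author)
#   if form.is_valid():
#       form.save()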
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = dict([(o.pk, o) for o in self.get_queryset()])
return self._object_dict.get(pk)
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
pk = pk_field.get_db_prep_lookup('exact', pk,
connection=connections[self.get_queryset().db])
if isinstance(pk, list):
pk = pk[0]
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and not kwargs.get('instance'):
kwargs['instance'] = self.get_queryset()[i]
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_query_set()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
for form in self.forms:
if not hasattr(form, 'cleaned_data'):
continue
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in self.forms:
# if the form doesn't have cleaned_data then we ignore it,
# it's already invalid
if not hasattr(form, "cleaned_data"):
continue
                # get the data for each field of each unique_check
row_data = tuple([form.cleaned_data[field] for field in unique_check if field in form.cleaned_data])
if row_data and not None in row_data:
                    # if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.get_form_error()
del form.cleaned_data
break
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in self.forms:
# if the form doesn't have cleaned_data then we ignore it,
# it's already invalid
                if not hasattr(form, 'cleaned_data'):
continue
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.get_form_error()
del form.cleaned_data
break
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, unicode(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': unicode(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.get_queryset():
return []
saved_instances = []
for form in self.initial_forms:
pk_name = self._pk_field.name
raw_pk_value = form._raw_value(pk_name)
# clean() for different types of PK fields can sometimes return
# the model instance, and sometimes the PK. Handle either.
pk_value = form.fields[pk_name].clean(raw_pk_value)
pk_value = getattr(pk_value, 'pk', pk_value)
obj = self._existing_object(pk_value)
if self.can_delete:
raw_delete_value = form._raw_value(DELETION_FIELD_NAME)
should_delete = form.fields[DELETION_FIELD_NAME].clean(raw_delete_value)
if should_delete:
self.deleted_objects.append(obj)
obj.delete()
continue
if form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete:
raw_delete_value = form._raw_value(DELETION_FIELD_NAME)
should_delete = form.fields[DELETION_FIELD_NAME].clean(raw_delete_value)
if should_delete:
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoFields have their editable
        # attribute set to True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
pk_value = form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_query_set()
else:
qs = self.model._default_manager.get_query_set()
qs = qs.using(form.instance._state.db)
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=HiddenInput)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet,
extra=1, can_delete=False, can_order=False,
max_num=None, fields=None, exclude=None):
"""
Returns a FormSet class for the given Django model class.
"""
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback)
FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
can_order=can_order, can_delete=can_delete)
FormSet.model = model
return FormSet
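# Illustrative usage (not part of the original module); the Author model and the
# surrounding view code are hypothetical.
#
#   AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2, can_delete=True)
#   formset = AuthorFormSet(request.POST, queryset=Author.objects.all())
#   if formset.is_valid():
#       formset.save()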
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None):
from django.db.models.fields.related import RelatedObject
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
# is there a better way to get the object descriptor?
self.rel_name = RelatedObject(self.fk.rel.to, self.model, self.fk).get_accessor_name()
if self.fk.rel.field_name == self.fk.rel.to._meta.pk.name:
backlink_value = self.instance
else:
backlink_value = getattr(self.instance, self.fk.rel.field_name)
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{self.fk.name: backlink_value})
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def total_form_count(self):
if self.save_as_new:
return super(BaseInlineFormSet, self).initial_form_count()
return super(BaseInlineFormSet, self).total_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
        # Set the fk value here so that the form can do its validation.
setattr(form.instance, self.fk.get_attname(), self.instance.pk)
return form
#@classmethod
def get_default_prefix(cls):
from django.db.models.fields.related import RelatedObject
return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+','')
get_default_prefix = classmethod(get_default_prefix)
def save_new(self, form, commit=True):
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
elif len(fks_to_parent) == 0:
raise Exception("%s has no field named '%s'" % (model, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
else:
raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True, max_num=None,
formfield_callback=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
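# Illustrative usage (not part of the original module); Author and Book are
# hypothetical models where Book has a ForeignKey to Author.
#
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'], extra=1)
#   formset = BookFormSet(request.POST, instance=author)
#   if formset.is_valid():
#       formset.save()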
# Fields #####################################################################
class InlineForeignKeyHiddenInput(HiddenInput):
def _has_changed(self, initial, data):
return False
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
default_error_messages = {
'invalid_choice': _(u'The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
kwargs["widget"] = InlineForeignKeyHiddenInput
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in EMPTY_VALUES:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_unicode(value) != force_unicode(orig):
raise ValidationError(self.error_messages['invalid_choice'])
return self.parent_instance
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield (u"", self.field.empty_label)
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in self.queryset.all()
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in self.queryset.all():
yield self.choice(obj)
def __len__(self):
return len(self.queryset)
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. That choice is not one of'
u' the available choices.'),
}
def __init__(self, queryset, empty_label=u"---------", cache_choices=False,
required=True, widget=None, label=None, initial=None,
help_text=None, to_field_name=None, *args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.choice_cache = None
self.to_field_name = to_field_name
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the ModelChoiceIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_unicode(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in EMPTY_VALUES:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'])
return value
def validate(self, value):
return Field.validate(self, value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _(u'Enter a list of values.'),
'invalid_choice': _(u'Select a valid choice. %s is not one of the'
u' available choices.'),
'invalid_pk_value': _(u'"%s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=False, required=True,
widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'])
elif not self.required and not value:
return []
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'])
for pk in value:
try:
self.queryset.filter(pk=pk)
except ValueError:
raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
qs = self.queryset.filter(pk__in=value)
pks = set([force_unicode(o.pk) for o in qs])
for val in value:
if force_unicode(val) not in pks:
raise ValidationError(self.error_messages['invalid_choice'] % val)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def prepare_value(self, value):
if hasattr(value, '__iter__'):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
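# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes a Django project with a hypothetical ``Author`` model;
# the app, model and field names below are assumptions made for illustration.
#
#     from django import forms
#     from myapp.models import Author
#
#     class BookForm(forms.Form):
#         # The queryset is evaluated lazily: a fresh ModelChoiceIterator is
#         # created each time the choices are rendered.
#         author = forms.ModelChoiceField(queryset=Author.objects.all(),
#                                         empty_label=u"(no author)")
#         reviewers = forms.ModelMultipleChoiceField(queryset=Author.objects.all(),
#                                                    required=False)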
|
tjsavage/sfcsdatabase
|
sfcs/django/forms/models.py
|
Python
|
bsd-3-clause
| 44,249
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from zun.common.i18n import _
db_opts = [
# TODO(yuywz): Change to etcd after all etcd db driver code is landed
cfg.StrOpt('db_type',
default='sql',
help=_('Defines which db type to use for storing container. '
'Possible Values: sql, etcd'))
]
sql_opts = [
cfg.StrOpt('mysql_engine',
default='InnoDB',
help=_('MySQL engine to use.'))
]
etcd_opts = [
cfg.HostAddressOpt('etcd_host',
default='127.0.0.1',
help=_("Host IP address on which etcd service "
"running.")),
cfg.PortOpt('etcd_port',
default=2379,
help=_("Port on which etcd listen client request."))
]
etcd_group = cfg.OptGroup(name='etcd', title='Options for etcd connection')
ALL_OPTS = (db_opts + sql_opts + etcd_opts)
def register_opts(conf):
conf.register_opts(db_opts)
conf.register_opts(sql_opts, 'database')
conf.register_group(etcd_group)
conf.register_opts(etcd_opts, etcd_group)
def list_opts():
return {"DEFAULT": ALL_OPTS}
|
kevin-zhaoshuai/zun
|
zun/conf/database.py
|
Python
|
apache-2.0
| 1,765
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.addressable import Addresses
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import goal_rule
class ListAndDieForTestingOptions(GoalSubsystem):
"""A fast and deadly variant of `./pants list`."""
name = "list-and-die-for-testing"
class ListAndDieForTesting(Goal):
subsystem_cls = ListAndDieForTestingOptions
@goal_rule
def fast_list_and_die_for_testing(console: Console, addresses: Addresses) -> ListAndDieForTesting:
for address in addresses.dependencies:
console.print_stdout(address.spec)
return ListAndDieForTesting(exit_code=42)
def rules():
return [
fast_list_and_die_for_testing,
]
|
wisechengyi/pants
|
pants-plugins/src/python/internal_backend/rules_for_testing/register.py
|
Python
|
apache-2.0
| 858
|
def print_double_num(x):
""" (number) -> NoneType
    Print x doubled.
    >>> print_double_num(3)
    6
"""
print(x * 2)
|
simontakite/sysadmin
|
pythonscripts/practicalprogramming/functions/double_print.py
|
Python
|
gpl-2.0
| 121
|
# -*- coding: utf-8 -*-
"""ciscosparkapi exception classes."""
# Use future for Python v2 and v3 compatibility
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
import json
import requests
from collections import OrderedDict
from ciscosparkapi.responsecodes import SPARK_RESPONSE_CODES
__author__ = "Chris Lunsford"
__author_email__ = "chrlunsf@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"
class ciscosparkapiException(Exception):
"""Base class for all ciscosparkapi package exceptions."""
def __init__(self, *error_message_args, **error_data):
super(ciscosparkapiException, self).__init__()
self.error_message_args = error_message_args
self.error_data = OrderedDict(error_data)
@property
def error_message(self):
"""The error message created from the error message arguments."""
if not self.error_message_args:
return ""
elif len(self.error_message_args) == 1:
return str(self.error_message_args[0])
elif len(self.error_message_args) > 1 \
and isinstance(self.error_message_args[0], basestring):
return self.error_message_args[0] % self.error_message_args[1:]
else:
return "; ".join(self.error_message_args)
def __repr__(self):
"""String representation of the exception."""
        # error_message_args is a tuple of arbitrary objects; stringify it so
        # it can be concatenated with kwarg_list below.
        arg_list = [str(arg) for arg in self.error_message_args]
kwarg_list = [str(key) + "=" + repr(value)
for key, value in self.error_data.items()]
arg_string = ", ".join(arg_list + kwarg_list)
return self.__class__.__name__ + "(" + arg_string + ")"
def __str__(self):
"""Human readable string representation of the exception."""
return self.error_message + '\n' + \
json.dumps(self.error_data, indent=4)
class SparkApiError(ciscosparkapiException):
"""Errors returned by requests to the Cisco Spark cloud APIs."""
def __init__(self, response):
assert isinstance(response, requests.Response)
super(SparkApiError, self).__init__()
# Convenience data attributes
self.request = response.request
self.response = response
self.response_code = response.status_code
self.response_text = SPARK_RESPONSE_CODES.get(self.response_code,
"Unknown Response Code")
# Error message and parameters
self.error_message_args = [
"Response Code [%s] - %s",
self.response_code,
self.response_text
]
# Error Data
self.error_data["response_code"] = self.response_code
self.error_data["description"] = self.response_text
if response.text:
try:
response_data = json.loads(response.text,
object_pairs_hook=OrderedDict)
except ValueError:
self.error_data["response_body"] = response.text
else:
self.error_data["response_body"] = response_data
class SparkRateLimitError(SparkApiError):
"""Cisco Spark Rate-Limit exceeded Error."""
def __init__(self, response):
assert isinstance(response, requests.Response)
super(SparkRateLimitError, self).__init__(response)
retry_after = response.headers.get('Retry-After')
if retry_after:
# Convenience data attributes
self.retry_after = float(retry_after)
# Error Data
self.error_data["retry_after"] = self.retry_after
|
jbogarin/ciscosparkapi
|
ciscosparkapi/exceptions.py
|
Python
|
mit
| 3,729
|
"""
An IRC parser.
This is by no means complete.
"""
from nodeforge.PluginUtils import *
from twisted.internet.task import LoopingCall
import time, random
class Main(Plugin):
priority = Priority.parser
def send(self, string):
if isinstance(string, unicode):
string = string.encode('raw_unicode_escape')
self.core.sendLine(string)
print '>>> %s' % string
def privmsg(self, who, msg):
"""
http://www.irchelp.org/irchelp/rfc/chapter4.html#c4_4_1
"""
self.send('PRIVMSG %s :%s' % (who, msg) )
def join(self, channels, passes=""):
"""
http://www.irchelp.org/irchelp/rfc/chapter4.html#c4_2_1
"""
self.send("JOIN %s %s" % (channels, passes) )
def ping(self, msg="a"):
self.send("PING :%s" % msg)
def setnick(self, name):
self.send("NICK %s" % name)
def onData(self, raw):
'''
First we will parse the stream and save it to self.context
Then we will do the login procedure.
'''
p = parseirc(raw)
self.context = p
if p[1] == 'PING':
self.send("PONG :%s" % p[2][0])
elif p[1] == ERR_NICKNAMEINUSE:
self.nick = "%s%s" %(self.core.nick, random.randint(1,999))
self.setnick(self.nick)
def onLoad(self):
self.core.delimiter = '\r\n'
if not hasattr(self.core, 'nick'):
self.nick = 'AnonymousPerson'
else:
self.nick = self.core.nick
def onConnect(self):
self.setnick(self.nick)
self.send("USER %s host server :Madewith http://code.google.com/p/nodeforge/" % self.nick)
LoopingCall(self.ping).start(60)
def parseirc(input):
"""
BNF in rfc2812
message = [ ":" prefix SPACE ] command [ params ] crlf
params = *14( SPACE middle ) [ SPACE ":" trailing ]
=/ 14( SPACE middle ) [ SPACE [ ":" ] trailing ]
trailing = *( ":" / " " / nospcrlfcl )
"""
prefix = ""
trailing = []
if input == "":
raise Exception("Parse string is empty")
if input[0] == ":":
prefix, input = input[1:].split(' ', 1)
data = input.split(" :",1)
if len(data) == 1:
params = input.split()
else:
input, trailing = data
params = input.split()
params.append(trailing)
command = params.pop(0)
return prefix, command, params
ERR_NICKNAMEINUSE = '433'
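# Illustrative parse examples (added for clarity; not part of the original
# plugin). Running this block directly requires the nodeforge and twisted
# imports at the top of the file to be importable.
if __name__ == '__main__':
    print parseirc(':nick!user@host PRIVMSG #chan :hello world')
    # -> ('nick!user@host', 'PRIVMSG', ['#chan', 'hello world'])
    print parseirc('PING :irc.example.net')
    # -> ('', 'PING', ['irc.example.net'])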
|
impredicative/nodeforge
|
src/plugins/irc/IRC Parser/main.py
|
Python
|
bsd-3-clause
| 2,784
|
# coding: utf-8
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
import test_utils
from statistics import Counter
from statistics import Journal
class StatisticsUnitTests(test_utils.AppEngineTestBase):
"""Test the exploration model."""
def test_counter_class(self):
"""Test Counter Class."""
o = Counter()
o.name = 'The name'
o.value = 2
self.assertEqual(o.name, 'The name')
self.assertEqual(o.value, 2)
def test_journal_class(self):
"""Test Journal Class."""
o = Journal()
o.name = 'The name'
o.values = ['The values']
self.assertEqual(o.name, 'The name')
self.assertEqual(o.values, ['The values'])
|
sunu/oppia-test
|
models/statistics_test.py
|
Python
|
apache-2.0
| 1,295
|
"""Event loop implementation that uses pyuv(libuv-python bindings)."""
import sys
from collections import deque
import pyuv
from .base import BaseEventLoop
class UvEventLoop(BaseEventLoop):
"""`BaseEventLoop` subclass that uses `pvuv` as a backend."""
def _init(self):
self._loop = pyuv.Loop()
self._async = pyuv.Async(self._loop, self._on_async)
self._connection_error = None
self._error_stream = None
self._callbacks = deque()
def _on_connect(self, stream, error):
self.stop()
if error:
msg = 'Cannot connect to {0}: {1}'.format(
self._connect_address, pyuv.errno.strerror(error))
self._connection_error = IOError(msg)
return
self._read_stream = self._write_stream = stream
def _on_read(self, handle, data, error):
if error or not data:
msg = pyuv.errno.strerror(error) if error else 'EOF'
self._on_error(msg)
return
if handle == self._error_stream:
return
self._on_data(data)
def _on_write(self, handle, error):
if error:
msg = pyuv.errno.strerror(error)
self._on_error(msg)
def _on_exit(self, handle, exit_status, term_signal):
self._on_error('EOF')
def _disconnected(self, *args):
raise IOError('Not connected to Nvim')
def _connect_tcp(self, address, port):
stream = pyuv.TCP(self._loop)
self._connect_address = '{0}:{1}'.format(address, port)
stream.connect((address, port), self._on_connect)
def _connect_socket(self, path):
stream = pyuv.Pipe(self._loop)
self._connect_address = path
stream.connect(path, self._on_connect)
def _connect_stdio(self):
self._read_stream = pyuv.Pipe(self._loop)
self._read_stream.open(sys.stdin.fileno())
self._write_stream = pyuv.Pipe(self._loop)
self._write_stream.open(sys.stdout.fileno())
def _connect_child(self, argv):
self._write_stream = pyuv.Pipe(self._loop)
self._read_stream = pyuv.Pipe(self._loop)
self._error_stream = pyuv.Pipe(self._loop)
stdin = pyuv.StdIO(self._write_stream,
flags=pyuv.UV_CREATE_PIPE + pyuv.UV_READABLE_PIPE)
stdout = pyuv.StdIO(self._read_stream,
flags=pyuv.UV_CREATE_PIPE + pyuv.UV_WRITABLE_PIPE)
stderr = pyuv.StdIO(self._error_stream,
flags=pyuv.UV_CREATE_PIPE + pyuv.UV_WRITABLE_PIPE)
self._process = pyuv.Process(self._loop)
self._process.spawn(file=argv[0],
exit_callback=self._on_exit,
args=argv[1:],
flags=pyuv.UV_PROCESS_WINDOWS_HIDE,
stdio=(stdin, stdout, stderr,))
self._error_stream.start_read(self._on_read)
def _start_reading(self):
if self._transport_type in ['tcp', 'socket']:
self._loop.run()
if self._connection_error:
self.run = self.send = self._disconnected
raise self._connection_error
self._read_stream.start_read(self._on_read)
def _send(self, data):
self._write_stream.write(data, self._on_write)
def _run(self):
self._loop.run(pyuv.UV_RUN_DEFAULT)
def _stop(self):
self._loop.stop()
def _threadsafe_call(self, fn):
self._callbacks.append(fn)
self._async.send()
def _on_async(self, handle):
while self._callbacks:
self._callbacks.popleft()()
def _setup_signals(self, signals):
self._signal_handles = []
def handler(h, signum):
self._on_signal(signum)
for signum in signals:
handle = pyuv.Signal(self._loop)
handle.start(handler, signum)
self._signal_handles.append(handle)
def _teardown_signals(self):
for handle in self._signal_handles:
handle.stop()
|
bfredl/python-client
|
neovim/msgpack_rpc/event_loop/uv.py
|
Python
|
apache-2.0
| 4,058
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Robots.txt parser.
The robots.txt Exclusion Protocol is implemented as specified in
http://www.robotstxt.org/wc/norobots-rfc.html
"""
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import urllib
import time
import requests
from . import log, LOG_CHECK, configuration
__all__ = ["RobotFileParser"]
ACCEPT_ENCODING = 'x-gzip,gzip,deflate'
class RobotFileParser (object):
"""This class provides a set of methods to read, parse and answer
questions about a single robots.txt file."""
def __init__ (self, url='', session=None, proxies=None, auth=None):
"""Initialize internal entry lists and store given url and
credentials."""
self.set_url(url)
if session is None:
self.session = requests.Session()
else:
self.session = session
self.proxies = proxies
self.auth = auth
self._reset()
def _reset (self):
"""Reset internal flags and entry lists."""
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.last_checked = 0
# list of tuples (sitemap url, line number)
self.sitemap_urls = []
def mtime (self):
"""Returns the time the robots.txt file was last fetched.
This is useful for long-running web spiders that need to
check for new robots.txt files periodically.
@return: last modified in time.time() format
@rtype: number
"""
return self.last_checked
def modified (self):
"""Set the time the robots.txt file was last fetched to the
current time."""
self.last_checked = time.time()
def set_url (self, url):
"""Set the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urlparse.urlparse(url)[1:3]
def read (self):
"""Read the robots.txt URL and feeds it to the parser."""
self._reset()
kwargs = dict(
headers = {
'User-Agent': configuration.UserAgent,
'Accept-Encoding': ACCEPT_ENCODING,
}
)
if self.auth:
kwargs["auth"] = self.auth
if self.proxies:
kwargs["proxies"] = self.proxies
try:
response = self.session.get(self.url, **kwargs)
response.raise_for_status()
content_type = response.headers.get('content-type')
if content_type and content_type.lower().startswith('text/plain'):
self.parse(response.iter_lines())
else:
log.debug(LOG_CHECK, "%r allow all (no text content)", self.url)
self.allow_all = True
except requests.HTTPError as x:
if x.response.status_code in (401, 403):
self.disallow_all = True
log.debug(LOG_CHECK, "%r disallow all (code %d)", self.url, x.response.status_code)
else:
self.allow_all = True
log.debug(LOG_CHECK, "%r allow all (HTTP error)", self.url)
except requests.exceptions.Timeout:
raise
except requests.exceptions.RequestException:
# no network or other failure
self.allow_all = True
log.debug(LOG_CHECK, "%r allow all (request error)", self.url)
def _add_entry (self, entry):
"""Add a parsed entry to entry list.
@return: None
"""
if "*" in entry.useragents:
# the default entry is considered last
self.default_entry = entry
else:
self.entries.append(entry)
def parse (self, lines):
"""Parse the input lines from a robot.txt file.
We allow that a user-agent: line is not preceded by
one or more blank lines.
@return: None
"""
log.debug(LOG_CHECK, "%r parse lines", self.url)
state = 0
linenumber = 0
entry = Entry()
for line in lines:
line = line.strip()
linenumber += 1
if not line:
if state == 1:
log.debug(LOG_CHECK, "%r line %d: allow or disallow directives without any user-agent line", self.url, linenumber)
entry = Entry()
state = 0
elif state == 2:
self._add_entry(entry)
entry = Entry()
state = 0
# remove optional comment and strip line
i = line.find('#')
if i >= 0:
line = line[:i]
line = line.strip()
if not line:
continue
line = line.split(':', 1)
if len(line) == 2:
line[0] = line[0].strip().lower()
line[1] = urllib.unquote(line[1].strip())
if line[0] == "user-agent":
if state == 2:
log.debug(LOG_CHECK, "%r line %d: missing blank line before user-agent directive", self.url, linenumber)
self._add_entry(entry)
entry = Entry()
entry.useragents.append(line[1])
state = 1
elif line[0] == "disallow":
if state == 0:
log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
pass
else:
entry.rulelines.append(RuleLine(line[1], False))
state = 2
elif line[0] == "allow":
if state == 0:
log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
pass
else:
entry.rulelines.append(RuleLine(line[1], True))
state = 2
elif line[0] == "crawl-delay":
if state == 0:
log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
pass
else:
try:
entry.crawldelay = max(0, int(line[1]))
state = 2
except (ValueError, OverflowError):
log.debug(LOG_CHECK, "%r line %d: invalid delay number %r", self.url, linenumber, line[1])
pass
elif line[0] == "sitemap":
# Note that sitemap URLs must be absolute according to
# http://www.sitemaps.org/protocol.html#submit_robots
# But this should be checked by the calling layer.
self.sitemap_urls.append((line[1], linenumber))
else:
log.debug(LOG_CHECK, "%r line %d: unknown key %r", self.url, linenumber, line[0])
pass
else:
log.debug(LOG_CHECK, "%r line %d: malformed line %r", self.url, linenumber, line)
pass
if state in (1, 2):
self.entries.append(entry)
self.modified()
log.debug(LOG_CHECK, "Parsed rules:\n%s", str(self))
def can_fetch (self, useragent, url):
"""Using the parsed robots.txt decide if useragent can fetch url.
@return: True if agent can fetch url, else False
@rtype: bool
"""
log.debug(LOG_CHECK, "%r check allowance for:\n user agent: %r\n url: %r ...", self.url, useragent, url)
if not isinstance(useragent, str):
useragent = useragent.encode("ascii", "ignore")
if not isinstance(url, str):
url = url.encode("ascii", "ignore")
if self.disallow_all:
log.debug(LOG_CHECK, " ... disallow all.")
return False
if self.allow_all:
log.debug(LOG_CHECK, " ... allow all.")
return True
# search for given user agent matches
# the first match counts
url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
for entry in self.entries:
if entry.applies_to(useragent):
return entry.allowance(url)
# try the default entry last
if self.default_entry is not None:
return self.default_entry.allowance(url)
# agent not found ==> access granted
log.debug(LOG_CHECK, " ... agent not found, allow.")
return True
def get_crawldelay (self, useragent):
"""Look for a configured crawl delay.
@return: crawl delay in seconds or zero
@rtype: integer >= 0
"""
for entry in self.entries:
if entry.applies_to(useragent):
return entry.crawldelay
return 0
def __str__ (self):
"""Constructs string representation, usable as contents of a
robots.txt file.
@return: robots.txt format
@rtype: string
"""
lines = [str(entry) for entry in self.entries]
if self.default_entry is not None:
lines.append(str(self.default_entry))
return "\n\n".join(lines)
class RuleLine (object):
"""A rule line is a single "Allow:" (allowance==1) or "Disallow:"
(allowance==0) followed by a path.
"""
def __init__ (self, path, allowance):
"""Initialize with given path and allowance info."""
if path == '' and not allowance:
# an empty value means allow all
allowance = True
path = '/'
self.path = urllib.quote(path)
self.allowance = allowance
def applies_to (self, path):
"""Look if given path applies to this rule.
@return: True if pathname applies to this rule, else False
@rtype: bool
"""
return self.path == "*" or path.startswith(self.path)
def __str__ (self):
"""Construct string representation in robots.txt format.
@return: robots.txt format
@rtype: string
"""
return ("Allow" if self.allowance else "Disallow")+": "+self.path
class Entry (object):
"""An entry has one or more user-agents and zero or more rulelines."""
def __init__ (self):
"""Initialize user agent and rule list."""
self.useragents = []
self.rulelines = []
self.crawldelay = 0
def __str__ (self):
"""string representation in robots.txt format.
@return: robots.txt format
@rtype: string
"""
lines = ["User-agent: %s" % agent for agent in self.useragents]
if self.crawldelay:
lines.append("Crawl-delay: %d" % self.crawldelay)
lines.extend([str(line) for line in self.rulelines])
return "\n".join(lines)
def applies_to (self, useragent):
"""Check if this entry applies to the specified agent.
@return: True if this entry applies to the agent, else False.
@rtype: bool
"""
if not useragent:
return True
useragent = useragent.lower()
for agent in self.useragents:
if agent == '*':
# we have the catch-all agent
return True
if agent.lower() in useragent:
return True
return False
def allowance (self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded
        Check if given filename is allowed to access this entry.
@return: True if allowed, else False
@rtype: bool
"""
for line in self.rulelines:
log.debug(LOG_CHECK, "%s %s %s", filename, str(line), line.allowance)
if line.applies_to(filename):
log.debug(LOG_CHECK, " ... rule line %s", line)
return line.allowance
log.debug(LOG_CHECK, " ... no rule lines of %s applied to %s; allowed.", self.useragents, filename)
return True
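# Illustrative usage sketch (added for clarity; not part of the original
# module): feed an in-memory robots.txt to the parser instead of fetching one
# over HTTP. Because of the relative imports above, run it as
# "python -m linkcheck.robotparser2" in an environment where the linkcheck
# package is importable; the example.com URLs are placeholders.
if __name__ == '__main__':
    rp = RobotFileParser(url='http://example.com/robots.txt')
    rp.parse([
        "User-agent: *",
        "Disallow: /private/",
        "Crawl-delay: 2",
    ])
    print(rp.can_fetch("MyBot", "http://example.com/private/page.html"))  # False
    print(rp.can_fetch("MyBot", "http://example.com/index.html"))         # True
    print(rp.get_crawldelay("MyBot"))                                     # 2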
|
terryyin/linkchecker
|
linkcheck/robotparser2.py
|
Python
|
gpl-2.0
| 12,956
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting code to AST.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import gast
from tensorflow.python.util import tf_inspect
def parse_object(obj):
"""Return the AST of given object."""
return parse_str(tf_inspect.getsource(obj))
def parse_str(src):
"""Return the AST of given piece of code."""
return gast.parse(textwrap.dedent(src))
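# Illustrative usage sketch (added for clarity; not part of the original
# module): parse a small piece of source into a gast AST and inspect it.
if __name__ == '__main__':
  node = parse_str('def square(x):\n  return x * x\n')
  print(node.body[0].name)  # -> square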
|
jwlawson/tensorflow
|
tensorflow/contrib/py2tf/pyct/parser.py
|
Python
|
apache-2.0
| 1,152
|
from binascii import b2a_base64, a2b_base64
POSTFIX = {
'ns': 1e-9,
'us': 1e-6,
'ms': 1e-3,
'secs': 1,
'mins': 60,
'hrs': 60 * 60,
'days': 24 * 60 * 60,
'weeks': 7 * 24 * 60 * 60
}
def parse_duration(s):
s = s.strip()
unit = None
postfix = None
for n, u in POSTFIX.items():
if s.endswith(n):
unit = u
postfix = n
break
assert unit is not None, \
'Unknown duration \'%s\'; supported units are %s' % (
s, ','.join('\'%s\'' % n for n in POSTFIX)
)
n = float(s[:-len(postfix)])
return n * unit
def encode_data(data):
return b2a_base64(data).strip().decode('ascii')
def decode_data(data):
return a2b_base64(data)
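# Illustrative examples (added for clarity; not part of the original module).
if __name__ == '__main__':
    print(parse_duration('100ms'))   # -> 0.1
    print(parse_duration('2hrs'))    # -> 7200.0
    print(encode_data(b'hello'))     # -> aGVsbG8=
    print(decode_data('aGVsbG8='))   # -> b'hello' (bytes on Python 3)
    # Caveat: a value such as '5mins' can fail here, because 'mins' also ends
    # with the 'ns' suffix and dict iteration order decides which suffix is
    # matched first.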
|
yuyang0/pymesos
|
pymesos/utils.py
|
Python
|
bsd-3-clause
| 757
|
# -*- coding: utf-8 -*-
#
# This file is a part of Pluggable Output Processor
#
# Copyright (c) 2013-2017 Alex Turbov <i.zaufi@gmail.com>
#
# Pluggable Output Processor is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pluggable Output Processor is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Unit tests for terminal helpers
'''
# Project specific imports
from outproc.config import Config
from outproc.term import column_formatter, fg2bg, pos_to_offset
# Standard imports
import pathlib
class term_module_tester:
def setup_method(self):
self.config = Config(pathlib.Path('doesnt-matter'))
self.red_fg = self.config.get_color('some', 'red', with_reset=False)
self.yellow_fg = self.config.get_color('some', 'yellow+bold')
self.white_fg = self.config.get_color('some', 'white')
def fg2bg_test(self):
assert fg2bg(self.red_fg) == '\x1b[41m'
def pos_to_offset_0_test(self):
line = 'Hello Africa'
colored = self.yellow_fg + line + self.config.color.reset
#print('{}'.format(repr(colored)))
pos = pos_to_offset(colored, 0)
assert pos == 13
assert colored[pos] == 'H'
pos = pos_to_offset(colored, 6)
assert pos == 19
assert colored[pos] == 'A'
pos = pos_to_offset(colored, len(line) - 1)
assert pos == 24
assert colored[pos] == 'a'
def pos_to_offset_1_test(self):
line = 'Hello Africa'
colored = self.white_fg + ' ' + self.yellow_fg + line + self.config.color.reset
pos = pos_to_offset(colored, 1)
assert pos == 23
assert colored[pos] == 'H'
def bg_highlight_1_test(self):
self.reg_bg = fg2bg(self.red_fg)
line = 'Hello Africa'
line = self.yellow_fg + line + self.config.color.reset
pos = pos_to_offset(line, 0)
assert line[pos] == 'H'
line = line[:pos] + self.reg_bg + line[pos:pos+1] + self.config.color.normal_bg \
+ line[pos+1:]
assert line == '\x1b[0m\x1b[33m\x1b[1m\x1b[41mH\x1b[48mello Africa\x1b[0m'
pos = pos_to_offset(line, 6)
assert line[pos] == 'A'
line = line[:pos] + self.reg_bg + line[pos:pos+1] + self.config.color.normal_bg \
+ line[pos+1:]
assert line == '\x1b[0m\x1b[33m\x1b[1m\x1b[41mH\x1b[48mello \x1b[41mA\x1b[48mfrica\x1b[0m'
pos = pos_to_offset(line, 11)
assert line[pos] == 'a'
line = line[:pos] + self.reg_bg + line[pos:pos+1] + self.config.color.normal_bg \
+ line[pos+1:]
assert line == '\x1b[0m\x1b[33m\x1b[1m\x1b[41mH\x1b[48mello \x1b[41mA\x1b[48mfric\x1b[41ma\x1b[48m\x1b[0m'
def _format_range_as_to_columns(self, count, columns):
result = ''
line = ''
for i in column_formatter(count, columns):
if i == -1:
#print(line)
result += line + '\n'
line = ''
else:
line += '{} '.format(i)
return result
def test_column_formatter_0(self):
for i in column_formatter(0, 1):
            assert False, 'column_formatter(0, 1) must not yield anything'
def column_formatter_1_2_test(self):
expected = '0 \n'
result = self._format_range_as_to_columns(1, 2)
assert expected == result
def column_formatter_10_4_test(self):
expected = '0 3 6 9 \n1 4 7 \n2 5 8 \n'
result = self._format_range_as_to_columns(10, 4)
assert expected == result
def column_formatter_10_3_test(self):
expected = '0 4 8 \n1 5 9 \n2 6 \n3 7 \n'
result = self._format_range_as_to_columns(10, 3)
assert expected == result
|
zaufi/pluggable-output-processor
|
test/test_term.py
|
Python
|
gpl-3.0
| 4,169
|
# Animal is-a object
class Animal(object):
@staticmethod
def latin_name():
return "Animal"
# Dog is-a Animal
class Dog(Animal):
# Dog has-a name
def __init__(self, name):
self.name = name
def eater(self):
return "Carnivore", Animal.latin_name(), self.name
# Cat is-a Animal
class Cat(Animal):
# Cat has-a name
def __init__(self, name):
self.name = name
# Person is-a object
class Person(object):
def __init__(self, name):
# person has-a name
self.name = name
# person has-a Pet of some kind
self.pet = None
# Employees are people
class Employee(Person):
def __init__(self, name, salary):
# call constructor of super class
super(Employee, self).__init__(name)
# Employee has-a salary
self.salary = salary
# Fish is-a object
class Fish(object):
pass
# Salmon is-a Fish
class Salmon(Fish):
pass
# Halibut is-a Fish
class Halibut(Fish):
pass
# Rover is-a dog
rover = Dog("Rover")
# Satan is-a cat
satan = Cat("Satan")
# Mary is-a person
mary = Person("Mary")
# Mary has-a pet - cat Satan
mary.pet = satan
# Frank is-a employee with a salary of ...
frank = Employee("Frank", 12000)
# Frank has-a pet rover
frank.pet = rover
# crouse is-a Salmon
crouse = Salmon()
# harry is-a Halibut
harry = Halibut()
print rover.eater()
|
aurelo/lphw
|
source/ex42.py
|
Python
|
mit
| 1,389
|
import simplejson
import werkzeug
import requests
import random
from datetime import datetime, timedelta
from odoo import api, exceptions, fields, models
from odoo.tools import scan_languages
from odoo.tools.translate import _
from odoo.addons.base.res.res_partner import _tz_get
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.addons.saas_base.exceptions import MaximumTrialDBException
from odoo.addons.saas_base.exceptions import MaximumDBException
from werkzeug.exceptions import Forbidden
import logging
_logger = logging.getLogger(__name__)
@api.multi
def _compute_host(self):
base_saas_domain = self.env['ir.config_parameter'].sudo(
).get_param('saas_portal.base_saas_domain')
for r in self:
host = r.name
if base_saas_domain and '.' not in r.name:
host = '%s.%s' % (r.name, base_saas_domain)
r.host = host
class SaasPortalServer(models.Model):
_name = 'saas_portal.server'
_description = 'SaaS Server'
_rec_name = 'name'
_inherit = ['mail.thread']
_inherits = {'oauth.application': 'oauth_application_id'}
name = fields.Char('Database name', required=True)
oauth_application_id = fields.Many2one(
'oauth.application', 'OAuth Application', required=True, ondelete='cascade')
sequence = fields.Integer('Sequence')
active = fields.Boolean('Active', default=True)
request_scheme = fields.Selection(
[('http', 'http'), ('https', 'https')], 'Scheme', default='http', required=True)
verify_ssl = fields.Boolean(
'Verify SSL', default=True, help="verify SSL certificates for server-side HTTPS requests, just like a web browser")
request_port = fields.Integer('Request Port', default=80)
client_ids = fields.One2many(
'saas_portal.client', 'server_id', string='Clients')
local_host = fields.Char(
'Local host', help='local host or ip address of server for server-side requests')
local_port = fields.Char(
'Local port', help='local tcp port of server for server-side requests')
local_request_scheme = fields.Selection(
[('http', 'http'), ('https', 'https')], 'Scheme', default='http', required=True)
host = fields.Char('Host', compute=_compute_host)
odoo_version = fields.Char('Odoo version', readonly=True)
password = fields.Char()
clients_host_template = fields.Char('Template for clients host names',
help='The possible dynamic parts of the host names are: {dbname}, {base_saas_domain}, {base_saas_domain_1}')
@api.model
def create(self, vals):
record = super(SaasPortalServer, self).create(vals)
record.oauth_application_id._get_access_token(create=True)
return record
@api.multi
def _request_params(self, path='/web', scheme=None,
port=None, state=None, scope=None, client_id=None):
self.ensure_one()
if not state:
state = {}
scheme = scheme or self.request_scheme
port = port or self.request_port
scope = scope or ['userinfo', 'force_login', 'trial', 'skiptheuse']
scope = ' '.join(scope)
client_id = client_id or self.env['oauth.application'].generate_client_id(
)
params = {
'scope': scope,
'state': simplejson.dumps(state),
'redirect_uri': '{scheme}://{saas_server}:{port}{path}'.format(scheme=scheme, port=port, saas_server=self.host, path=path),
'response_type': 'token',
'client_id': client_id,
}
return params
@api.multi
def _request(self, **kwargs):
self.ensure_one()
params = self._request_params(**kwargs)
url = '/oauth2/auth?%s' % werkzeug.url_encode(params)
return url
@api.multi
def _request_server(self, path=None, scheme=None, port=None, **kwargs):
self.ensure_one()
scheme = scheme or self.local_request_scheme or self.request_scheme
host = self.local_host or self.host
port = port or self.local_port or self.request_port
params = self._request_params(**kwargs)
access_token = self.oauth_application_id.sudo()._get_access_token(create=True)
params.update({
'token_type': 'Bearer',
'access_token': access_token,
'expires_in': 3600,
})
url = '{scheme}://{host}:{port}{path}'.format(
scheme=scheme, host=host, port=port, path=path)
req = requests.Request('GET', url, data=params,
headers={'host': self.host})
req_kwargs = {'verify': self.verify_ssl}
return req.prepare(), req_kwargs
@api.multi
def action_redirect_to_server(self):
r = self[0]
url = '{scheme}://{saas_server}:{port}{path}'.format(
scheme=r.request_scheme, saas_server=r.host, port=r.request_port, path='/web')
return {
'type': 'ir.actions.act_url',
'target': 'new',
'name': 'Redirection',
'url': url
}
@api.model
def action_sync_server_all(self):
p_client = self.env['saas_portal.client']
self.search([]).action_sync_server()
p_client.search([]).storage_usage_monitoring()
@api.multi
def action_sync_server(self, updating_client_ID=None):
for server in self:
state = {
'd': server.name,
'client_id': server.client_id,
'updating_client_ID': updating_client_ID,
}
req, req_kwargs = server._request_server(
path='/saas_server/sync_server', state=state, client_id=server.client_id)
res = requests.Session().send(req, **req_kwargs)
if not res.ok:
raise Warning(_('Reason: %s \n Message: %s') %
(res.reason, res.content))
try:
data = simplejson.loads(res.text)
except Exception as e:
_logger.error('Error on parsing response: %s\n%s' %
([req.url, req.headers, req.body], res.text))
raise
for r in data:
r['server_id'] = server.id
client = server.env['saas_portal.client'].with_context(
active_test=False).search([('client_id', '=', r.get('client_id'))])
if not client:
database = server.env['saas_portal.database'].search(
[('client_id', '=', r.get('client_id'))])
if database:
database.write(r)
continue
client = server.env['saas_portal.client'].create(r)
else:
client.write(r)
return None
@api.model
def get_saas_server(self):
p_server = self.env['saas_portal.server']
saas_server_list = p_server.sudo().search([])
return saas_server_list[random.randint(0, len(saas_server_list) - 1)]
class SaasPortalPlan(models.Model):
_name = 'saas_portal.plan'
name = fields.Char('Plan', required=True)
summary = fields.Char('Summary')
template_id = fields.Many2one(
'saas_portal.database', 'Template', ondelete='restrict')
demo = fields.Boolean('Install Demo Data')
    maximum_allowed_dbs_per_partner = fields.Integer(
        help='maximum allowed non-trial databases per customer', required=True, default=0)
    maximum_allowed_trial_dbs_per_partner = fields.Integer(
        help='maximum allowed trial databases per customer', required=True, default=0)
max_users = fields.Char('Initial Max users',
default='0', help='leave 0 for no limit')
total_storage_limit = fields.Integer(
'Total storage limit (MB)', help='leave 0 for no limit')
block_on_expiration = fields.Boolean(
'Block clients on expiration', default=False)
block_on_storage_exceed = fields.Boolean(
'Block clients on storage exceed', default=False)
def _get_default_lang(self):
return self.env.user.lang
def _default_tz(self):
return self.env.user.tz
lang = fields.Selection(scan_languages(), 'Language',
default=_get_default_lang)
tz = fields.Selection(_tz_get, 'TimeZone', default=_default_tz)
sequence = fields.Integer('Sequence')
state = fields.Selection([('draft', 'Draft'), ('confirmed', 'Confirmed')],
'State', compute='_compute_get_state', store=True)
expiration = fields.Integer(
'Expiration (hours)', help='time to delete database. Use for demo')
_order = 'sequence'
grace_period = fields.Integer(
'Grace period (days)', help='initial days before expiration')
dbname_template = fields.Char(
'DB Names', help='Used for generating client database domain name. Use %i for numbering. Ignore if you use manually created db names', placeholder='crm-%i.odoo.com')
server_id = fields.Many2one('saas_portal.server', string='SaaS Server',
ondelete='restrict',
                                help='Use this SaaS server or choose a random one')
website_description = fields.Html('Website description')
logo = fields.Binary('Logo')
on_create = fields.Selection([
('login', 'Log into just created instance'),
], string="Workflow on create", default='login')
on_create_email_template = fields.Many2one('mail.template',
default=lambda self: self.env.ref('saas_portal.email_template_create_saas'))
@api.multi
@api.depends('template_id.state')
def _compute_get_state(self):
for plan in self:
if plan.template_id.state == 'template':
plan.state = 'confirmed'
else:
plan.state = 'draft'
@api.multi
def _new_database_vals(self, vals):
self.ensure_one()
vals['max_users'] = vals.get('max_users',
self.max_users)
vals['total_storage_limit'] = vals.get('total_storage_limit',
self.total_storage_limit)
vals['block_on_expiration'] = vals.get('block_on_expiration',
self.block_on_expiration)
vals['block_on_storage_exceed'] = vals.get('block_on_storage_exceed',
self.block_on_storage_exceed)
return vals
@api.multi
def _prepare_owner_user_data(self, user_id):
"""
        Prepare the dict of values to update owner user data in the client instance. This method may be
overridden to implement custom values (making sure to call super() to establish
a clean extension chain).
"""
self.ensure_one()
owner_user = self.env['res.users'].browse(user_id) or self.env.user
owner_user_data = {
'user_id': owner_user.id,
'login': owner_user.login,
'name': owner_user.name,
'email': owner_user.email,
'password_crypt': owner_user.password_crypt,
}
return owner_user_data
@api.multi
def _get_expiration(self, trial):
self.ensure_one()
trial_hours = trial and self.expiration
initial_expiration_datetime = datetime.now()
trial_expiration_datetime = (initial_expiration_datetime + timedelta(
hours=trial_hours)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return trial and trial_expiration_datetime or initial_expiration_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.multi
def create_new_database(self, **kwargs):
return self._create_new_database(**kwargs)
@api.multi
def _create_new_database(self, dbname=None, client_id=None,
partner_id=None, user_id=None, notify_user=True,
trial=False, support_team_id=None, async=None):
self.ensure_one()
p_client = self.env['saas_portal.client']
p_server = self.env['saas_portal.server']
server = self.server_id
if not server:
server = p_server.get_saas_server()
# server.action_sync_server()
if not partner_id and user_id:
user = self.env['res.users'].browse(user_id)
partner_id = user.partner_id.id
if not trial and self.maximum_allowed_dbs_per_partner != 0:
db_count = p_client.search_count([('partner_id', '=', partner_id),
('state',
'=', 'open'),
('plan_id',
'=', self.id),
('trial', '=', False)])
if db_count >= self.maximum_allowed_dbs_per_partner:
raise MaximumDBException("Limit of databases for this plan is %(maximum)s reached" % {
'maximum': self.maximum_allowed_dbs_per_partner})
if trial and self.maximum_allowed_trial_dbs_per_partner != 0:
trial_db_count = p_client.search_count([('partner_id', '=', partner_id),
('state',
'=', 'open'),
('plan_id',
'=', self.id),
('trial', '=', True)])
if trial_db_count >= self.maximum_allowed_trial_dbs_per_partner:
raise MaximumTrialDBException("Limit of trial databases for this plan is %(maximum)s reached" % {
'maximum': self.maximum_allowed_trial_dbs_per_partner})
client_expiration = self._get_expiration(trial)
vals = {'name': dbname or self.generate_dbname(),
'server_id': server.id,
'plan_id': self.id,
'partner_id': partner_id,
'trial': trial,
'support_team_id': support_team_id,
'expiration_datetime': client_expiration,
}
client = None
if client_id:
vals['client_id'] = client_id
client = p_client.search(
[('client_id', '=', client_id)])
vals = self._new_database_vals(vals)
if client:
client.write(vals)
else:
client = p_client.create(vals)
client_id = client.client_id
owner_user_data = self._prepare_owner_user_data(user_id)
state = {
'd': client.name,
'public_url': client.public_url,
'e': client_expiration,
'r': client.public_url + 'web',
'h': client.host,
'owner_user': owner_user_data,
't': client.trial,
}
if self.template_id:
state.update({'db_template': self.template_id.name})
scope = ['userinfo', 'force_login', 'trial', 'skiptheuse']
req, req_kwargs = server._request_server(path='/saas_server/new_database',
state=state,
client_id=client_id,
scope=scope,)
res = requests.Session().send(req, **req_kwargs)
if res.status_code != 200:
raise Warning(_('Error on request: %s\nReason: %s \n Message: %s') % (
req.url, res.reason, res.content))
data = simplejson.loads(res.text)
params = {
'state': data.get('state'),
'access_token': client.oauth_application_id._get_access_token(user_id, create=True),
}
url = '{url}?{params}'.format(url=data.get(
'url'), params=werkzeug.url_encode(params))
auth_url = url
# send email if there is mail template record
template = self.on_create_email_template
if template and notify_user:
            # we need a user here to render the mail template (there is no way to notify without a user record)
user = self.env['res.users'].browse(user_id)
client.with_context(user=user).message_post_with_template(
template.id, composition_mode='comment')
client.send_params_to_client_db()
# TODO make async call of action_sync_server here
# client.server_id.action_sync_server()
client.sync_client()
return {'url': url,
'id': client.id,
'client_id': client_id,
'auth_url': auth_url}
@api.multi
def generate_dbname(self, raise_error=True):
self.ensure_one()
if not self.dbname_template:
if raise_error:
raise exceptions.Warning(
_('Template for db name is not configured'))
return ''
sequence = self.env['ir.sequence'].get('saas_portal.plan')
return self.dbname_template.replace('%i', sequence)
@api.multi
def create_template_button(self):
return self.create_template()
@api.multi
def create_template(self, addons=None):
self.ensure_one()
state = {
'd': self.template_id.name,
'demo': self.demo and 1 or 0,
'addons': addons or [],
'lang': self.lang,
'tz': self.tz,
'is_template_db': 1,
}
client_id = self.template_id.client_id
self.template_id.server_id = self.server_id
req, req_kwargs = self.server_id._request_server(
path='/saas_server/new_database', state=state, client_id=client_id)
res = requests.Session().send(req, **req_kwargs)
if not res.ok:
raise Warning(_('Error on request: %s\nReason: %s \n Message: %s') %
(req.url, res.reason, res.content))
try:
data = simplejson.loads(res.text)
except Exception as e:
_logger.error(_('Error on parsing response: %s\n%s') %
([req.url, req.headers, req.body], res.text))
raise
self.template_id.password = data.get('superuser_password')
self.template_id.state = data.get('state')
return data
@api.multi
def action_sync_server(self):
for r in self:
r.server_id.action_sync_server()
return True
@api.multi
def edit_template(self):
return self[0].template_id.edit_database()
@api.multi
def upgrade_template(self):
return self[0].template_id.show_upgrade_wizard()
@api.multi
def delete_template(self):
self.ensure_one()
res = self.template_id.delete_database_server()
return res
class OauthApplication(models.Model):
_inherit = 'oauth.application'
client_id = fields.Char('Database UUID')
last_connection = fields.Char(compute='_compute_get_last_connection',
string='Last Connection', size=64)
server_db_ids = fields.One2many(
'saas_portal.server', 'oauth_application_id',
string='Server Database')
template_db_ids = fields.One2many(
'saas_portal.database', 'oauth_application_id',
string='Template Database')
client_db_ids = fields.One2many(
'saas_portal.client', 'oauth_application_id',
string='Client Database')
@api.multi
def _compute_get_last_connection(self):
for r in self:
oat = self.env['oauth.access_token']
to_search = [('application_id', '=', r.id)]
access_tokens = oat.search(to_search)
if access_tokens:
access_token = access_tokens[0]
r.last_connection = access_token.user_id.login_date
class SaasPortalDatabase(models.Model):
_name = 'saas_portal.database'
_inherits = {'oauth.application': 'oauth_application_id'}
name = fields.Char('Database name', readonly=False)
oauth_application_id = fields.Many2one(
'oauth.application', 'OAuth Application',
required=True, ondelete='cascade')
server_id = fields.Many2one(
'saas_portal.server', ondelete='restrict',
string='Server', readonly=True)
state = fields.Selection([('draft', 'New'),
('open', 'In Progress'),
('cancelled', 'Cancelled'),
('pending', 'Pending'),
('deleted', 'Deleted'),
('template', 'Template'),
],
'State', default='draft',
track_visibility='onchange')
host = fields.Char('Host', compute='_compute_host')
public_url = fields.Char(compute='_compute_public_url')
password = fields.Char()
@api.multi
def _compute_host(self):
base_saas_domain = self.env['ir.config_parameter'].sudo(
).get_param('saas_portal.base_saas_domain')
base_saas_domain_1 = '.'.join(base_saas_domain.rsplit('.', 2)[-2:])
name_dict = {
'base_saas_domain': base_saas_domain,
'base_saas_domain_1': base_saas_domain_1,
}
for record in self:
if record.server_id.clients_host_template:
name_dict.update({'dbname': record.name})
record.host = record.server_id.clients_host_template.format(
**name_dict)
else:
_compute_host(self)
@api.multi
def _compute_public_url(self):
for record in self:
scheme = record.server_id.request_scheme
host = record.host
port = record.server_id.request_port
public_url = "%s://%s" % (scheme, host)
if scheme == 'http' and port != 80 or scheme == 'https' and port != 443:
public_url = public_url + ':' + str(port)
record.public_url = public_url + '/'
@api.multi
def _backup(self):
'''
call to backup database
'''
self.ensure_one()
state = {
'd': self.name,
'client_id': self.client_id,
}
req, req_kwargs = self.server_id._request_server(
path='/saas_server/backup_database', state=state, client_id=self.client_id)
res = requests.Session().send(req, **req_kwargs)
_logger.info('backup database: %s', res.text)
if not res.ok:
raise Warning(_('Reason: %s \n Message: %s') %
(res.reason, res.content))
data = simplejson.loads(res.text)
if not isinstance(data[0], dict):
raise Warning(data)
if data[0]['status'] != 'success':
warning = data[0].get(
'message', _('Could not backup database; please check your logs'))
raise Warning(warning)
return True
@api.multi
def action_sync_server(self):
for record in self:
record.server_id.action_sync_server()
@api.model
def _proceed_url(self, url):
return {
'type': 'ir.actions.act_url',
'target': 'new',
'name': 'Redirection',
'url': url
}
@api.multi
def _request_url(self, path):
r = self[0]
state = {
'd': r.name,
'host': r.host,
'public_url': r.public_url,
'client_id': r.client_id,
}
url = r.server_id._request(
path=path, state=state, client_id=r.client_id)
return url
@api.multi
def _request(self, path):
url = self._request_url(path)
return self._proceed_url(url)
@api.multi
def edit_database(self):
"""Obsolete. Use saas_portal.edit_database widget instead"""
for database_obj in self:
return database_obj._request('/saas_server/edit_database')
@api.multi
def delete_database(self):
for database_obj in self:
return database_obj._request('/saas_server/delete_database')
@api.multi
def upgrade(self, payload=None):
config_obj = self.env['saas.config']
res = []
if payload is not None:
# maybe use multiprocessing here
for database_obj in self:
res.append(config_obj.do_upgrade_database(
payload.copy(), database_obj))
return res
@api.multi
def delete_database_server(self, **kwargs):
self.ensure_one()
return self._delete_database_server(**kwargs)
@api.multi
def _delete_database_server(self, force_delete=False):
for database in self:
state = {
'd': database.name,
'client_id': database.client_id,
}
if force_delete:
state['force_delete'] = 1
req, req_kwargs = database.server_id._request_server(
path='/saas_server/delete_database',
state=state, client_id=database.client_id)
res = requests.Session().send(req, **req_kwargs)
_logger.info('delete database: %s', res.text)
if res.status_code != 500:
database.state = 'deleted'
@api.multi
def show_upgrade_wizard(self):
obj = self[0]
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'saas.config',
'target': 'new',
'context': {
'default_action': 'upgrade',
'default_database': obj.name
}
}
class SaasPortalClient(models.Model):
_name = 'saas_portal.client'
_description = 'Client'
_rec_name = 'name'
_inherit = ['mail.thread', 'saas_portal.database', 'saas_base.client']
name = fields.Char(required=True)
partner_id = fields.Many2one(
'res.partner', string='Partner', track_visibility='onchange', readonly=True)
plan_id = fields.Many2one('saas_portal.plan', string='Plan',
track_visibility='onchange', ondelete='set null', readonly=True)
expiration_datetime = fields.Datetime(string="Expiration")
expired = fields.Boolean('Expired', readonly=True)
user_id = fields.Many2one(
'res.users', default=lambda self: self.env.user, string='Salesperson')
notification_sent = fields.Boolean(
        default=False, readonly=True, help='notification about upcoming expiration has been sent')
support_team_id = fields.Many2one(
'saas_portal.support_team', 'Support Team')
active = fields.Boolean(
default=True, compute='_compute_active', store=True)
block_on_expiration = fields.Boolean(
'Block clients on expiration', default=False)
block_on_storage_exceed = fields.Boolean(
'Block clients on storage exceed', default=False)
storage_exceed = fields.Boolean(
'Storage limit has been exceed', default=False)
trial_hours = fields.Integer('Initial period for trial (hours)',
                                 help='Subscription initial period in hours for trials',
readonly=True)
# TODO: use new api for tracking
_track = {
'expired': {
'saas_portal.mt_expired':
lambda self, cr, uid, obj, ctx=None: obj.expired
}
}
@api.multi
@api.depends('state')
def _compute_active(self):
for record in self:
record.active = record.state != 'deleted'
@api.model
def _cron_suspend_expired_clients(self):
payload = {
'params': [{'key': 'saas_client.suspended', 'value': '1', 'hidden': True}],
}
now = fields.Datetime.now()
expired = self.search([
('expiration_datetime', '<', now),
('expired', '=', False)
])
expired.write({'expired': True})
for record in expired:
if record.trial or record.block_on_expiration:
template = self.env.ref(
'saas_portal.email_template_has_expired_notify')
record.message_post_with_template(
template.id, composition_mode='comment')
record.upgrade(payload)
# if upgraded without exceptions then change the state
record.state = 'pending'
@api.model
def _cron_notify_expired_clients(self):
# send notification about expiration by email
notification_delta = int(self.env['ir.config_parameter'].sudo(
).get_param('saas_portal.expiration_notify_in_advance', '0'))
if notification_delta > 0:
records = self.search([('expiration_datetime', '<=', (datetime.now() + timedelta(days=notification_delta)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('notification_sent', '=', False)])
records.write({'notification_sent': True})
for record in records:
template = self.env.ref(
'saas_portal.email_template_expiration_notify')
record.with_context(days=notification_delta).message_post_with_template(
template.id, composition_mode='comment')
def unlink(self):
for obj in self:
to_search1 = [('application_id', '=', obj.id)]
tokens = self.env['oauth.access_token'].search(to_search1)
tokens.unlink()
# TODO: it seems we don't need stuff below
# to_search2 = [('database', '=', obj.name)]
# user_ids = user_model.search(to_search2)
# if user_ids:
# user_model.unlink(user_ids)
# odoo.service.db.exp_drop(obj.name)
return super(SaasPortalClient, self).unlink()
@api.multi
def write(self, values):
if 'expiration_datetime' in values:
payload = {
'params': [{'key': 'saas_client.expiration_datetime', 'value': values['expiration_datetime'], 'hidden': True}],
}
for record in self:
record.upgrade(payload)
result = super(SaasPortalClient, self).write(values)
return result
@api.multi
def rename_database(self, new_dbname):
self.ensure_one()
# TODO async
state = {
'd': self.name,
'client_id': self.client_id,
'new_dbname': new_dbname,
}
req, req_kwargs = self.server_id._request_server(
path='/saas_server/rename_database', state=state, client_id=self.client_id)
res = requests.Session().send(req, **req_kwargs)
_logger.info('delete database: %s', res.text)
if res.status_code != 500:
self.name = new_dbname
@api.multi
def sync_client(self):
self.ensure_one()
self.server_id.action_sync_server(updating_client_ID=self.client_id)
@api.multi
def check_partner_access(self, partner_id):
for record in self:
if record.partner_id.id != partner_id:
raise Forbidden
@api.multi
def duplicate_database(self, dbname=None, partner_id=None, expiration=None):
self.ensure_one()
p_client = self.env['saas_portal.client']
p_server = self.env['saas_portal.server']
owner_user = self.env['res.users'].search(
[('partner_id', '=', partner_id)], limit=1) or self.env.user
server = self.server_id
if not server:
server = p_server.get_saas_server()
server.action_sync_server()
vals = {'name': dbname,
'server_id': server.id,
'plan_id': self.plan_id.id,
'partner_id': partner_id or self.partner_id.id,
}
if expiration:
now = datetime.now()
delta = timedelta(hours=expiration)
vals['expiration_datetime'] = (
now + delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
client = p_client.create(vals)
client_id = client.client_id
owner_user_data = {
'user_id': owner_user.id,
'login': owner_user.login,
'name': owner_user.name,
'email': owner_user.email,
'password': None,
}
state = {
'd': client.name,
'e': client.expiration_datetime,
'r': client.public_url + 'web',
'owner_user': owner_user_data,
'public_url': client.public_url,
'db_template': self.name,
'disable_mail_server': True,
}
scope = ['userinfo', 'force_login', 'trial', 'skiptheuse']
req, req_kwargs = server._request_server(path='/saas_server/new_database',
state=state,
client_id=client_id,
scope=scope,)
res = requests.Session().send(req, **req_kwargs)
if not res.ok:
raise Warning(_('Reason: %s \n Message: %s') %
(res.reason, res.content))
try:
data = simplejson.loads(res.text)
except Exception as e:
_logger.error('Error on parsing response: %s\n%s' %
([req.url, req.headers, req.body], res.text))
raise
data.update({'id': client.id})
return data
@api.multi
def get_upgrade_database_payload(self):
self.ensure_one()
return {'params': [{'key': 'saas_client.expiration_datetime',
'value': self.expiration_datetime,
'hidden': True}]}
@api.multi
def send_params_to_client_db(self):
for record in self:
payload = {
'params': [{'key': 'saas_client.max_users',
'value': record.max_users, 'hidden': True},
{'key': 'saas_client.expiration_datetime',
'value': record.expiration_datetime,
'hidden': True},
{'key': 'saas_client.total_storage_limit',
'value': record.total_storage_limit,
'hidden': True}],
}
self.env['saas.config'].do_upgrade_database(payload, record)
@api.multi
def send_expiration_info_to_partner(self):
for record in self:
if record.expiration_datetime:
template = self.env.ref(
'saas_portal.email_template_expiration_datetime_updated')
record.message_post_with_template(
template.id, composition_mode='comment')
@api.multi
def storage_usage_monitoring(self):
payload = {
'params': [{'key': 'saas_client.suspended',
'value': '1',
'hidden': True}],
}
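        # Flag clients whose file + database storage exceeds their limit, notify
        # them once and optionally suspend the client database; clear the flag
        # again as soon as usage drops back under the limit.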
for r in self:
if r.total_storage_limit and r.total_storage_limit < r.file_storage + r.db_storage and r.storage_exceed is False:
r.write({'storage_exceed': True})
template = self.env.ref(
'saas_portal.email_template_storage_exceed')
r.message_post_with_template(
template.id, composition_mode='comment')
if r.block_on_storage_exceed:
self.env['saas.config'].do_upgrade_database(payload, r)
            if (not r.total_storage_limit or r.total_storage_limit >= r.file_storage + r.db_storage) and r.storage_exceed is True:
r.write({'storage_exceed': False})
class SaasPortalSupportTeams(models.Model):
_name = 'saas_portal.support_team'
_inherit = ['mail.thread']
name = fields.Char('Team name')
|
it-projects-llc/odoo-saas-tools
|
saas_portal/models/saas_portal.py
|
Python
|
lgpl-3.0
| 36,190
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import mrp_production
|
odoomrp/odoomrp-wip
|
mrp_production_add_middle_stuff/models/__init__.py
|
Python
|
agpl-3.0
| 885
|
# array_editor.py -- Example of using array editors
import numpy as np
from traits.api import HasPrivateTraits, Array
from traitsui.api \
import View, ArrayEditor, Item
from traitsui.menu import NoButtons
class ArrayEditorTest ( HasPrivateTraits ):
    three = Array(np.int_, (3,3))
    four = Array(np.float64,
                 (4,4),
                 editor = ArrayEditor(width = -50))
view = View( Item('three', label='3x3 Integer'),
'_',
Item('three',
label='Integer Read-only',
style='readonly'),
'_',
Item('four', label='4x4 Float'),
'_',
Item('four',
label='Float Read-only',
style='readonly'),
buttons = NoButtons,
resizable = True )
if __name__ == '__main__':
ArrayEditorTest().configure_traits()
|
QuantumQuadrate/CsPyController
|
python/examples/array_editor.py
|
Python
|
lgpl-3.0
| 949
|
# -*- coding: utf-8 -*-
import json
from mock import Mock
from urlparse import ParseResult
from searx import webapp
from searx.testing import SearxTestCase
class ViewsTestCase(SearxTestCase):
def setUp(self):
webapp.app.config['TESTING'] = True # to get better error messages
self.app = webapp.app.test_client()
# set some defaults
self.test_results = [
{
'content': 'first test content',
'title': 'First Test',
'url': 'http://first.test.xyz',
'engines': ['youtube', 'startpage'],
'engine': 'startpage',
'parsed_url': ParseResult(scheme='http', netloc='first.test.xyz', path='/', params='', query='', fragment=''), # noqa
}, {
'content': 'second test content',
'title': 'Second Test',
'url': 'http://second.test.xyz',
'engines': ['youtube', 'startpage'],
'engine': 'youtube',
'parsed_url': ParseResult(scheme='http', netloc='second.test.xyz', path='/', params='', query='', fragment=''), # noqa
},
]
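        # Stub out webapp.Search.search so every request returns the canned
        # results above instead of querying real engines.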
def search_mock(search_self, *args):
search_self.result_container = Mock(get_ordered_results=lambda: self.test_results,
answers=set(),
suggestions=set(),
infoboxes=[],
results=self.test_results,
results_number=lambda: 3,
results_length=lambda: len(self.test_results))
webapp.Search.search = search_mock
def get_current_theme_name_mock(override=None):
return 'default'
webapp.get_current_theme_name = get_current_theme_name_mock
self.maxDiff = None # to see full diffs
def test_index_empty(self):
result = self.app.post('/')
self.assertEqual(result.status_code, 200)
self.assertIn('<div class="title"><h1>searx</h1></div>', result.data)
def test_index_html(self):
result = self.app.post('/', data={'q': 'test'})
self.assertIn(
'<h3 class="result_title"><img width="14" height="14" class="favicon" src="/static/themes/default/img/icons/icon_youtube.ico" alt="youtube" /><a href="http://second.test.xyz" rel="noreferrer">Second <span class="highlight">Test</span></a></h3>', # noqa
result.data
)
self.assertIn(
'<p class="content">first <span class="highlight">test</span> content<br class="last"/></p>', # noqa
result.data
)
def test_index_json(self):
result = self.app.post('/', data={'q': 'test', 'format': 'json'})
result_dict = json.loads(result.data)
self.assertEqual('test', result_dict['query'])
self.assertEqual(
result_dict['results'][0]['content'], 'first test content')
self.assertEqual(
result_dict['results'][0]['url'], 'http://first.test.xyz')
def test_index_csv(self):
result = self.app.post('/', data={'q': 'test', 'format': 'csv'})
self.assertEqual(
'title,url,content,host,engine,score\r\n'
'First Test,http://first.test.xyz,first test content,first.test.xyz,startpage,\r\n' # noqa
'Second Test,http://second.test.xyz,second test content,second.test.xyz,youtube,\r\n', # noqa
result.data
)
def test_index_rss(self):
result = self.app.post('/', data={'q': 'test', 'format': 'rss'})
self.assertIn(
'<description>Search results for "test" - searx</description>',
result.data
)
self.assertIn(
'<opensearch:totalResults>3</opensearch:totalResults>',
result.data
)
self.assertIn(
'<title>First Test</title>',
result.data
)
self.assertIn(
'<link>http://first.test.xyz</link>',
result.data
)
self.assertIn(
'<description>first test content</description>',
result.data
)
def test_about(self):
result = self.app.get('/about')
self.assertEqual(result.status_code, 200)
self.assertIn('<h1>About <a href="/">searx</a></h1>', result.data)
def test_preferences(self):
result = self.app.get('/preferences')
self.assertEqual(result.status_code, 200)
self.assertIn(
'<form method="post" action="/preferences" id="search_form">',
result.data
)
self.assertIn(
'<legend>Default categories</legend>',
result.data
)
self.assertIn(
'<legend>Interface language</legend>',
result.data
)
def test_stats(self):
result = self.app.get('/stats')
self.assertEqual(result.status_code, 200)
self.assertIn('<h2>Engine stats</h2>', result.data)
def test_robots_txt(self):
result = self.app.get('/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn('Allow: /', result.data)
def test_opensearch_xml(self):
result = self.app.get('/opensearch.xml')
self.assertEqual(result.status_code, 200)
self.assertIn('<Description>a privacy-respecting, hackable metasearch engine</Description>', result.data)
def test_favicon(self):
result = self.app.get('/favicon.ico')
self.assertEqual(result.status_code, 200)
|
pointhi/searx
|
tests/unit/test_webapp.py
|
Python
|
agpl-3.0
| 5,703
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afiliados', '0002_auto_20160121_1745'),
]
operations = [
migrations.AlterField(
model_name='adherente',
name='codigo_qr',
field=models.ImageField(upload_to='codigos', blank=True, null=True),
),
migrations.AlterField(
model_name='titular',
name='codigo_qr',
field=models.ImageField(upload_to='codigos', blank=True, null=True),
),
]
|
montenegroariel/sigos
|
apps/afiliados/migrations/0003_auto_20160122_0259.py
|
Python
|
gpl-3.0
| 627
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import regex
class RegexTest(unittest.TestCase):
def test_wordRE(self):
for text, matches in [
('/abcdef/', True),
('Pod abcdef failed', True),
('abcdef', True),
('cdabcdef', False),
('abc def', False),
('Podname(abcdef)', True),
]:
self.assertEqual(bool(regex.wordRE("abcdef").search(text)), matches,
'wordRE(abcdef).search(%r) should be %r' % (text, matches))
def test_error_re(self):
for text, matches in [
('errno blah', False),
('ERROR: woops', True),
('Build timed out', True),
('something timed out', False),
('misc. fatality', False),
('there was a FaTaL error', True),
('we failed to read logs', True),
('FAIL k8s.io/kubernetes/pkg/client/record', True),
]:
self.assertEqual(bool(regex.error_re.search(text)), matches,
'error_re.search(%r) should be %r' % (text, matches))
def test_objref(self):
for text, matches in [
('api.ObjectReference{Kind:"Pod"} failed', True),
('{Pod:"abc", Namespace:\"pod abc\"}', False),
('Jan 1: Event(api.ObjectReference{Kind:"Pod", Podname:"abc"}) failed'
, True),
]:
self.assertEqual(bool(regex.objref(text)), matches,
'objref(%r) should be %r' % (text, matches))
def test_combine_wordsRE(self):
for text, matches in [
('pod123 failed', True),
('Volume mounted to pod', True),
('UID: "a123"', True),
]:
self.assertEqual(bool(regex.combine_wordsRE(["pod123", "volume", "a123"])), matches,
'combine_words(%r) should be %r' % (text, matches))
def test_log_re(self):
for text, matches in [
('build-log.txt', False),
('a/b/c/kublet.log', True),
('kube-apiserver.log', True),
('abc/kubelet.log/cde', False),
('path/to/log', False),
]:
self.assertEqual(bool(regex.log_re.search(text)), matches,
'log_re(%r) should be %r' % (text, matches))
def test_containerID(self):
for text, matches in [
('the ContainerID:ab123cd', True),
('ContainerID:}]}', False),
('ContainerID:', False),
]:
self.assertEqual(bool(regex.containerID(text).group(1)), matches,
'containerID(%r).group(1) should be %r' % (text, matches))
def test_timestamp(self):
for text, matches in [
('I0629 17:33:09.813041', True),
('2016-07-22T19:01:11.150204523Z', True),
('629 17:33:09.813041:', False),
('629 17:33:09', False),
]:
self.assertEqual(bool(regex.timestamp(text)), matches,
'test_timestamp(%r) should be %r' % (text, matches))
def test_sub_timestamp(self):
for text, matches in [
('0629 17:33:09.813041', '062917:33:09.813041'),
('07-22T19:01:11.150204523', '072219:01:11.150204523'),
]:
self.assertEqual(regex.sub_timestamp(text), matches,
'sub_timetamp(%r) should be %r' % (text, matches))
if __name__ == '__main__':
unittest.main()
|
maisem/test-infra
|
gubernator/regex_test.py
|
Python
|
apache-2.0
| 4,032
|
import time
from antstar.Ant import Ant
from antstar.GlueWallAntBrain import GlueWallAntBrain
from antstar.Grid import Grid
from antstar.StickWallAntBrain import StickWallAntBrain
brains = [StickWallAntBrain]  # alternatives: GlueWallAntBrain, DirectionByPassAntBrain
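# Map legend (inferred from the Grid/Ant usage below): '0' = free cell,
# '#' = wall, 'S' = ant start position, 'X' = end/target position.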
grids = {
'Map A': """
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 X 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # # # 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # 0 # # 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 # # # # # # # # # 0
0 0 0 0 0 0 0 # # # 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 # # # # # # # 0 0 0 0
0 0 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0
0 0 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0
0 0 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0
0 0 0 0 0 0 0 # 0 # # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 # # # # # 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 S 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""",
'Map B': """
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 S 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 # 0 # 0 0 0 0 # 0 0 0
0 0 0 0 0 0 0 0 0 0 # 0 # 0 0 # 0 0 0 0
0 0 0 0 0 0 0 0 0 # 0 # 0 # 0 # 0 0 0 0
0 0 0 0 0 0 0 0 0 0 # 0 # 0 # 0 # 0 0 0
0 0 0 0 0 0 0 0 0 # 0 # 0 # 0 # 0 # 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 # 0 # 0 # 0 # 0 # 0
0 0 0 0 0 0 0 0 0 X 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""",
'Map C': """
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 S 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # # 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 # 0 # # 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 X 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # # # # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""",
'Map D': """
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 # # 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 # # X # # 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 # 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # # # # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 S 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""",
'Map E': """
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 # # 0 0 0 0 0 0 0 0 0 # 0 0 0 0
0 0 0 0 0 # # 0 0 0 0 0 0 0 # # 0 0 0 0
0 0 0 0 0 0 # 0 0 0 0 0 0 # # 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 0 0 # 0 0 0 0 0 0
0 0 0 0 0 0 0 # # 0 0 0 # 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 # # X # # 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 # # # 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # # # # # # 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 0 0 # 0 0 0 0 0 0 0
0 0 0 0 0 0 0 # 0 0 S 0 # 0 0 0 0 0 0 0
0 0 0 0 0 0 # 0 0 0 0 0 0 # 0 0 0 0 0 0
0 0 0 0 0 # 0 0 0 0 0 0 0 # 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
""",
'Map F': """
# # # # # # # # # # # # # # # # # # # # # #
# 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 X 0 # # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 # # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 # # # 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 # 0 # # 0 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 # # 0 # # 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 0 # # 0 # # # # # # # # # 0 #
# 0 # 0 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 0 # # # # # # # # # 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 # # 0 0 0 0 # 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 # # 0 0 0 0 0 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 # # # # # 0 0 0 0 0 0 #
# 0 # 0 0 0 0 0 # 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 # # # # # # # 0 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 S 0 #
# 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #
# # # # # # # # # # # # # # # # # # # # # #
""",
'Map G':"""
# # # # # # # # # # # # #
# 0 0 # X # 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# # # 0 0 0 0 0 0 0 0 0 #
# 0 # # # 0 0 0 0 0 0 0 #
# 0 0 0 # # # 0 0 0 0 0 #
# 0 0 0 0 0 # # # 0 0 0 #
# 0 0 0 0 0 0 0 # # 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 # # # # # # # #
# 0 0 0 # # 0 0 0 0 0 0 #
# 0 0 # # 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 # # 0 0 0 #
# 0 0 0 0 # # # 0 0 0 0 #
# 0 0 # # # 0 0 0 0 0 0 #
# # # # 0 0 0 0 0 0 0 0 #
# # 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 S 0 0 0 0 #
# # # # # # # # # # # # #
""",
'Map Gbis':"""
# # # # # # # # # # # # #
# 0 # X # 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# # # 0 0 0 0 0 0 0 0 0 #
# 0 # # # 0 0 0 0 0 0 0 #
# 0 0 0 # # # 0 0 0 0 0 #
# 0 0 0 0 0 # # # 0 0 0 #
# 0 0 0 0 0 0 0 # # 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 # # # # # # # #
# 0 0 0 # # 0 0 0 0 0 0 #
# 0 0 # # 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 # # 0 0 0 #
# 0 0 0 0 # # # 0 0 0 0 #
# 0 0 # # # 0 0 0 0 0 0 #
# # # # 0 0 0 0 0 0 0 0 #
# # 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 S 0 0 0 0 #
# # # # # # # # # # # # #
""",
'Map H':"""
# # # # # # # # # # # # #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 0 0 0 0 0 #
# 0 0 0 0 0 0 # 0 # 0 0 #
# 0 0 0 0 0 0 # 0 # 0 0 #
# 0 0 0 0 0 0 # # # # 0 #
# 0 0 0 0 0 0 0 0 0 # 0 #
# 0 0 0 0 0 0 0 0 X # S #
# # # # # # # # # # # # #
""",
'Map I':"""
# # # # # # # # # # # # #
# 0 0 0 # 0 # 0 0 0 # 0 #
# 0 0 0 # 0 0 0 # # # 0 #
# 0 0 0 # # # # # 0 0 0 #
# 0 0 0 0 0 0 # # 0 # 0 #
# 0 0 0 0 # # # # 0 # 0 #
# 0 0 0 0 0 0 0 # 0 # # #
# 0 0 0 0 0 # 0 0 0 # 0 #
# 0 0 0 0 0 # 0 # 0 # 0 #
# 0 0 0 0 0 # 0 # 0 0 0 #
# 0 0 0 0 0 # # # # # 0 #
# 0 0 0 0 0 0 0 0 0 # 0 #
# 0 0 0 # # # # # X # S #
# # # # # # # # # # # # #
"""
}
# TODO De
#
# grids = {'Test': grids['Map F']}#,'Test': grids['Map I'], 'Test2': grids['Map H'], 'Test2': grids['Map Gbis']}
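# Run every brain on every map: print the initial grid, then move the ant until
# it reaches the end position or 200 steps have elapsed, and print the result.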
for grid_name in grids:
for brain_class in brains:
try:
grid_text = grids[grid_name]
grid = Grid.from_string(grid_text)
ant = Ant(start_position=grid.get_start_position(),
end_position=grid.get_end_position(),
grid=grid,
brain=brain_class)
print('%s with %s' % (grid_name, brain_class.__name__))
grid.print(ant)
steps = 0
while ant.get_position() != grid.get_end_position() and steps < 200:
time.sleep(0.1)
ant.move()
steps += 1
print('')
print('%s with %s (step %s)' % (grid_name, brain_class.__name__, str(steps)))
grid.print(ant)
except KeyboardInterrupt:
pass
|
buxx/AntStar
|
test.py
|
Python
|
gpl-2.0
| 8,653
|
from django.contrib import admin
from .models import Blog
class BlogAdmin(admin.ModelAdmin):
list_display = ('title', 'date')
search_fields = ('title',)
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Blog, BlogAdmin)
|
chhantyal/taggit-selectize
|
example_app/blog/admin.py
|
Python
|
bsd-3-clause
| 250
|
#!/usr/bin/python
'''
Progbot.py
- A progressive IRC Bot written in Python
Copyright (c) Ruel Pagayon <http://ruel.me>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
class Progbot:
'''
This is the Progbot class. This functions like a regular
    IRC client. It requires a nick, server, port, channel,
    and owner nick name on initialization.
    Example:
    bot = Progbot('Progbot', 'irc.rizon.net', '6667', '#Progbot', 'Ruel')
'''
import socket
Nick = 'Progbot'
Server = 'irc.rizon.net'
Channel = '#Progbot'
Port = '6667'
Owner = 'Ruel'
File = 'responses.txt'
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_buffer = ''
_last = ''
_source = 'Anonymous'
_target = Channel
_done = False
_owner = False
_flood = False
_flood2 = False
def __init__(self, nck, serv, pt, chan, own):
'''
This will initialize the class variables.
Acts as a constructor.
'''
self.Nick = nck
self.Server = serv
self.Channel = chan
self.Port = pt
self.Owner = own
def Connect(self, verbose = False):
'''
This function uses sockets to connect to the remote
IRC server. It accepts an optional variable, that makes
the console window verbose (True or False).
'''
self._sock.connect((self.Server, int(self.Port)))
self._sock.send("NICK %s\r\n" % self.Nick )
self._sock.send("USER %s %s %s :Progbot - Ruel.me\r\n" % (self.Nick, self.Nick, self.Nick))
self._sock.send("JOIN %s\r\n" % self.Channel)
while True:
self._buffer = self._sock.recv(1024)
if verbose:
print self._buffer
self._parseLine(self._buffer)
if self._done:
self._sock.close()
                break
def _parseLine(self, line):
'''
Parse every line, check for PING, or match responses.
'''
self._owner = False
line = line.strip()
words = line.split()
self._checkOwn(words[0])
self._pong(words)
self._checkSayChan(words)
self._checkQuit(words)
self._checkKick(words)
if words[1] == 'PRIVMSG':
self._checkResponse(words, self.File)
def _checkOwn(self, source):
if source.find('!') != -1:
nameStr = source.split('!')
self._source = nameStr[0].lstrip(':')
if self.Owner == self._source:
self._owner = True
def _checkResponse(self, words, rfile):
'''
        This opens the responses file and matches the incoming message against
        each "match ~ type ~ reply" line, replacing a long chain of if-else blocks.
'''
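        # Hypothetical responses.txt entries, illustrating the
        # "match ~ type ~ reply" format parsed below:
        #   hello %bnick% ~ S ~ Hello %nick%!
        #   !slap %m% ~ A ~ slaps %m% with a large trout
        #   !raw %m% ~ R ~ %m%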
self._target = words[2] if self.Nick != words[2] else self._source
msg = ''
for i in range(3, len(words)):
msg += words[i] + ' '
msg = msg.lstrip(':').rstrip(' ')
fh = open(rfile, 'r')
for line in fh:
'''
Loop through each line in the file.
'''
if line[0] == '#' or line == '' or not '~' in line:
continue
            matchStr, mType, msgStr = line.split(' ~ ', 2)
matchStr = matchStr.replace('%nick%', self._source)
matchStr = matchStr.replace('%bnick%', self.Nick)
matchStr = matchStr.replace('%source%', self._target)
msgStr = msgStr.replace('%nick%', self._source)
msgStr = msgStr.replace('%bnick%', self.Nick)
msgStr = msgStr.replace('%source%', self._target)
if '%m%' in matchStr:
'''
Check if there's a matching string.
'''
matchStr = matchStr.replace('%m%', '(.*)')
match = re.search(matchStr, msg)
if match:
'''
If there's a match
'''
msgStr = msgStr.replace('%m%', match.group(1).strip())
matchStr = matchStr.replace('(.*)', match.group(1).strip())
if matchStr == msg:
'''
Check if the case on the file, matches the current message.
'''
if mType == 'S':
response = "PRIVMSG %s :%s" % (self._target, msgStr)
elif mType == 'A':
response = "PRIVMSG %s :%sACTION %s%s" % (self._target, chr(1), msgStr, chr(1))
elif self._owner and mType == 'R':
response = msgStr
print response
# Check if the last response is the same as the present
if response == self._last:
self._flood = True
else:
self._flood = False
self._flood2 = False
# Flooding Protection
if self._flood:
if not self._flood2:
self._sock.send("PRIVMSG %s :Nope, you can't flood me.\r\n" % self._target)
self._flood2 = True
else:
self._sock.send("%s\r\n" % response)
# Copy the last response
self._last = response
def _pong(self, words):
'''
Respond to PING! That's one of the most important tasks to
stay alive at the server.
'''
if words[0] == 'PING':
self._sock.send("PONG " + words[1] + "\r\n")
def _checkQuit(self, words):
'''
Quit the connection to the IRC Server.
'''
if words[1] == 'PRIVMSG':
if self._owner and words[3] == ':!q':
self._sock.send("QUIT\r\n")
self._done = True
def _checkSayChan(self, words):
'''
Talk to the specified channel.
'''
if words[1] == 'PRIVMSG' and words[2] == self.Nick:
if self._owner and words[3] == ':!say':
# Merge the words to one string
full = ' '.join(words)
# Check if the structure is valid
regex = re.search(':!say #(\w+) (.+)', full)
if regex:
chan = regex.group(1)
message = regex.group(2)
self._sock.send("PRIVMSG #%s :%s\r\n" % (chan, message))
def _checkKick(self, words):
'''
Auto rejoin when kicked
'''
if words[1] == 'KICK' and words[3] == self.Nick:
self._sock.send("JOIN %s\r\n" % words[2])
'''
END OF CODE
'''
|
ruel/Progbot
|
progbot/progbot.py
|
Python
|
gpl-3.0
| 6,039
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
from wdom.tag import NewTagClass as NewTag
from wdom.themes import *
name = 'Bijou'
project_url = 'http://andhart.github.io/bijou/'
project_repository = 'https://github.com/andhart/bijou'
license = 'MIT License'
license_url = 'https://github.com/andhart/bijou/blob/master/LICENSE'
css_files = [
'_static/css/bijou.min.css',
]
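# Each widget below wraps a base wdom tag with Bijou's CSS classes; NewTag
# (wdom's NewTagClass) appears to build a new tag class carrying the given
# tag name, base classes and class_ attribute.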
Button = NewTag('Button', bases=Button, class_='button small')
DefaultButton = NewTag('DefaultButton', 'button', Button, is_='default-button')
PrimaryButton = NewTag('PrimaryButton', 'button', Button, class_='primary', is_='primary-button')
SecondaryButton = NewTag('SecondaryButton', 'button', Button, is_='secondary-button')
SuccessButton = NewTag('SuccessButton', 'button', Button, class_='success', is_='success-button')
InfoButton = NewTag('InfoButton', 'button', Button, class_='success', is_='info-button')
WarningButton = NewTag('WarningButton', 'button', Button, class_='danger', is_='warning-button')
DangerButton = NewTag('DangerButton', 'button', Button, class_='danger', is_='danger-button')
ErrorButton = NewTag('ErrorButton', 'button', Button, class_='danger', is_='error-button')
LinkButton = NewTag('LinkButton', 'button', Button, is_='link-button')
Table = NewTag('Table', 'table', Table, class_='table')
Row = NewTag('Row', 'div', Row, class_='row')
Col = NewTag('Col', 'div', Col, class_='span')
Col1 = NewTag('Col1', 'div', (Col1, Col), class_='one')
Col2 = NewTag('Col2', 'div', (Col2, Col), class_='two')
Col3 = NewTag('Col3', 'div', (Col3, Col), class_='three')
Col4 = NewTag('Col4', 'div', (Col4, Col), class_='four')
Col5 = NewTag('Col5', 'div', (Col5, Col), class_='five')
Col6 = NewTag('Col6', 'div', (Col6, Col), class_='six')
Col7 = NewTag('Col7', 'div', (Col7, Col), class_='seven')
Col8 = NewTag('Col8', 'div', (Col8, Col), class_='eight')
Col9 = NewTag('Col9', 'div', (Col9, Col), class_='nine')
Col10 = NewTag('Col10', 'div', (Col10, Col), class_='ten')
Col11 = NewTag('Col11', 'div', (Col11, Col), class_='eleven')
extended_classes = [
Button,
DefaultButton,
PrimaryButton,
SuccessButton,
InfoButton,
WarningButton,
DangerButton,
ErrorButton,
LinkButton,
Table,
Row,
Col1,
Col2,
Col3,
Col4,
Col5,
Col6,
Col7,
Col8,
Col9,
Col10,
Col11,
]
|
miyakogi/wdom
|
wdom/themes/bijou.py
|
Python
|
mit
| 2,359
|
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pluginsmanager.util.pairs_list import PairsList
class PairsListTest(unittest.TestCase):
def test_pairs_list(self):
similarity_key = lambda element: element
pairs_list = PairsList(similarity_key)
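        # PairsList.calculate pairs up similar elements across the two lists;
        # anything left unmatched is reported per side via elements_not_added_*.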
list_a = ['A', 'A', 'A', 'B', 'D', 'C', 'X']
list_b = ['B', 'B', 'D', 'C', 'A', 'A', 'E']
result = pairs_list.calculate(list_a, list_b)
expected = [('A', 'A'), ('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')]
self.assertEqual(expected, sorted(result.pairs))
expected_not_pairs_a = ['A', 'X']
expected_not_pairs_b = ['B', 'E']
self.assertEqual(expected_not_pairs_a, sorted(result.elements_not_added_a))
self.assertEqual(expected_not_pairs_b, sorted(result.elements_not_added_b))
|
PedalPi/PluginsManager
|
test/util/pairs_list_test.py
|
Python
|
apache-2.0
| 1,371
|
'''
.. Created 2015
.. codeauthor:: Hstau Y Liao <hstau.y.liao@gmail.com>
'''
import numpy as np
def mcol(u):
    # u is a 1-D array; reshape it into an (n, 1) column vector
u = np.reshape(u,(u.shape[0],1))
return u
|
hstau/manifold-cryo
|
manifold/gui/utilities.py
|
Python
|
gpl-2.0
| 182
|
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/products/compute-engine) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance
required: false
default: "debian-7"
aliases: []
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
aliases: []
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
aliases: []
metadata:
description:
- a hash/dictionary of custom data for the instance; '{"key":"value",...}'
required: false
default: null
aliases: []
service_account_email:
version_added: 1.5.1
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: 1.5.1
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: 1.5.1
description:
- your GCE project ID
required: false
default: null
aliases: []
name:
description:
- identifier when working with a single instance
required: false
aliases: []
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
aliases: []
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
aliases: []
disks:
description:
- a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE).
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
aliases: []
zone:
description:
- the GCE zone to use
required: true
default: "us-central1-a"
aliases: []
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for gateways)
required: false
default: "false"
aliases: []
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default
required: false
default: "ephemeral"
aliases: []
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
notes:
- Either I(name) or I(instance_names) is required.
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 7 instance in the
# us-central1-a Zone of n1-standard-1 machine type.
- local_action:
module: gce
name: test-instance
zone: us-central1-a
machine_type: n1-standard-1
image: debian-7
# Example using defaults and with metadata to create a single 'foo' instance
- local_action:
module: gce
name: foo
metadata: '{"db":"postgres", "group":"qa", "id":500}'
# Launch instances from a control node, runs some tasks on the new instances,
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
service_account_email: unique-email@developer.gserviceaccount.com
pem_file: /path/to/pem_file
project_id: project-id
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}} service_account_email={{ service_account_email }}
pem_file={{ pem_file }} project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host={{item.public_ip}} port=22 delay=10
timeout=60 state=started
with_items: {{gce.instance_data}}
- name: Configure instance(s)
hosts: launched
sudo: True
roles:
- my_awesome_role
- my_awesome_tasks
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: gce
state: 'absent'
instance_names: {{gce.instance_names}}
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
except:
netname = None
if 'disks' in inst.extra:
disk_names = [disk_info['source'].split('/')[-1]
for disk_info
in sorted(inst.extra['disks'],
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
if len(inst.public_ips) == 0:
public_ip = None
else:
public_ip = inst.public_ips[0]
return({
'image': not inst.image is None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'private_ip': inst.private_ips[0],
'public_ip': public_ip,
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
Returns:
A list of dictionaries with instance information
about the instances that were launched.
"""
image = module.params.get('image')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
disks = module.params.get('disks')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete')
if external_ip == "none":
external_ip = None
new_instances = []
changed = False
lc_image = gce.ex_get_image(image)
lc_disks = []
disk_modes = []
for i, disk in enumerate(disks or []):
if isinstance(disk, dict):
lc_disks.append(gce.ex_get_volume(disk['name']))
disk_modes.append(disk['mode'])
else:
lc_disks.append(gce.ex_get_volume(disk))
# boot disk is implicitly READ_WRITE
disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type)
lc_zone = gce.ex_get_zone(zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
if metadata:
try:
md = literal_eval(metadata)
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError, e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError, e:
module.fail_json(msg='bad metadata syntax')
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
metadata = {'items': items}
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
for name in instance_names:
pd = None
if lc_disks:
pd = lc_disks[0]
elif persistent_boot_disk:
try:
pd = gce.create_volume(None, "%s" % name, image=lc_image)
except ResourceExistsError:
pd = gce.ex_get_volume("%s" % name, lc_zone)
inst = None
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
except GoogleBaseError, e:
module.fail_json(msg='Unexpected error attempting to create ' + \
'instance %s, error: %s' % (name, e.value))
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
attached_disk = inst.extra['disks'][i]
if attached_disk['source'] != lc_disk.extra['selfLink']:
module.fail_json(
msg=("Disk at index %d does not match: requested=%s found=%s" % (
i, lc_disk.extra['selfLink'], attached_disk['source'])))
elif attached_disk['mode'] != disk_modes[i]:
module.fail_json(
msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
i, disk_modes[i], attached_disk['mode'])))
else:
continue
gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
# Work around libcloud bug: attached volumes don't get added
# to the instance metadata. get_instance_info() only cares about
# source and index.
if len(inst.extra['disks']) != i+1:
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
if inst:
new_instances.append(inst)
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def terminate_instances(module, gce, instance_names, zone_name):
"""Terminates a list of instances.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone_name: the zone where the instances reside prior to termination
Returns a dictionary of instance names that were terminated.
"""
changed = False
terminated_instance_names = []
for name in instance_names:
inst = None
try:
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if inst:
gce.destroy_node(inst)
terminated_instance_names.append(inst.name)
changed = True
return (changed, terminated_instance_names)
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-7'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(),
network = dict(default='default'),
persistent_boot_disk = dict(type='bool', default=False),
disks = dict(type='list'),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
ip_forward = dict(type='bool', default=False),
external_ip = dict(choices=['ephemeral', 'none'],
default='ephemeral'),
disk_auto_delete = dict(type='bool', default=True),
)
)
if not HAS_PYTHON26:
module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
gce = gce_connect(module)
image = module.params.get('image')
instance_names = module.params.get('instance_names')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
name = module.params.get('name')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
ip_forward = module.params.get('ip_forward')
changed = False
inames = []
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames.append(name)
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted']:
json_output['state'] = 'absent'
(changed, terminated_instance_names) = terminate_instances(module,
gce, inames, zone)
# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed
if instance_names:
json_output['instance_names'] = terminated_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data,instance_name_list) = create_instances(
module, gce, inames)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
|
mschurenko/ansible-modules-core
|
cloud/google/gce.py
|
Python
|
gpl-3.0
| 17,261
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: metagriffin <mg.github@uberdev.org>
# date: 2011/06/02
# copy: (C) Copyright 2011-EOT metagriffin -- see LICENSE.txt
#------------------------------------------------------------------------------
# This software is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#------------------------------------------------------------------------------
import unittest, yaml
from aadict import aadict
from svnpublish import framework
from .config import mergeOptions
#------------------------------------------------------------------------------
class TestConfig(unittest.TestCase):
#----------------------------------------------------------------------------
def test_mergeOptions(self):
base = aadict(yaml.load(framework.defaultOptions))
override = yaml.load('reposUrl: https://svn.example.com/repos\n')
options = mergeOptions(base, override)
self.assertEqual(None, base.reposUrl)
self.assertEqual('https://svn.example.com/repos', options.reposUrl)
#----------------------------------------------------------------------------
def test_mergeOptions_nulldeletes(self):
base = aadict.d2ar(yaml.load(framework.defaultOptions))
override = yaml.load('genemail: {default: null}')
options = mergeOptions(base, override)
self.assertNotEqual(None, base.genemail.default)
self.assertEqual(None, options.genemail.default)
self.assertIn('default', base.genemail)
self.assertNotIn('default', options.genemail)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
|
metagriffin/svnpublish
|
svnpublish/util/test_config.py
|
Python
|
gpl-3.0
| 2,329
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2016 Factor Libre S.L. (http://factorlibre.com)
# Kiko Peiro <francisco.peiro@factorlibre.com>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import api, models
_logger = logging.getLogger(__name__)
class L10nEsAeatMod340CashBasisCalculateRecords(models.TransientModel):
_inherit = "l10n.es.aeat.mod340.calculate_records"
@api.multi
def _calculate_records(self, recalculate=True):
invoice_obj = self.env['account.invoice']
invoices340 = self.env['l10n.es.aeat.mod340.issued']
invoices340_rec = self.env['l10n.es.aeat.mod340.received']
report = self.env['l10n.es.aeat.mod340.report']
for record in self:
super(L10nEsAeatMod340CashBasisCalculateRecords, record).\
_calculate_records(recalculate=recalculate)
mod340 = report.browse(record.id)
account_period_ids = [x.id for x in mod340.periods]
domain = [
('period_id', 'in', account_period_ids),
('state', 'in', ('open', 'paid')),
('vat_on_payment', '=', True)
]
invoice_ids = invoice_obj.search(domain)
for invoice in invoice_ids:
domain_records = [
('mod340_id', '=', record.id),
('invoice_id', '=', invoice.id)
]
invoice_created_id = False
if invoice.type in ['out_invoice', 'out_refund']:
invoice_created_id = invoices340.search(domain_records)
if invoice.type in ['in_invoice', 'in_refund']:
invoice_created_id = invoices340_rec.search(domain_records)
if invoice_created_id:
invoice_created_id = invoice_created_id[0]
date_payment = False
payment_amount = 0
name_payment_method = ''
for payment_id in\
invoice_created_id.invoice_id.payment_ids:
if not date_payment:
date_payment = payment_id.date
if not name_payment_method:
name_payment_method_id = self.env[
'res.partner.bank'].search(
[('journal_id', '=', payment_id.journal_id.id)]
)
if name_payment_method_id:
name_payment_method =\
name_payment_method_id[0].\
acc_number.replace(' ', '')
payment_amount = payment_amount + payment_id.debit
invoice_created_id.write({
'date_payment': date_payment,
'payment_amount': payment_amount,
'name_payment_method': name_payment_method,
'key_operation': 'Z'
})
return True
|
algiopensource/l10n-spain
|
l10n_es_aeat_mod340_cash_basis/wizard/calculate_mod340_records.py
|
Python
|
agpl-3.0
| 3,973
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend limecoinxs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a limecoinxd or Limecoinx-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the limecoinx data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Limecoinx/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Limecoinx")
return os.path.expanduser("~/.limecoinx")
def read_bitcoin_config(dbdir):
"""Read the limecoinx.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
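    # limecoinx.conf has no section headers, so FakeSecHead below injects a fake
    # '[all]' section and strips inline '#' comments for SafeConfigParser.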
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "limecoinx.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a limecoinx JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 8884 if testnet else 8800
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the limecoinxd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(limecoinxd):
info = limecoinxd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
limecoinxd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = limecoinxd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(limecoinxd):
address_summary = dict()
address_to_account = dict()
for info in limecoinxd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = limecoinxd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = limecoinxd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-limecoinx-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(limecoinxd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(limecoinxd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to limecoinxd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = limecoinxd.createrawtransaction(inputs, outputs)
signed_rawtx = limecoinxd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(limecoinxd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = limecoinxd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(limecoinxd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = limecoinxd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(limecoinxd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # fee actually paid by this transaction
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get limecoinxs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send limecoinxs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of limecoinx.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
limecoinxd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(limecoinxd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while not unlock_wallet(limecoinxd):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(limecoinxd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(limecoinxd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = limecoinxd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
LIMX-SUPPORT/LimecoinX
|
contrib/spendfrom/spendfrom.py
|
Python
|
mit
| 10,120
|
#!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""System-wide configuration variables."""
import datetime
# This HTML block will be printed in the footer of every page.
FOOTER_HTML = (
'cocos Live v0.3.6 - © 2009 <a href="http://www.sapusmedia.com">Sapus Media</a>'
)
# File caching controls
FILE_CACHE_CONTROL = 'private, max-age=86400'
FILE_CACHE_TIME = datetime.timedelta(days=1)
# Title for the website
SYSTEM_TITLE = 'cocos Live'
# Unique identifier from Google Analytics
ANALYTICS_ID = 'UA-871936-6'
|
google-code-export/cocoslive
|
cocoslive/configuration.py
|
Python
|
gpl-3.0
| 1,082
|
# Lint as: python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports."""
from tfrecorder import accessor
from tfrecorder.converter import convert
from tfrecorder.dataset_loader import load
from tfrecorder.converter import convert_and_load
from tfrecorder.utils import inspect
|
google/tensorflow-recorder
|
tfrecorder/__init__.py
|
Python
|
apache-2.0
| 816
|
from gi.repository import Gtk
machines = {}
store = Gtk.ListStore(str, str, int, str, int)
cluster_ip = ""
cluster_port = ""
machine_json = ""
username = ""
password = ""
global connection
|
davidsf/ganetiadmin
|
params.py
|
Python
|
gpl-3.0
| 189
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Pass for one layer of decomposing the gates in a circuit."""
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.dagcircuit import DAGCircuit
class Decompose(TransformationPass):
"""
Expand a gate in a circuit using its decomposition rules.
"""
def __init__(self, gate=None):
"""
Args:
gate (qiskit.circuit.gate.Gate): Gate to decompose.
"""
super().__init__()
self.gate = gate
def run(self, dag):
"""Expand a given gate into its decomposition.
Args:
dag(DAGCircuit): input dag
Returns:
DAGCircuit: output dag where gate was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
# opaque or built-in gates are not decomposable
if not node.op.definition:
continue
# TODO: allow choosing among multiple decomposition rules
rule = node.op.definition
# hacky way to build a dag on the same register as the rule is defined
# TODO: need anonymous rules to address wires by index
decomposition = DAGCircuit()
qregs = {qb.register for inst in rule for qb in inst[1]}
cregs = {cb.register for inst in rule for cb in inst[2]}
for qreg in qregs:
decomposition.add_qreg(qreg)
for creg in cregs:
decomposition.add_creg(creg)
for inst in rule:
decomposition.apply_operation_back(*inst)
dag.substitute_node_with_dag(node, decomposition)
return dag
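# A minimal usage sketch (not part of the pass itself; assumes the converter
# helpers shipped with the same qiskit release):
#   from qiskit import QuantumCircuit
#   from qiskit.converters import circuit_to_dag, dag_to_circuit
#   qc = QuantumCircuit(2)
#   qc.cx(0, 1)
#   expanded_dag = Decompose().run(circuit_to_dag(qc))
#   expanded_circuit = dag_to_circuit(expanded_dag)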
|
QISKit/qiskit-sdk-py
|
qiskit/transpiler/passes/decompose.py
|
Python
|
apache-2.0
| 2,194
|
from django.apps import AppConfig
class UploadsConfig(AppConfig):
name = "hav.apps.sources.uploads"
|
whav/hav
|
src/hav/apps/sources/uploads/apps.py
|
Python
|
gpl-3.0
| 106
|
# Generated by Django 3.2.6 on 2021-09-23 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("peeringdb", "0018_auto_20210523_1204")]
operations = [
migrations.AddField(
model_name="facility",
name="available_voltage_services",
field=models.CharField(
blank=True,
choices=[
("48 VDC", "48 VDC"),
("120 VAC", "120 VAC"),
("208 VAC", "208 VAC"),
("240 VAC", "240 VAC"),
("480 VAC", "480 VAC"),
],
max_length=255,
null=True,
),
),
migrations.AddField(
model_name="facility",
name="diverse_serving_substations",
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name="facility",
name="property",
field=models.CharField(
blank=True,
choices=[
("", "Not Disclosed"),
("Owner", "Owner"),
("Lessee", "Lessee"),
],
max_length=27,
null=True,
),
),
]
|
respawner/peering-manager
|
peeringdb/migrations/0019_auto_20210923_1544.py
|
Python
|
apache-2.0
| 1,338
|
# -*- coding: utf-8 -*-
"""Parser for Extensible Storage Engine (ESE) database files (EDB)."""
import pyesedb
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
from plaso.parsers import plugins
class ESEDatabase(object):
"""Extensible Storage Engine (ESE) database."""
def __init__(self):
"""Initializes a Extensible Storage Engine (ESE) database."""
super(ESEDatabase, self).__init__()
self._esedb_file = None
self._table_names = []
@property
def tables(self):
"""list[str]: names of all the tables."""
if not self._table_names:
for esedb_table in self._esedb_file.tables:
self._table_names.append(esedb_table.name)
return self._table_names
def Close(self):
"""Closes the database."""
self._esedb_file.close()
self._esedb_file = None
def GetTableByName(self, name):
"""Retrieves a table by its name.
Args:
name (str): name of the table.
Returns:
pyesedb.table: the table with the corresponding name or None if there is
no table with the name.
"""
return self._esedb_file.get_table_by_name(name)
def Open(self, file_object):
"""Opens an Extensible Storage Engine (ESE) database file.
Args:
file_object (dfvfs.FileIO): file-like object.
Raises:
IOError: if the file-like object cannot be read.
OSError: if the file-like object cannot be read.
ValueError: if the file-like object is missing.
"""
if not file_object:
raise ValueError('Missing file object.')
self._esedb_file = pyesedb.file()
self._esedb_file.open_file_object(file_object)
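# A minimal usage sketch for ESEDatabase (file_object stands for any dfVFS
# file-like object; the table name is illustrative only):
#   database = ESEDatabase()
#   database.Open(file_object)
#   if 'Containers' in database.tables:
#     table = database.GetTableByName('Containers')
#   database.Close()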
class ESEDBCache(plugins.BasePluginCache):
"""A cache storing query results for ESEDB plugins."""
def StoreDictInCache(self, attribute_name, dict_object):
"""Store a dict object in cache.
Args:
attribute_name (str): name of the attribute.
dict_object (dict): dictionary.
"""
setattr(self, attribute_name, dict_object)
class ESEDBParser(interface.FileObjectParser):
"""Parses Extensible Storage Engine (ESE) database files (EDB)."""
_INITIAL_FILE_OFFSET = None
NAME = 'esedb'
DATA_FORMAT = 'Extensible Storage Engine (ESE) Database File (EDB) format'
_plugin_classes = {}
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'\xef\xcd\xab\x89', offset=4)
return format_specification
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an ESE database file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
database = ESEDatabase()
try:
database.Open(file_object)
except (IOError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
# Compare the list of available plugin objects.
cache = ESEDBCache()
try:
for plugin in self._plugins:
if parser_mediator.abort:
break
if not plugin.CheckRequiredTables(database):
continue
try:
plugin.UpdateChainAndProcess(
parser_mediator, cache=cache, database=database)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'plugin: {0:s} unable to parse ESE database with error: '
'{1!s}').format(plugin.NAME, exception))
finally:
# TODO: explicitly clean up cache.
database.Close()
manager.ParsersManager.RegisterParser(ESEDBParser)
|
Onager/plaso
|
plaso/parsers/esedb.py
|
Python
|
apache-2.0
| 3,948
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window.ui'
#
# Created: Mon Jun 8 18:47:51 2015
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.snake_widget = SnakeWidget(self.centralwidget)
self.snake_widget.setObjectName("snake_widget")
self.pause = QtWidgets.QLabel(self.snake_widget)
self.pause.setGeometry(QtCore.QRect(140, 120, 331, 341))
self.pause.setText("")
self.pause.setObjectName("pause")
self.verticalLayout.addWidget(self.snake_widget)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
from snake_widget import SnakeWidget
|
bitterfly/tamagotchi
|
tamagotchi/snake/ui_window.py
|
Python
|
gpl-2.0
| 1,398
|
from changes.api.serializer import Crumbler, register
from changes.models.node import Cluster
@register(Cluster)
class ClusterCrumbler(Crumbler):
def crumble(self, instance, attrs):
return {
'id': instance.id.hex,
'name': instance.label,
'dateCreated': instance.date_created,
}
|
dropbox/changes
|
changes/api/serializer/models/cluster.py
|
Python
|
apache-2.0
| 336
|
from functools import partial
from urllib.parse import urlencode
from geopy.exc import (
GeocoderAuthenticationFailure,
GeocoderInsufficientPrivileges,
GeocoderQueryError,
GeocoderQuotaExceeded,
GeocoderServiceError,
)
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.timezone import (
ensure_pytz_is_installed,
from_fixed_gmt_offset,
from_timezone_name,
)
from geopy.util import logger
__all__ = ("GeoNames", )
class GeoNames(Geocoder):
"""GeoNames geocoder.
Documentation at:
http://www.geonames.org/export/geonames-search.html
Reverse geocoding documentation at:
http://www.geonames.org/export/web-services.html#findNearbyPlaceName
"""
geocode_path = '/searchJSON'
reverse_path = '/findNearbyPlaceNameJSON'
reverse_nearby_path = '/findNearbyJSON'
timezone_path = '/timezoneJSON'
def __init__(
self,
username,
*,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
ssl_context=DEFAULT_SENTINEL,
adapter_factory=None,
scheme='http'
):
"""
:param str username: GeoNames username, required. Sign up here:
http://www.geonames.org/login
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
:param callable adapter_factory:
See :attr:`geopy.geocoders.options.default_adapter_factory`.
.. versionadded:: 2.0
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`. Note that
at the time of writing GeoNames doesn't support `https`, so
the default scheme is `http`. The value of
:attr:`geopy.geocoders.options.default_scheme` is not respected.
This parameter is present to make it possible to switch to
`https` once GeoNames adds support for it.
"""
super().__init__(
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
adapter_factory=adapter_factory,
)
self.username = username
domain = 'api.geonames.org'
self.api = (
"%s://%s%s" % (self.scheme, domain, self.geocode_path)
)
self.api_reverse = (
"%s://%s%s" % (self.scheme, domain, self.reverse_path)
)
self.api_reverse_nearby = (
"%s://%s%s" % (self.scheme, domain, self.reverse_nearby_path)
)
self.api_timezone = (
"%s://%s%s" % (self.scheme, domain, self.timezone_path)
)
def geocode(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
country=None,
country_bias=None
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param country: Limit records to the specified countries.
Two letter country code ISO-3166 (e.g. ``FR``). Might be
a single string or a list of strings.
:type country: str or list
:param str country_bias: Records from the country_bias are listed first.
Two letter country code ISO-3166.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = [
('q', query),
('username', self.username),
]
if country_bias:
params.append(('countryBias', country_bias))
if not country:
country = []
if isinstance(country, str):
country = [country]
for country_item in country:
params.append(('country', country_item))
if exactly_one:
params.append(('maxRows', 1))
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def reverse(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
feature_code=None,
lang=None,
find_nearby_type='findNearbyPlaceName'
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str feature_code: A GeoNames feature code
:param str lang: language of the returned ``name`` element (the pseudo
language code 'local' will return it in local language)
Full list of supported languages can be found here:
https://www.geonames.org/countries/
:param str find_nearby_type: A flag to switch between different
GeoNames API endpoints. The default value is ``findNearbyPlaceName``
which returns the closest populated place. Another currently
implemented option is ``findNearby`` which returns
the closest toponym for the lat/lng query.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
try:
lat, lng = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
if find_nearby_type == 'findNearbyPlaceName': # default
if feature_code:
raise ValueError(
"find_nearby_type=findNearbyPlaceName doesn't support "
"the `feature_code` param"
)
params = self._reverse_find_nearby_place_name_params(
lat=lat,
lng=lng,
lang=lang,
)
url = "?".join((self.api_reverse, urlencode(params)))
elif find_nearby_type == 'findNearby':
if lang:
raise ValueError(
"find_nearby_type=findNearby doesn't support the `lang` param"
)
params = self._reverse_find_nearby_params(
lat=lat,
lng=lng,
feature_code=feature_code,
)
url = "?".join((self.api_reverse_nearby, urlencode(params)))
else:
raise GeocoderQueryError(
'`%s` find_nearby_type is not supported by geopy' % find_nearby_type
)
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def _reverse_find_nearby_params(self, lat, lng, feature_code):
params = {
'lat': lat,
'lng': lng,
'username': self.username,
}
if feature_code:
params['featureCode'] = feature_code
return params
def _reverse_find_nearby_place_name_params(self, lat, lng, lang):
params = {
'lat': lat,
'lng': lng,
'username': self.username,
}
if lang:
params['lang'] = lang
return params
def reverse_timezone(self, query, *, timeout=DEFAULT_SENTINEL):
"""
Find the timezone for a point in `query`.
GeoNames always returns a timezone: if the point being queried
doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset``
timezone is used to produce the :class:`geopy.timezone.Timezone`.
:param query: The coordinates for which you want a timezone.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: :class:`geopy.timezone.Timezone`.
"""
ensure_pytz_is_installed()
try:
lat, lng = self._coerce_point_to_string(query).split(',')
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
"lat": lat,
"lng": lng,
"username": self.username,
}
url = "?".join((self.api_timezone, urlencode(params)))
logger.debug("%s.reverse_timezone: %s", self.__class__.__name__, url)
return self._call_geocoder(url, self._parse_json_timezone, timeout=timeout)
def _raise_for_error(self, body):
err = body.get('status')
if err:
code = err['value']
message = err['message']
# http://www.geonames.org/export/webservice-exception.html
if message.startswith("user account not enabled to use"):
raise GeocoderInsufficientPrivileges(message)
if code == 10:
raise GeocoderAuthenticationFailure(message)
if code in (18, 19, 20):
raise GeocoderQuotaExceeded(message)
raise GeocoderServiceError(message)
def _parse_json_timezone(self, response):
self._raise_for_error(response)
timezone_id = response.get("timezoneId")
if timezone_id is None:
# Sometimes (e.g. for Antarctica) GeoNames doesn't return
# a `timezoneId` value, but it returns GMT offsets.
# Apparently GeoNames always returns these offsets -- for
# every single point on the globe.
raw_offset = response["rawOffset"]
return from_fixed_gmt_offset(raw_offset, raw=response)
else:
return from_timezone_name(timezone_id, raw=response)
def _parse_json(self, doc, exactly_one):
"""
Parse JSON response body.
"""
places = doc.get('geonames', [])
self._raise_for_error(doc)
if not len(places):
return None
def parse_code(place):
"""
Parse each record.
"""
latitude = place.get('lat', None)
longitude = place.get('lng', None)
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
else:
return None
placename = place.get('name')
state = place.get('adminName1', None)
country = place.get('countryName', None)
location = ', '.join(
[x for x in [placename, state, country] if x]
)
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_code(places[0])
else:
return [parse_code(place) for place in places]
|
geopy/geopy
|
geopy/geocoders/geonames.py
|
Python
|
mit
| 12,560
|
#!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ntpath
import posixpath
import os
import re
import subprocess
import sys
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
import gyp.MSVSSettings as MSVSSettings
import gyp.common
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
""" Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if len(path) > 0 and path[-1] == '\\':
path = path[:-1]
return path
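# A small worked example (assuming fixpath_prefix is unset); values are shown
# as raw characters, not Python string literals:
#   subdir/foo.cc    ->  subdir\foo.cc
#   $(IntDir)/gen/   ->  $(IntDir)\gen
# i.e. slashes become backslashes and a trailing backslash is dropped.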
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
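# Worked example (configuration data is illustrative):
#   _ConfigFullName('Debug_x64', {'msvs_configuration_platform': 'x64'})
#   -> 'Debug|x64'
# and a config without an explicit platform falls back to 'Win32', e.g.
#   _ConfigFullName('Release', {})  ->  'Release|Win32'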
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += (
'bash -c "%(cmd)s"')
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Fix the paths
# If the argument starts with a slash, it's probably a command line switch
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = [_FixPath(i) for i in inputs]
outputs = [_FixPath(i) for i in outputs]
tool = MSVSProject.Tool(
'VCCustomBuildTool', {
'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
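# Worked example (paths are illustrative):
#   _RuleExpandPath('$(IntDir)/$(InputName)_gen.cc', 'src/idl/thing.idl')
#   -> '$(IntDir)/thing_gen.cc'
#   _RuleExpandPath('$(InputPath).bak', 'src/idl/thing.idl')
#   -> 'src/idl/thing.idl.bak'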
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question
trigger_file: the source file that triggered the rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = [_FixPath(i) for i in rule.get('inputs', [])]
raw_outputs = [_FixPath(i) for i in rule.get('outputs', [])]
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename))
rules_file.Create(spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = [_FixPath(i) for i in r.get('inputs', [])]
outputs = [_FixPath(i) for i in r.get('outputs', [])]
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.Write()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
file.write('\tmkdir -p %s\n' % od)
file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
file.write('\t%s\n\n' % cmd)
# Close up the file.
file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=[_FixPath(i) for i in all_inputs],
outputs=[_FixPath(i) for i in all_outputs],
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this."""
s = s.replace('%', '%%')
return s
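# Worked example (value is illustrative):
#   _EscapeEnvironmentVariableExpansion('50%DONE')  ->  '50%%DONE'
# so the %...% span is not treated as an environment variable reference.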
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument, so that the Win32
CommandLineToArgv function will turn the escaped result back into the
original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this."""
def replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
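# Worked example, shown as raw characters rather than Python literals:
#   _EscapeCommandLineArgumentForMSVS('say "hi"')  produces  "say \"hi\""
# i.e. each embedded quote gains an odd number of preceding backslashes and
# the whole argument is wrapped in unescaped quotes, so CommandLineToArgv
# recovers the original string.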
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention."""
def replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
list = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(list), 2):
list[i] = delimiters_replacer_regex.sub(replace, list[i])
# Concatenate back into a single string
s = '"'.join(list)
if len(list) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follows this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
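# Worked example, shown as raw characters (output computed from the code above):
#   _EscapeVCProjCommandLineArgListItem('a,b')  ->  a","b
# i.e. the comma is wrapped in VCProj-style quotes so it is not taken as a
# list-item separator.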
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Only add outputs when the rule asks for them to be processed as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set([_FixPath(i) for i in inputs])
outputs = set([_FixPath(i) for i in outputs])
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set([_FixPath(s) for s in actions_to_add.keys()])
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project
Arguments:
proj_path: Path of the vcproj file to generate.
spec: The target dictionary containing the properties of the target.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
_GenerateMSVSProject(project, options, version)
def _GenerateMSVSProject(project, options, version):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version=version)
p.Create(spec['target_name'], guid=project.guid, platforms=platforms)
# Get directory project file is in.
gyp_dir = os.path.split(project.path)[0]
gyp_file = posixpath.split(project.build_file)[1]
gyp_path = _NormalizedSource(gyp_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, gyp_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
sources, excluded_sources = _PrepareListOfSources(project, spec,
relative_path_of_gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, gyp_dir, options, spec,
sources, excluded_sources,
actions_to_add)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources))
# Add in files.
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompileHeaderStubs(p, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.Write()
def _GetUniquePlatforms(spec):
"""Return the list of unique platforms for this spec, e.g ['win32', ...]
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version=version)
user_file.Create(spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project. It's a number defined
by Microsoft. May raise an exception.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError, e:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Many settings in a vcproj file are specific to a configuration. This
function the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(config, spec)
out_file, vc_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb')
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = [_FixPath(i) for i in include_dirs]
resource_include_dirs = [_FixPath(i) for i in resource_include_dirs]
return include_dirs, resource_include_dirs
def _GetLibraries(config, spec):
"""Returns the list of libraries for this configuration.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
return [re.sub('^(\-l)', '', lib) for lib in libraries]
def _GetOutputFilePathAndTool(spec):
"""Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A pair of (file path, name of the tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'static_library': ('VCLibrarianTool', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionnary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
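# Worked example (define values are illustrative):
#   a 'defines' list of ['NDEBUG', ['FOO', 1]] becomes ['NDEBUG', 'FOO=1'].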
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ""
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError('Multiple module definition files in one target, '
'target %s lists multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
""" Convert the content of the tools array to a form expected by
VisualStudio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = [_FixPath(i) for i in vsprops_dirs]
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
if not prepared_attrs.has_key('OutputDirectory'):
prepared_attrs['OutputDirectory'] = '$(SolutionDir)$(ConfigurationName)'
if not prepared_attrs.has_key('IntermediateDirectory'):
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(project, spec, relative_path_of_gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
project: the MSVSProject object.
spec: The target dictionary containing the properties of the target.
relative_path_of_gyp_file: The relative path of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources)
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(relative_path_of_gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
      list of excluded IDL files)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = [_FixPath(i) for i in sources]
# Convert to proper windows form.
excluded_sources = [_FixPath(i) for i in excluded_sources]
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for config_name, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for config_name, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompileHeaderStubs(p, spec):
# Handle pre-compiled headers source stubs specially.
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.Write()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
if path == '':
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node.keys():
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
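# Illustrative note (added; not part of the original generator): for targets
# 'src/base/base.gyp:base' and 'src/net/net.gyp:net', _GetPathDict builds
#   {'src': {'base': {'base.vcproj': <project>},
#            'net': {'net.vcproj': <project>}}},
# the while-loop in _GatherSolutionFolders strips the single top-level 'src'
# bucket, _CollapseSingles replaces each folder that holds only its own
# .vcproj with the project itself, and _DictsToFolders only creates '(name)'
# MSVSFolder entries for directories that still contain more than one entry.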
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fixpath_prefix = None
if options.generator_output:
projectDirPath = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fixpath_prefix = gyp.common.RelativePath(projectDirPath,
os.path.dirname(proj_path))
return proj_path, fixpath_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A dictionary of the created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = \
MSVSVersion.SelectVisualStudioVersion(generator_flags.get('msvs_version',
'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
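# Illustrative note (added; not part of the original generator): a 32-bit
# Python running under WOW64 on 64-bit Windows reports
# PROCESSOR_ARCHITECTURE=x86 but PROCESSOR_ARCHITEW6432=AMD64, which is why
# both variables are checked before MSVS_OS_BITS is set to 64.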
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
generator_flags = params.get('generator_flags', {})
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version)
fixpath_prefix = None
for build_file in data.keys():
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
|
nawawi/wkhtmltopdf
|
webkit/Source/ThirdParty/gyp/pylib/gyp/generator/msvs.py
|
Python
|
lgpl-3.0
| 58,002
|
#!/usr/bin/python
# Written for Python 3.4
#
# The Coding Dead
# /r/dailyprogrammer Challenge #186 Intermediate The Coding Dead
# https://www.reddit.com/r/dailyprogrammer/comments/2kwfqr/10312014_challenge_186_special_code_or_treat/
# Simulates a map randomly populated with Zombies, Hunters, Victims
# Load modules
import sys
import random
##############################
# Prints the map in a nicely formatted way
# Takes a type and position list
def printMap(type, pos):
map = [x[:] for x in [["-"]*mapDim]*mapDim]
for i in range(len(type)):
#print("For index {0}, type is {1} and position is {2}.".format(i, type[i], pos[i]))
map[pos[i][0]][pos[i][1]] = type[i]
for i in map:
for j in i:
print(j, end="")
print()
##############################
# Initialize the map
def initialize():
initType = []
initPos = []
# Create a list of random positions
pos = []
while len(pos) < (zomb+hunt+vict):
num = random.randrange(totSpaces)
if num not in pos:
pos.append(num)
i = 0
while i < zomb:
initType.append("Z")
initPos.append((pos[i]//mapDim, pos[i]%mapDim))
i += 1
while i < (zomb+hunt):
initType.append("H")
initPos.append((pos[i]//mapDim, pos[i]%mapDim))
i += 1
while i < (zomb+hunt+vict):
initType.append("V")
initPos.append((pos[i]//mapDim, pos[i]%mapDim))
i += 1
print("The initial map is:")
printMap(initType, initPos)
print("$$$$$$$$$$$$$$$$$$$$")
return (initType, initPos)
##############################
# Move the creatures
def moveCreatures(type, pos):
nextType = []
nextPos = []
for i in range(len(type)):
print("\nCreature {0} is {1} at {2}.".format(i, type[i], pos[i]))
if type[i] == "V":
# Victims only move if zombie nearby
threat = checkNearby(type, pos, i)
print("There are {0} threats nearby.".format(len(threat)))
if threat != []:
dir = random.randrange(8)
newPos = move(dir, pos, i)
if newPos not in nextPos:
nextPos.append(newPos)
else:
nextPos.append(pos[i])
nextType.append("V")
print("New position: {0}".format(nextPos[-1]))
else:
nextPos.append(pos[i])
nextType.append("V")
elif type[i] == "H":
dir = random.randrange(8)
newPos = move(dir, pos, i)
if newPos not in nextPos:
nextPos.append(newPos)
else:
nextPos.append(pos[i])
nextType.append("H")
print("New position: {0}".format(nextPos[-1]))
else:
dir = random.randrange(0, 8, 2)
newPos = move(dir, pos, i)
if newPos not in nextPos:
nextPos.append(newPos)
else:
nextPos.append(pos[i])
nextType.append("Z")
print("New position: {0}".format(nextPos[-1]))
return (nextType, nextPos)
##############################
# Generalized movement
# Takes a direction, the full position list and the index of the creature
# 0 = up - (-1, 0)
# 1 = up right - (-1, 1)
# 2 = right - (0, 1)
# 3 = down right - (1, 1)
# 4 = down - (1, 0)
# 5 = down left - (1, -1)
# 6 = left - (0, -1)
# 7 = up left - (-1, -1)
def move(dir, pos, i):
if dir == 0:
print("Attempting to move up.")
if pos[i][0] > 0: # Check we're not at the top of the map already
newPos = (pos[i][0]-1, pos[i][1])
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 1:
print("Attempting to move up-right.")
if (pos[i][0] > 0) and (pos[i][1] < mapDim-1):
newPos = (pos[i][0]-1, pos[i][1]+1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 2:
print("Attempting to move right.")
if pos[i][1] < mapDim-1:
newPos = (pos[i][0], pos[i][1]+1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 3:
print("Attempting to move down-right.")
if (pos[i][0] < mapDim-1) and (pos[i][1] < mapDim-1):
newPos = (pos[i][0]+1, pos[i][1]+1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 4:
print("Attempting to move down.")
if pos[i][0] < mapDim-1:
newPos = (pos[i][0]+1, pos[i][1])
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 5:
print("Attempting to move down-left.")
if (pos[i][0] < mapDim-1) and (pos[i][1] > 0):
newPos = (pos[i][0]+1, pos[i][1]-1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 6:
print("Attempting to move left.")
if pos[i][1] > 0:
newPos = (pos[i][0], pos[i][1]-1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
elif dir == 7:
print("Attempting to move up-left.")
if (pos[i][0] > 0) and (pos[i][1] > 0):
newPos = (pos[i][0]-1, pos[i][1]-1)
if newPos not in pos:
return newPos
else:
return pos[i]
else:
return pos[i]
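##############################
# Hedged sketch (added for illustration; not part of the original script).
# The same bounds and occupancy rule as move(), expressed with the delta
# table from the comment above.  It is unused below; move() remains the
# function the simulation calls.
moveDeltas = [(-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1), (-1,-1)]
def moveTable(dir, pos, i):
    dr, dc = moveDeltas[dir]
    newPos = (pos[i][0]+dr, pos[i][1]+dc)
    if 0 <= newPos[0] < mapDim and 0 <= newPos[1] < mapDim and newPos not in pos:
        return newPos
    return pos[i]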
##############################
# Check around a creature to see if there is anyone next to them
# For victims and hunters check all 8 spaces for a zombie
# For zombies check 4 adjacent spaces for victim or hunter
# Returns a list of indices with nearby threats/prey
def checkNearby(type, pos, ind):
check8 = [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)]
check4 = [(-1,0), (0,-1), (1,0), (0,1)]
nearby = []
if (type[ind] == "H") or (type[ind] == "V"):
for i in check8:
if (pos[ind][0]+i[0], pos[ind][1]+i[1]) in pos:
j = pos.index((pos[ind][0]+i[0], pos[ind][1]+i[1]))
if type[j] == "Z":
nearby.append(j)
else:
for i in check4:
if (pos[ind][0]+i[0], pos[ind][1]+i[1]) in pos:
j = pos.index((pos[ind][0]+i[0], pos[ind][1]+i[1]))
if (type[j] == "H") or (type[j] == "V"):
nearby.append(j)
return nearby
##############################
# Hunters slay zombies
def slayZomb(type, pos):
newType = list(type)
newPos = list(pos)
global slay1
slay1.append(0)
global slay2
slay2.append(0)
for i in range(len(type)):
if type[i] == "H":
print("\nHunter at {0}...".format(pos[i]))
targets = checkNearby(newType, newPos, newPos.index((pos[i][0], pos[i][1])) )
targets.sort()
print("has {0} targets.".format(len(targets)))
if len(targets) == 1:
print("Slayed zombie at {0}.".format(newPos[targets[0]]))
newType.pop(targets[0])
newPos.pop(targets[0])
slay1[-1] += 1
elif len(targets) == 2:
print("Slayed zombies at {0} and {1}.".format(newPos[targets[0]], newPos[targets[1]]))
newType.pop(targets[1])
newPos.pop(targets[1])
newType.pop(targets[0])
newPos.pop(targets[0])
slay2[-1] += 1
elif len(targets) > 2:
hit = [None, None]
hit[0] = targets[random.randrange(len(targets))]
hit[1] = targets[random.randrange(len(targets))]
while hit[1] == hit[0]:
hit[1] = targets[random.randrange(len(targets))]
hit.sort()
print("Slayed zombies at {0} and {1}.".format(newPos[hit[0]], newPos[hit[1]]))
newType.pop(hit[1])
newPos.pop(hit[1])
newType.pop(hit[0])
newPos.pop(hit[0])
slay2[-1] += 1
return (newType, newPos)
##############################
# Zombies bite victims or hunters
# People who are bitten immediately turn into zombies
def bite(type, pos):
newType = list(type)
newPos = list(pos)
global biteH
biteH.append(0)
global biteV
biteV.append(0)
for i in range(len(type)):
if type[i] == "Z":
print("\nZombie at {0}...".format(pos[i]))
targets = checkNearby(newType, newPos, i)
targets.sort()
print("has {0} targets.".format(len(targets)))
if len(targets) == 1:
print("Bit {0} at {1}.".format(type[targets[0]], pos[targets[0]]))
if type[targets[0]] == "H":
biteH[-1] += 1
else:
biteV[-1] += 1
newType[targets[0]] = "Z"
elif len(targets) > 1:
atk = targets[ random.randrange(len(targets)) ]
print("Bit {0} at {1}.".format(type[atk], pos[atk]))
if type[atk] == "H":
biteH[-1] += 1
else:
biteV[-1] += 1
newType[atk] = "Z"
return (newType, newPos)
##############################
# Set up variables to be used throughout
mapDim = 20
totSpaces = mapDim**2
# Set a seed for testing purposes
random.seed(12345)
zomb = int(sys.argv[1])
hunt = int(sys.argv[2])
vict = int(sys.argv[3])
ticks = int(sys.argv[4])
# Abort early if the map cannot hold all of the requested creatures.
if (zomb+hunt+vict) > totSpaces:
    sys.exit("You cannot have more creatures than available spaces on the map!")
##############################
# Set up data reporting variables
zMove = [0]
hMove = [0]
vMove = [0]
numZ = [zomb]
numH = [hunt]
numV = [vict]
slay1 = [0]
slay2 = [0]
biteH = [0]
biteV = [0]
##############################
# Run the main portion of the code
creatType, creatPos = initialize()
for i in range(1, ticks+1):
intermedType, intermedPos = moveCreatures(creatType, creatPos)
print("\nAfter moving at tick {0} the map is:".format(i))
printMap(intermedType, intermedPos)
slayType, slayPos = slayZomb(intermedType, intermedPos)
print("\nAfter slaying zombies at tick {0} the map is:".format(i))
printMap(slayType, slayPos)
biteType, bitePos = bite(slayType, slayPos)
print("\nAfter zombies bite at tick {0} the map is:".format(i))
printMap(biteType, bitePos)
|
ghevcoul/Programming-Challenges
|
Reddit DailyProgrammer/186i_theCodingDead.py
|
Python
|
bsd-2-clause
| 9,787
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest.type_util import *
from iptest import run_test
class ComplexTest(unittest.TestCase):
def test_from_string(self):
# complex from string: negative
# - space related
l = ['1.2', '.3', '4e3', '.3e-4', "0.031"]
for x in l:
for y in l:
self.assertRaises(ValueError, complex, "%s +%sj" % (x, y))
self.assertRaises(ValueError, complex, "%s+ %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s - %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s- %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s-\t%sj" % (x, y))
self.assertRaises(ValueError, complex, "%sj+%sj" % (x, y))
self.assertEqual(complex(" %s+%sj" % (x, y)), complex(" %s+%sj " % (x, y)))
def test_misc(self):
self.assertEqual(mycomplex(), complex())
a = mycomplex(1)
b = mycomplex(1,0)
c = complex(1)
d = complex(1,0)
for x in [a,b,c,d]:
for y in [a,b,c,d]:
self.assertEqual(x,y)
self.assertEqual(a ** 2, a)
self.assertEqual(a-complex(), a)
self.assertEqual(a+complex(), a)
self.assertEqual(complex()/a, complex())
self.assertEqual(complex()*a, complex())
self.assertEqual(complex()%a, complex())
self.assertEqual(complex() // a, complex())
self.assertEqual(complex(2), complex(2, 0))
def test_inherit(self):
class mycomplex(complex): pass
a = mycomplex(2+1j)
self.assertEqual(a.real, 2)
self.assertEqual(a.imag, 1)
def test_repr(self):
self.assertEqual(repr(1-6j), '(1-6j)')
def test_infinite(self):
self.assertEqual(repr(1.0e340j), 'infj')
self.assertEqual(repr(-1.0e340j),'-infj')
run_test(__name__)
|
slozier/ironpython2
|
Tests/test_complex.py
|
Python
|
apache-2.0
| 2,104
|
import pickle
import pickletools
from test import support
from test.pickletester import AbstractPickleTests
from test.pickletester import AbstractPickleModuleTests
class OptimizedPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def dumps(self, arg, proto=None):
return pickletools.optimize(pickle.dumps(arg, proto))
def loads(self, buf, **kwds):
return pickle.loads(buf, **kwds)
# Test relies on precise output of dumps()
test_pickle_to_2x = None
def test_main():
support.run_unittest(OptimizedPickleTests)
support.run_doctest(pickletools)
if __name__ == "__main__":
test_main()
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_pickletools.py
|
Python
|
lgpl-3.0
| 668
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional
import pyauto
class IndexedDBTest(pyauto.PyUITest):
"""Test of IndexedDB."""
def _CrashBrowser(self):
"""Crashes the browser by navigating to special URL"""
crash_url = 'about:inducebrowsercrashforrealz'
self.NavigateToURL(crash_url)
def testIndexedDBNullKeyPathPersistence(self):
"""Verify null key path persists after restarting browser."""
url = self.GetHttpURLForDataPath('indexeddb', 'bug_90635.html')
self.NavigateToURL(url + '#part1')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='pass - first run'),
msg='Key paths had unexpected values')
self.RestartBrowser(clear_profile=False)
self.NavigateToURL(url + '#part2')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='pass - second run'),
msg='Key paths had unexpected values')
def testVersionChangeCrashResilience(self):
"""Verify that a VERSION_CHANGE transaction is rolled back
after a renderer/browser crash"""
url = self.GetHttpURLForDataPath('indexeddb', 'version_change_crash.html')
self.NavigateToURL(url + '#part1')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='pass - part1 - complete'),
msg='Failed to prepare database')
self.RestartBrowser(clear_profile=False)
self.NavigateToURL(url + '#part2')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='pass - part2 - crash me'),
msg='Failed to start transaction')
self._CrashBrowser()
self.RestartBrowser(clear_profile=False)
self.NavigateToURL(url + '#part3')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='pass - part3 - rolled back'),
msg='VERSION_CHANGE not completely aborted')
def testConnectionsClosedOnTabClose(self):
"""Verify that open DB connections are closed when a tab is destroyed."""
url = self.GetHttpURLForDataPath('indexeddb', 'version_change_blocked.html')
self.NavigateToURL(url + '#tab1')
pid = self.GetBrowserInfo()['windows'][0]['tabs'][0]['renderer_pid']
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='setVersion(1) complete'),
msg='Version change failed')
    # Open a new tab at a different URL to force a new renderer process
self.AppendTab(pyauto.GURL('about:blank'))
self.NavigateToURL(url + '#tab2')
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='setVersion(2) blocked'),
msg='Version change not blocked as expected')
self.KillRendererProcess(pid)
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='setVersion(2) complete'),
msg='Version change never unblocked')
if __name__ == '__main__':
pyauto_functional.Main()
|
gavinp/chromium
|
chrome/test/functional/indexeddb.py
|
Python
|
bsd-3-clause
| 3,343
|
from setuptools import setup, find_packages
setup(name='ELA',
version='0.1',
description='Energy by Location in America',
long_description='Class project for UW DIRECT',
url='https://github.com/DIRECT-Energy-Storage/ELA',
author='UW DIRECT students',
license='MIT',
packages=find_packages()
# install_requires=[],
# package_data={
# 'ela' : ['ela/data/']
)
|
DIRECT-Energy-Storage/ELA
|
setup.py
|
Python
|
mit
| 428
|
"""
Proxies for the libgeos_c shared lib, GEOS-specific exceptions, and utilities
"""
import atexit
import logging
import os
import sys
import threading
import ctypes
from ctypes import cdll, CDLL, CFUNCTYPE, c_char_p, c_void_p, string_at
from ctypes.util import find_library
import ftools
from ctypes_declarations import prototype, EXCEPTION_HANDLER_FUNCTYPE
# Begin by creating a do-nothing handler and adding to this module's logger.
class NullHandler(logging.Handler):
def emit(self, record):
pass
LOG = logging.getLogger(__name__)
LOG.addHandler(NullHandler())
# Find and load the GEOS and C libraries
# If this ever gets any longer, we'll break it into separate modules
def load_dll(libname, fallbacks=None):
lib = find_library(libname)
if lib is not None:
return CDLL(lib)
else:
if fallbacks is not None:
for name in fallbacks:
try:
return CDLL(name)
except OSError:
# move on to the next fallback
pass
# the end
raise OSError(
"Could not find library %s or load any of its variants %s" % (
libname, fallbacks or []))
if sys.platform.startswith('linux'):
_lgeos = load_dll('geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])
free = load_dll('c').free
free.argtypes = [c_void_p]
free.restype = None
elif sys.platform == 'darwin':
alt_paths = [
# The Framework build from Kyng Chaos:
"/Library/Frameworks/GEOS.framework/Versions/Current/GEOS",
# macports
'/opt/local/lib/libgeos_c.dylib',
]
_lgeos = load_dll('geos_c', fallbacks=alt_paths)
free = load_dll('c').free
free.argtypes = [c_void_p]
free.restype = None
elif sys.platform == 'win32':
try:
egg_dlls = os.path.abspath(os.path.join(os.path.dirname(__file__),
r"..\DLLs"))
wininst_dlls = os.path.abspath(os.__file__ + "../../../DLLs")
original_path = os.environ['PATH']
os.environ['PATH'] = "%s;%s;%s" % (egg_dlls, wininst_dlls, original_path)
_lgeos = CDLL("geos.dll")
except (ImportError, WindowsError, OSError):
raise
def free(m):
try:
cdll.msvcrt.free(m)
except WindowsError:
# XXX: See http://trac.gispython.org/projects/PCL/ticket/149
pass
elif sys.platform == 'sunos5':
_lgeos = load_dll('geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])
free = CDLL('libc.so.1').free
free.argtypes = [c_void_p]
free.restype = None
else: # other *nix systems
_lgeos = load_dll('geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])
free = load_dll('c', fallbacks=['libc.so.6']).free
free.argtypes = [c_void_p]
free.restype = None
def _geos_c_version():
func = _lgeos.GEOSversion
func.argtypes = []
func.restype = c_char_p
v = func().split('-')[2]
return tuple(int(n) for n in v.split('.'))
geos_capi_version = geos_c_version = _geos_c_version()
# If we have the new interface, then record a baseline so that we know what
# additional functions are declared in ctypes_declarations.
if geos_c_version >= (1,5,0):
start_set = set(_lgeos.__dict__)
# Apply prototypes for the libgeos_c functions
prototype(_lgeos, geos_c_version)
# If we have the new interface, automatically detect all function
# declarations, and declare their re-entrant counterpart.
if geos_c_version >= (1,5,0):
end_set = set(_lgeos.__dict__)
new_func_names = end_set - start_set
for func_name in new_func_names:
new_func_name = "%s_r" % func_name
if hasattr(_lgeos, new_func_name):
new_func = getattr(_lgeos, new_func_name)
old_func = getattr(_lgeos, func_name)
new_func.restype = old_func.restype
if old_func.argtypes is None:
# Handle functions that didn't take an argument before,
# finishGEOS.
new_func.argtypes = [c_void_p]
else:
new_func.argtypes = [c_void_p] + old_func.argtypes
if old_func.errcheck is not None:
new_func.errcheck = old_func.errcheck
# Handle special case.
_lgeos.initGEOS_r.restype = c_void_p
_lgeos.initGEOS_r.argtypes = [EXCEPTION_HANDLER_FUNCTYPE, EXCEPTION_HANDLER_FUNCTYPE]
_lgeos.finishGEOS_r.argtypes = [c_void_p]
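# Illustrative note (added; not part of the original module): for a plain
# declaration such as GEOSArea, the loop above locates GEOSArea_r, copies the
# restype/argtypes/errcheck of the non-reentrant version and prepends a
# c_void_p handle argument; LGEOS15 below then binds that handle with
# ftools.partial so callers keep the handle-free signature.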
# Exceptions
class ReadingError(Exception):
pass
class DimensionError(Exception):
pass
class TopologicalError(Exception):
pass
class PredicateError(Exception):
pass
def error_handler(fmt, list):
LOG.error("%s", list)
error_h = EXCEPTION_HANDLER_FUNCTYPE(error_handler)
def notice_handler(fmt, list):
LOG.warning("%s", list)
notice_h = EXCEPTION_HANDLER_FUNCTYPE(notice_handler)
def cleanup():
if _lgeos is not None :
_lgeos.finishGEOS()
atexit.register(cleanup)
# Errcheck functions
def errcheck_wkb(result, func, argtuple):
if not result:
return None
size_ref = argtuple[-1]
size = size_ref.contents
retval = ctypes.string_at(result, size.value)[:]
lgeos.GEOSFree(result)
return retval
def errcheck_just_free(result, func, argtuple):
retval = string_at(result)
lgeos.GEOSFree(result)
return retval
def errcheck_predicate(result, func, argtuple):
if result == 2:
raise PredicateError("Failed to evaluate %s" % repr(func))
return result
class LGEOSBase(threading.local):
"""Proxy for the GEOS_C DLL/SO
This is a base class. Do not instantiate.
"""
methods = {}
def __init__(self, dll):
self._lgeos = dll
self.geos_handle = None
class LGEOS14(LGEOSBase):
"""Proxy for the GEOS_C DLL/SO API version 1.4
"""
geos_capi_version = (1, 4, 0)
def __init__(self, dll):
super(LGEOS14, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS(notice_h, error_h)
keys = self._lgeos.__dict__.keys()
for key in keys:
setattr(self, key, getattr(self._lgeos, key))
self.GEOSFree = self._lgeos.free
self.GEOSGeomToWKB_buf.errcheck = errcheck_wkb
self.GEOSGeomToWKT.errcheck = errcheck_just_free
self.GEOSRelate.errcheck = errcheck_just_free
for pred in ( self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSHasZ
):
pred.errcheck = errcheck_predicate
self.methods['area'] = self.GEOSArea
self.methods['boundary'] = self.GEOSBoundary
self.methods['buffer'] = self.GEOSBuffer
self.methods['centroid'] = self.GEOSGetCentroid
self.methods['representative_point'] = self.GEOSPointOnSurface
self.methods['convex_hull'] = self.GEOSConvexHull
self.methods['distance'] = self.GEOSDistance
self.methods['envelope'] = self.GEOSEnvelope
self.methods['length'] = self.GEOSLength
self.methods['has_z'] = self.GEOSHasZ
self.methods['is_empty'] = self.GEOSisEmpty
self.methods['is_ring'] = self.GEOSisRing
self.methods['is_simple'] = self.GEOSisSimple
self.methods['is_valid'] = self.GEOSisValid
self.methods['disjoint'] = self.GEOSDisjoint
self.methods['touches'] = self.GEOSTouches
self.methods['intersects'] = self.GEOSIntersects
self.methods['crosses'] = self.GEOSCrosses
self.methods['within'] = self.GEOSWithin
self.methods['contains'] = self.GEOSContains
self.methods['overlaps'] = self.GEOSOverlaps
self.methods['equals'] = self.GEOSEquals
self.methods['equals_exact'] = self.GEOSEqualsExact
self.methods['relate'] = self.GEOSRelate
self.methods['difference'] = self.GEOSDifference
self.methods['symmetric_difference'] = self.GEOSSymDifference
self.methods['union'] = self.GEOSUnion
self.methods['intersection'] = self.GEOSIntersection
self.methods['simplify'] = self.GEOSSimplify
self.methods['topology_preserve_simplify'] = \
self.GEOSTopologyPreserveSimplify
class LGEOS15(LGEOSBase):
"""Proxy for the reentrant GEOS_C DLL/SO API version 1.5
"""
geos_capi_version = (1, 5, 0)
def __init__(self, dll):
super(LGEOS15, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS_r(notice_h, error_h)
keys = self._lgeos.__dict__.keys()
for key in filter(lambda x: not x.endswith('_r'), keys):
if key + '_r' in keys:
reentr_func = getattr(self._lgeos, key + '_r')
attr = ftools.partial(reentr_func, self.geos_handle)
attr.__name__ = reentr_func.__name__
setattr(self, key, attr)
else:
setattr(self, key, getattr(self._lgeos, key))
if not hasattr(self, 'GEOSFree'):
self.GEOSFree = self._lgeos.free
self.GEOSGeomToWKB_buf.func.errcheck = errcheck_wkb
self.GEOSGeomToWKT.func.errcheck = errcheck_just_free
self.GEOSRelate.func.errcheck = errcheck_just_free
for pred in ( self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSHasZ
):
pred.func.errcheck = errcheck_predicate
self.GEOSisValidReason.func.errcheck = errcheck_just_free
self.methods['area'] = self.GEOSArea
self.methods['boundary'] = self.GEOSBoundary
self.methods['buffer'] = self.GEOSBuffer
self.methods['centroid'] = self.GEOSGetCentroid
self.methods['representative_point'] = self.GEOSPointOnSurface
self.methods['convex_hull'] = self.GEOSConvexHull
self.methods['distance'] = self.GEOSDistance
self.methods['envelope'] = self.GEOSEnvelope
self.methods['length'] = self.GEOSLength
self.methods['has_z'] = self.GEOSHasZ
self.methods['is_empty'] = self.GEOSisEmpty
self.methods['is_ring'] = self.GEOSisRing
self.methods['is_simple'] = self.GEOSisSimple
self.methods['is_valid'] = self.GEOSisValid
self.methods['disjoint'] = self.GEOSDisjoint
self.methods['touches'] = self.GEOSTouches
self.methods['intersects'] = self.GEOSIntersects
self.methods['crosses'] = self.GEOSCrosses
self.methods['within'] = self.GEOSWithin
self.methods['contains'] = self.GEOSContains
self.methods['overlaps'] = self.GEOSOverlaps
self.methods['equals'] = self.GEOSEquals
self.methods['equals_exact'] = self.GEOSEqualsExact
self.methods['relate'] = self.GEOSRelate
self.methods['difference'] = self.GEOSDifference
self.methods['symmetric_difference'] = self.GEOSSymDifference
self.methods['union'] = self.GEOSUnion
self.methods['intersection'] = self.GEOSIntersection
self.methods['prepared_intersects'] = self.GEOSPreparedIntersects
self.methods['prepared_contains'] = self.GEOSPreparedContains
self.methods['prepared_contains_properly'] = \
self.GEOSPreparedContainsProperly
self.methods['prepared_covers'] = self.GEOSPreparedCovers
self.methods['simplify'] = self.GEOSSimplify
self.methods['topology_preserve_simplify'] = \
self.GEOSTopologyPreserveSimplify
class LGEOS16(LGEOS15):
"""Proxy for the reentrant GEOS_C DLL/SO API version 1.6
"""
geos_capi_version = (1, 6, 0)
def __init__(self, dll):
super(LGEOS16, self).__init__(dll)
class LGEOS16LR(LGEOS16):
"""Proxy for the reentrant GEOS_C DLL/SO API version 1.6 with linear
referencing
"""
geos_capi_version = geos_c_version
def __init__(self, dll):
super(LGEOS16LR, self).__init__(dll)
self.methods['parallel_offset'] = self.GEOSSingleSidedBuffer
self.methods['project'] = self.GEOSProject
self.methods['project_normalized'] = self.GEOSProjectNormalized
self.methods['interpolate'] = self.GEOSInterpolate
self.methods['interpolate_normalized'] = \
self.GEOSInterpolateNormalized
if geos_c_version >= (1, 6, 0):
if hasattr(_lgeos, 'GEOSProject'):
L = LGEOS16LR
else:
L = LGEOS16
elif geos_c_version >= (1, 5, 0):
L = LGEOS15
else:
L = LGEOS14
lgeos = L(_lgeos)
|
aaronr/shapely
|
shapely/geos.py
|
Python
|
bsd-3-clause
| 13,128
|
import chainer
from chainer import function_node
from chainer.utils import type_check
def reshape(x, channels):
"""Referenced from https://github.com/takedarts/resnetfamily"""
if x.shape[1] < channels:
xp = chainer.cuda.get_array_module(x)
p = xp.zeros(
(x.shape[0], channels - x.shape[1], x.shape[2], x.shape[3]),
dtype=x.dtype)
x = chainer.functions.concat((x, p), axis=1)
elif x.shape[1] > channels:
x = x[:, :channels, :]
return x
class ResidualAdd(function_node.FunctionNode):
"""
    Be careful that this function is not commutative; lhs and rhs act differently!
Add different channel shaped array.
lhs is h, and rhs is x.
output channel is always h.shape[1].
If x.shape[1] is smaller than h.shape[1], x is virtually padded with 0
    If x.shape[1] is bigger than h.shape[1], only the first h.shape[1]
    channels of x are used to add x to h.
"""
def __init__(self):
self.lhs_ch = None
self.rhs_ch = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
lhs = in_types[0]
rhs = in_types[1]
type_check.expect(
# lhs.dtype.kind == 'f',
# rhs.dtype.kind == 'f',
lhs.ndim == 4,
rhs.ndim == 4,
lhs.shape[0] == rhs.shape[0],
# lhs.shape[1] >= rhs.shape[1],
lhs.shape[2] == rhs.shape[2],
lhs.shape[3] == rhs.shape[3],
)
def forward(self, x):
lhs, rhs = x[:2]
self.lhs_ch = lhs.shape[1]
self.rhs_ch = rhs.shape[1]
if self.lhs_ch < self.rhs_ch:
lhs += rhs[:, :self.lhs_ch, :, :]
return lhs,
# pyramid add
# rhs[:, :self.lhs_ch, :, :] += lhs
# return rhs,
elif self.lhs_ch > self.rhs_ch:
lhs[:, :self.rhs_ch, :, :] += rhs
return lhs,
else:
lhs += rhs
return lhs,
def backward(self, indexes, gy):
if self.lhs_ch < self.rhs_ch:
return gy[0], reshape(gy[0], self.rhs_ch)
# pyramid add
# return gy[0][:, :self.lhs_ch, :, :], gy[0]
elif self.lhs_ch > self.rhs_ch:
return gy[0], gy[0][:, :self.rhs_ch, :, :]
else:
return gy[0], gy[0]
def residual_add(lhs, rhs):
"""
# x: (mb, ch_x, h, w), h: (mb, ch_h, h, w)
# output h: (mb, ch_h, h, w). shape is always same with h (lhs),
# regardless of the size of `ch_x`.
    h = residual_add(h, x)
Args:
lhs:
rhs:
Returns:
"""
return ResidualAdd().apply((lhs, rhs))[0]
# example usage
# h = residual_add(h, x)
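# Illustrative note (added; not part of the original module): with
# lhs.shape == (mb, 64, h, w) and rhs.shape == (mb, 32, h, w), rhs is added
# onto the first 32 channels of lhs and the output stays (mb, 64, h, w);
# with lhs.shape == (mb, 32, h, w) and rhs.shape == (mb, 64, h, w), only the
# first 32 channels of rhs are used and the output stays (mb, 32, h, w).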
|
corochann/chainerex
|
chainerex/functions/residual_add.py
|
Python
|
mit
| 2,746
|
import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from pony.db import (
Base,
DBSession,
)
from pony.models import Group, Kind
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
DBSession.add(Kind(name='earth_ponies'))
DBSession.add(Kind(name='pegasis'))
DBSession.add(Kind(name='unicorns'))
DBSession.add(Group(name='stallion'))
DBSession.add(Group(name='filly'))
DBSession.add(Group(name='mare'))
DBSession.add(Group(name='colt'))
|
v0y/traversal_pony
|
pony/pony/scripts/initializedb.py
|
Python
|
mit
| 1,161
|
#python
import k3d
import testing
setup = testing.setup_mesh_modifier_test("PolyGrid", "ScalePoints")
setup.source.rows = 1
setup.source.columns = 1
mesh_selection = k3d.geometry.selection.create(1.0)
setup.modifier.mesh_selection = mesh_selection
setup.modifier.x = 2
testing.require_valid_mesh(setup.document, setup.modifier.get_property("output_mesh"))
testing.require_similar_mesh(setup.document, setup.modifier.get_property("output_mesh"), "mesh.selection.all", 1)
|
barche/k3d
|
tests/mesh/mesh.selection.all.py
|
Python
|
gpl-2.0
| 476
|
from unittest import TestCase
from even_fibonacci import *
class TestEvenFibonacciSum(TestCase):
def test_no_numbers(self):
self.assertEqual(even_fibonacci(1), 0)
def test_one_even_number(self):
self.assertEqual(even_fibonacci(2), 2)
def test_one_even_one_odd_number(self):
self.assertEqual(even_fibonacci(3), 2)
def test_two_even_numbers(self):
self.assertEqual(even_fibonacci(8), 10)
def test_project_euler_input(self):
self.assertEqual(even_fibonacci(3999999), 4613732)
def test_numbers_smaller_than_400_billion_and_one(self):
self.assertEqual(even_fibonacci(400000000001), 478361013020)
class TestFibonacciNumbers(TestCase):
def test_no_numbers(self):
self.assertEqual(fibonacci_numbers_up_to(0), [])
def test_two_number(self):
self.assertEqual(fibonacci_numbers_up_to(1), [1, 1])
def test_three_numbers(self):
self.assertEqual(fibonacci_numbers_up_to(2), [1, 1, 2])
def test_four_numbers(self):
self.assertEqual(fibonacci_numbers_up_to(3), [1, 1, 2, 3])
def test_up_to_10(self):
self.assertEqual(fibonacci_numbers_up_to(10), [1, 1, 2, 3, 5, 8])
|
plilja/project-euler
|
problem_2/test_even_fibonacci.py
|
Python
|
apache-2.0
| 1,197
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyScs(PythonPackage):
"""SCS: splitting conic solver"""
homepage = "https://github.com/cvxgrp/scs"
pypi = "scs/scs-2.1.1-2.tar.gz"
version('2.1.1-2', sha256='f816cfe3d4b4cff3ac2b8b96588c5960ddd2a3dc946bda6b09db04e7bc6577f2')
variant('cuda', default=False, description="Also compile the GPU CUDA version of SCS")
variant('float32', default=False, description="Use 32 bit (single precision) floats, default is 64 bit")
variant('extra_verbose', default=False, description="Extra verbose SCS (for debugging)")
variant('int32', default=False, description="Use 32 bit ints")
variant('blas64', default=False, description="Use 64 bit ints for the blas/lapack libs")
depends_on('py-setuptools', type='build')
depends_on('py-numpy@1.7:', type=('build', 'run'))
depends_on('py-scipy@0.13.2:', type=('build', 'run'))
def build_args(self, spec, prefix):
args = []
if '+cuda' in spec or '+float32' in spec or '+int32' in spec or\
'+extra_verbose' in spec or '+blas64' in spec:
args = ['--scs']
if '+cuda' in spec:
args.append('--gpu')
if '+float32' in spec:
args.append('--float')
if '+extra_verbose' in spec:
args.append('--extraverbose')
if '+int32' in spec:
args.append('--int')
if '+blas64' in spec:
args.append('--blas64')
return args
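# Illustrative note (added; not part of the original package): given the
# variants above, a spec such as 'py-scs+cuda+float32' makes build_args()
# return ['--scs', '--gpu', '--float'], while the default 'py-scs' spec
# yields an empty list.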
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-scs/package.py
|
Python
|
lgpl-2.1
| 1,662
|
#!/usr/bin/python
########################################################################
# 1 August 2014
# Patrick Lombard, Centre for Stem Cell Research
# Core Bioinformatics Group
# University of Cambridge
# All rights reserved.
########################################################################
import os, sys, re
import subprocess
import argparse
from collections import defaultdict
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import IntVector, FloatVector, StrVector
def diffReps(inv_conds, cond1, cond2, chromsizes, method="nb", controls=None, counts=None):
#Can't deal with reps yet
if counts:
sample1 = inv_conds[cond1][0]
sample2 = inv_conds[cond2][0]
c1 = read_gapdh_counts_file(counts[sample1])
c2 = read_gapdh_counts_file(counts[sample2])
c1 = float(c1)/1000
c2 = float(c2)/1000
output = open("norm.txt", "w")
output.write("treatment\t{}\n".format(c1)),
output.write("control\t{}\n".format(c2)),
output.close()
command = "diffReps.pl --treatment {0} --control {1} --report {2}_vs_{3}_diffReps.txt --chrlen {4} -me {5} --norm norm.txt --nproc 8".format(inv_conds[cond1][0], inv_conds[cond2][0],
cond1, cond2, chromsizes, method )
elif controls:
backt1 = []
backt2 = []
		for sample in inv_conds[cond1]:
backt1.append(controls[sample])
		for sample in inv_conds[cond2]:
backt2.append(controls[sample])
command = "diffReps.pl --treatment {0} --control {1} --btr {2} --bco {3} --report {4}_vs_{5}_diffReps.txt --chrlen {6} -me {7}".format(inv_conds[cond1], inv_conds[cond2],
backt1, backt2, cond1, cond2, chromsizes, method)
else:
command = "diffReps.pl --treatment {0} --control {1} --report {2}_vs_{3}_diffReps.txt --chrlen {4} -me {5}".format(inv_conds[cond1][0], inv_conds[cond2][0], cond1, cond2,
chromsizes, method)
print command
subprocess.call(command.split())
def read_gapdh_counts_file(ifile):
with open(ifile) as f:
header= next(f)
header= header.rstrip()
word = header.split("\t")
return word[1]
|
pdl30/pychiptools
|
pychiptools/utilities/diffreps.py
|
Python
|
gpl-2.0
| 2,016
|
"""
Lightweight Birdseye backend server
"""
from flask import Flask, jsonify, request
from bird import client
from config import settings
#
# Setup and initialization
#
app = Flask(__name__)
#
# Helpers
#
def _bird_api_base(pk):
bird_servers = settings.BIRD_SERVERS
return bird_servers[int(pk)][1]
#
# Bird API proxy
#
@app.route('/birdseye/api/')
def api_index():
return 'Api endpoints'
@app.route('/birdseye/api/routeserver/')
def api_routeserver_index():
"""List all bird servers"""
result = [{
'id': i,
'name': server[0],
} for i, server in enumerate(settings.BIRD_SERVERS)]
return jsonify({"routeservers": result})
@app.route('/birdseye/api/routeserver/<int:pk>/status/')
def status(pk=None):
"""Get status"""
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
status = bird.status()
# Filter last reboot in case it is not public
if not settings.UI['rs_show_last_reboot']:
status['last_reboot'] = None
return jsonify(status)
@app.route('/birdseye/api/routeserver/<int:pk>/symbols/')
def symbols(pk=None):
"""Get symbols"""
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
return jsonify(bird.symbols())
@app.route('/birdseye/api/routeserver/<int:pk>/tables/')
def tables(pk=None):
"""Get tables"""
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
return jsonify(bird.tables())
@app.route('/birdseye/api/routeserver/<int:pk>/protocol/')
@app.route('/birdseye/api/routeserver/<int:pk>/protocol/<string:protocol>')
def protocol(pk=None, protocol="bgp"):
"""Get protocols: default protocol=bgp"""
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
return jsonify(bird.protocols(protocol))
@app.route('/birdseye/api/routeserver/<int:pk>/routes/filtered/')
def routes_filtered(pk=None):
"""Get filtered routes for routeserver id with protocol"""
protocol_id = request.args.get('protocol', None)
if not protocol_id:
return jsonify({'details': 'no protocol given'}), 404
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
return jsonify(bird.routes_filtered(protocol_id))
@app.route('/birdseye/api/routeserver/<int:pk>/routes/')
def routes(pk=None):
"""Get routes for routeserver id with protocol"""
protocol_id = request.args.get('protocol', None)
if not protocol_id:
return jsonify({ 'details': 'no protocol given' }), 404
bird_api = _bird_api_base(pk)
bird = client.Bird(bird_api)
return jsonify(bird.routes(protocol_id))
@app.route('/birdseye/api/config/')
def rejection_reasons():
return jsonify({"config": settings.FRONTEND_CONFIG})
#
# Single Page React App
#
@app.route('/', defaults={'path': None})
@app.route('/<path:path>')
def index(path):
with open('backend/static/app/index.html', 'r') as f:
# Read page, fix links
content = f.read()
content = content.replace('js/', '/static/app/js/')
content = content.replace('css/', '/static/app/css/')
return content
if __name__ == '__main__':
app.run()
|
ecix/birdseye
|
backend/server.py
|
Python
|
bsd-3-clause
| 3,125
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
import os
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.module_utils._text import to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.plugins import get_all_plugin_loaders
from ansible.utils.vars import load_extra_vars
from ansible.utils.vars import load_options_vars
from ansible.vars import VariableManager
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
########################################################
class AdHocCLI(CLI):
''' is an extra-simple tool/framework/API for doing 'remote things'.
this command allows you to define and run a single task 'playbook' against a set of hosts
'''
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage='%prog <host-pattern> [options]',
runas_opts=True,
inventory_opts=True,
async_opts=True,
output_opts=True,
connect_opts=True,
check_opts=True,
runtask_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
desc="Define and run a single task 'playbook' against a set of hosts",
epilog="Some modules do not make sense in Ad-Hoc (include, meta, etc)",
)
# options unique to ansible ad-hoc
self.parser.add_option('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
self.parser.add_option('-m', '--module-name', dest='module_name',
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
super(AdHocCLI, self).parse()
if len(self.args) < 1:
raise AnsibleOptionsError("Missing target hosts")
elif len(self.args) > 1:
raise AnsibleOptionsError("Extraneous options or arguments")
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
def _play_ds(self, pattern, async, poll):
check_raw = self.options.module_name in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw')
return dict(
name = "Ansible Ad-Hoc",
hosts = pattern,
gather_facts = 'no',
tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args, check_raw=check_raw)), async=async, poll=poll) ]
)
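    # Illustrative note (added; not part of the original CLI): an invocation
    # such as 'ansible all -m ping' reaches _play_ds with pattern='all' and
    # yields a play dict with hosts='all', gather_facts='no' and a single
    # task whose action is the 'ping' module with empty args, carrying
    # whatever --background/--poll values were supplied on the command line.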
def run(self):
''' create and execute the single task playbook '''
super(AdHocCLI, self).run()
# only thing left should be host pattern
pattern = to_text(self.args[0], errors='surrogate_or_strict')
# ignore connection password cause we are local
if self.options.connection == "local":
self.options.ask_pass = False
sshpass = None
becomepass = None
b_vault_pass = None
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
b_vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
loader.set_vault_password(b_vault_pass)
elif self.options.ask_vault_pass:
b_vault_pass = self.ask_vault_passwords()
loader.set_vault_password(b_vault_pass)
variable_manager = VariableManager()
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
variable_manager.options_vars = load_options_vars(self.options)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
variable_manager.set_inventory(inventory)
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
inventory.subset(self.options.subset)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0:
if no_hosts is False and self.options.subset:
# Invalid limit
raise AnsibleError("Specified --limit does not match any hosts")
else:
display.warning("No hosts matched, nothing to do")
if self.options.listhosts:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
err = "No argument passed to %s module" % self.options.module_name
if pattern.endswith(".yml"):
err = err + ' (did you mean to run ansible-playbook?)'
raise AnsibleOptionsError(err)
# Avoid modules that don't work with ad-hoc
if self.options.module_name in ('include', 'include_role'):
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands" % self.options.module_name)
# dynamically load any plugins from the playbook directory
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join('.', obj.subdir)
if os.path.isdir(plugin_path):
obj.add_directory(plugin_path)
play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
if self.callback:
cb = self.callback
elif self.options.one_line:
cb = 'oneline'
else:
cb = 'minimal'
run_tree=False
if self.options.tree:
C.DEFAULT_CALLBACK_WHITELIST.append('tree')
C.TREE_DIR = self.options.tree
run_tree=True
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=self.options,
passwords=passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
)
result = self._tqm.run(play)
finally:
if self._tqm:
self._tqm.cleanup()
if loader:
loader.cleanup_all_tmp_files()
return result
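# A minimal sketch (plain data only, no Ansible imports; the module name, args and
# host pattern below are illustrative) of the one-task play dictionary that
# _play_ds() builds for an ad-hoc run before handing it to Play().load():
EXAMPLE_ADHOC_PLAY = {
    'name': 'Ansible Ad-Hoc',
    'hosts': 'webservers',
    'gather_facts': 'no',
    'tasks': [{'action': {'module': 'ping', 'args': {}}, 'async': 0, 'poll': 15}],
}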
|
prakritish/ansible
|
lib/ansible/cli/adhoc.py
|
Python
|
gpl-3.0
| 8,021
|
import os
import fnmatch
import mimetypes
from src.thumbnailer import Thumbnailer
class Movie:
def __init__(self, path, name, thumbnailer):
self.path = path
self.name = name
self.thumbnailer = thumbnailer
def full_path(self):
return os.path.join(self.path, self.name)
def pretty_name(self):
file_name = os.path.splitext(self.name)[0]
return file_name.replace('.', ' ').title()
def video(self):
return self.full_path()
def thumbnail(self):
return self.thumbnailer.get_thumbnail(self)
def mime(self):
return mimetypes.guess_type(self.full_path())[0]
class MovieLister:
def __init__(self, basedir, extensions):
self.basedir = basedir
self.extensions = extensions
self.thumbnailer = Thumbnailer(basedir)
def list(self):
for root, dirs, files in os.walk(self.basedir):
for extension in self.extensions:
for filename in fnmatch.filter(files, extension):
relative_path = os.path.relpath(root.decode("utf8"), self.basedir)
yield Movie(relative_path, filename.decode("utf8"), self.thumbnailer)
def get(self, movie):
full_path = os.path.join(self.basedir, movie)
if not os.path.isfile(full_path):
return None
for extension in self.extensions:
            if fnmatch.filter([full_path], extension):
path, name = os.path.split(movie)
return Movie(path, name, self.thumbnailer)
return None # No filter matched, invalid movie name
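# A minimal, self-contained sketch (sample file names only) of the two ideas the
# classes above rely on: fnmatch-style extension filtering and the dot-to-space
# title cleanup performed by Movie.pretty_name().
_sample_names = ['Some.Movie.2010.mkv', 'notes.txt']
_matched = fnmatch.filter(_sample_names, '*.mkv')        # -> ['Some.Movie.2010.mkv']
_pretty = [os.path.splitext(n)[0].replace('.', ' ').title() for n in _matched]
# _pretty -> ['Some Movie 2010']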
|
sdemircan/mediapielayer
|
src/movie.py
|
Python
|
mit
| 1,616
|
from .unitary import Unitary
from .unitary_kron import UnitaryKron
__all__ = ['Unitary', 'UnitaryKron']
|
Nehoroshiy/urnn
|
manifolds/__init__.py
|
Python
|
mit
| 100
|
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.checks import Error
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps
@isolate_apps('model_inheritance')
class AbstractInheritanceTests(TestCase):
def test_single_parent(self):
class AbstractBase(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
class AbstractDescendant(AbstractBase):
name = models.CharField(max_length=50)
class Meta:
abstract = True
class DerivedChild(AbstractBase):
name = models.CharField(max_length=50)
class DerivedGrandChild(AbstractDescendant):
pass
self.assertEqual(AbstractDescendant._meta.get_field('name').max_length, 50)
self.assertEqual(DerivedChild._meta.get_field('name').max_length, 50)
self.assertEqual(DerivedGrandChild._meta.get_field('name').max_length, 50)
def test_multiple_parents_mro(self):
class AbstractBaseOne(models.Model):
class Meta:
abstract = True
class AbstractBaseTwo(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
class DescendantOne(AbstractBaseOne, AbstractBaseTwo):
class Meta:
abstract = True
class DescendantTwo(AbstractBaseOne, AbstractBaseTwo):
name = models.CharField(max_length=50)
class Meta:
abstract = True
class Derived(DescendantOne, DescendantTwo):
pass
self.assertEqual(DescendantOne._meta.get_field('name').max_length, 30)
self.assertEqual(DescendantTwo._meta.get_field('name').max_length, 50)
self.assertEqual(Derived._meta.get_field('name').max_length, 50)
def test_multiple_inheritance_cannot_shadow_concrete_inherited_field(self):
class ConcreteParent(models.Model):
name = models.CharField(max_length=255)
class AbstractParent(models.Model):
name = models.IntegerField()
class Meta:
abstract = True
class FirstChild(ConcreteParent, AbstractParent):
pass
class AnotherChild(AbstractParent, ConcreteParent):
pass
self.assertIsInstance(FirstChild._meta.get_field('name'), models.CharField)
self.assertEqual(
AnotherChild.check(),
[Error(
"The field 'name' clashes with the field 'name' "
"from model 'model_inheritance.concreteparent'.",
obj=AnotherChild._meta.get_field('name'),
id="models.E006",
)]
)
def test_virtual_field(self):
class RelationModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class RelatedModelAbstract(models.Model):
field = GenericRelation(RelationModel)
class Meta:
abstract = True
class ModelAbstract(models.Model):
field = models.CharField(max_length=100)
class Meta:
abstract = True
class OverrideRelatedModelAbstract(RelatedModelAbstract):
field = models.CharField(max_length=100)
class ExtendModelAbstract(ModelAbstract):
field = GenericRelation(RelationModel)
self.assertIsInstance(OverrideRelatedModelAbstract._meta.get_field('field'), models.CharField)
self.assertIsInstance(ExtendModelAbstract._meta.get_field('field'), GenericRelation)
def test_cannot_override_indirect_abstract_field(self):
class AbstractBase(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
class ConcreteDescendant(AbstractBase):
pass
msg = (
"Local field 'name' in class 'Descendant' clashes with field of "
"the same name from base class 'ConcreteDescendant'."
)
with self.assertRaisesMessage(FieldError, msg):
class Descendant(ConcreteDescendant):
name = models.IntegerField()
def test_override_field_with_attr(self):
class AbstractBase(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
middle_name = models.CharField(max_length=30)
full_name = models.CharField(max_length=150)
class Meta:
abstract = True
class Descendant(AbstractBase):
middle_name = None
def full_name(self):
return self.first_name + self.last_name
msg = "Descendant has no field named %r"
with self.assertRaisesMessage(FieldDoesNotExist, msg % 'middle_name'):
Descendant._meta.get_field('middle_name')
with self.assertRaisesMessage(FieldDoesNotExist, msg % 'full_name'):
Descendant._meta.get_field('full_name')
def test_overriding_field_removed_by_concrete_model(self):
class AbstractModel(models.Model):
foo = models.CharField(max_length=30)
class Meta:
abstract = True
class RemovedAbstractModelField(AbstractModel):
foo = None
class OverrideRemovedFieldByConcreteModel(RemovedAbstractModelField):
foo = models.CharField(max_length=50)
self.assertEqual(OverrideRemovedFieldByConcreteModel._meta.get_field('foo').max_length, 50)
def test_shadowed_fkey_id(self):
class Foo(models.Model):
pass
class AbstractBase(models.Model):
foo = models.ForeignKey(Foo, models.CASCADE)
class Meta:
abstract = True
class Descendant(AbstractBase):
foo_id = models.IntegerField()
self.assertEqual(
Descendant.check(),
[Error(
"The field 'foo_id' clashes with the field 'foo' "
"from model 'model_inheritance.descendant'.",
obj=Descendant._meta.get_field('foo_id'),
id='models.E006',
)]
)
def test_shadow_related_name_when_set_to_none(self):
class AbstractBase(models.Model):
bar = models.IntegerField()
class Meta:
abstract = True
class Foo(AbstractBase):
bar = None
foo = models.IntegerField()
class Bar(models.Model):
bar = models.ForeignKey(Foo, models.CASCADE, related_name='bar')
self.assertEqual(Bar.check(), [])
def test_reverse_foreign_key(self):
class AbstractBase(models.Model):
foo = models.CharField(max_length=100)
class Meta:
abstract = True
class Descendant(AbstractBase):
pass
class Foo(models.Model):
foo = models.ForeignKey(Descendant, models.CASCADE, related_name='foo')
self.assertEqual(
Foo._meta.get_field('foo').check(),
[
Error(
"Reverse accessor for 'Foo.foo' clashes with field name 'Descendant.foo'.",
hint=(
"Rename field 'Descendant.foo', or add/change a related_name "
"argument to the definition for field 'Foo.foo'."
),
obj=Foo._meta.get_field('foo'),
id='fields.E302',
),
Error(
"Reverse query name for 'Foo.foo' clashes with field name 'Descendant.foo'.",
hint=(
"Rename field 'Descendant.foo', or add/change a related_name "
"argument to the definition for field 'Foo.foo'."
),
obj=Foo._meta.get_field('foo'),
id='fields.E303',
),
]
)
def test_multi_inheritance_field_clashes(self):
class AbstractBase(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
class ConcreteBase(AbstractBase):
pass
class AbstractDescendant(ConcreteBase):
class Meta:
abstract = True
class ConcreteDescendant(AbstractDescendant):
name = models.CharField(max_length=100)
self.assertEqual(
ConcreteDescendant.check(),
[Error(
"The field 'name' clashes with the field 'name' from "
"model 'model_inheritance.concretebase'.",
obj=ConcreteDescendant._meta.get_field('name'),
id="models.E006",
)]
)
def test_override_one2one_relation_auto_field_clashes(self):
class ConcreteParent(models.Model):
name = models.CharField(max_length=255)
class AbstractParent(models.Model):
name = models.IntegerField()
class Meta:
abstract = True
msg = (
"Auto-generated field 'concreteparent_ptr' in class 'Descendant' "
"for parent_link to base class 'ConcreteParent' clashes with "
"declared field of the same name."
)
with self.assertRaisesMessage(FieldError, msg):
class Descendant(ConcreteParent, AbstractParent):
concreteparent_ptr = models.CharField(max_length=30)
def test_abstract_model_with_regular_python_mixin_mro(self):
class AbstractModel(models.Model):
name = models.CharField(max_length=255)
age = models.IntegerField()
class Meta:
abstract = True
class Mixin:
age = None
class Mixin2:
age = 2
class DescendantMixin(Mixin):
pass
class ConcreteModel(models.Model):
foo = models.IntegerField()
class ConcreteModel2(ConcreteModel):
age = models.SmallIntegerField()
def fields(model):
if not hasattr(model, '_meta'):
return []
return [(f.name, f.__class__) for f in model._meta.get_fields()]
model_dict = {'__module__': 'model_inheritance'}
model1 = type('Model1', (AbstractModel, Mixin), model_dict.copy())
model2 = type('Model2', (Mixin2, AbstractModel), model_dict.copy())
model3 = type('Model3', (DescendantMixin, AbstractModel), model_dict.copy())
model4 = type('Model4', (Mixin2, Mixin, AbstractModel), model_dict.copy())
model5 = type('Model5', (Mixin2, ConcreteModel2, Mixin, AbstractModel), model_dict.copy())
self.assertEqual(
fields(model1),
[('id', models.AutoField), ('name', models.CharField), ('age', models.IntegerField)]
)
self.assertEqual(fields(model2), [('id', models.AutoField), ('name', models.CharField)])
self.assertEqual(getattr(model2, 'age'), 2)
self.assertEqual(fields(model3), [('id', models.AutoField), ('name', models.CharField)])
self.assertEqual(fields(model4), [('id', models.AutoField), ('name', models.CharField)])
self.assertEqual(getattr(model4, 'age'), 2)
self.assertEqual(
fields(model5),
[
('id', models.AutoField), ('foo', models.IntegerField),
('concretemodel_ptr', models.OneToOneField),
('age', models.SmallIntegerField), ('concretemodel2_ptr', models.OneToOneField),
('name', models.CharField),
]
)
|
edmorley/django
|
tests/model_inheritance/test_abstract_inheritance.py
|
Python
|
bsd-3-clause
| 12,105
|
# Tests:
# assign ::= expr store
pass
|
rocky/python-uncompyle6
|
test/simple_source/stmts/00_pass.py
|
Python
|
gpl-3.0
| 38
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Copies the liblouis braille translation tables to a destination.'''
import liblouis_list_tables
import optparse
import os
import shutil
def LinkOrCopyFiles(sources, dest_dir):
def LinkOrCopyOneFile(src, dst):
if os.path.exists(dst):
os.unlink(dst)
try:
os.link(src, dst)
except:
shutil.copy(src, dst)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for source in sources:
LinkOrCopyOneFile(source, os.path.join(dest_dir, os.path.basename(source)))
def WriteDepfile(depfile, infiles):
stampfile = depfile + '.stamp'
with open(stampfile, 'w'):
os.utime(stampfile, None)
content = '%s: %s' % (stampfile, ' '.join(infiles))
open(depfile, 'w').write(content)
def main():
parser = optparse.OptionParser(description=__doc__)
parser.add_option('-D', '--directory', dest='directories',
action='append', help='Where to search for table files')
parser.add_option('-e', '--extra_file', dest='extra_files', action='append',
default=[], help='Extra liblouis table file to process')
parser.add_option('-d', '--dest_dir', action='store', metavar='DIR',
help=('Destination directory. Used when translating ' +
'input paths to output paths and when copying '
'files.'))
parser.add_option('--depfile', metavar='FILENAME',
help=('Store .d style dependencies in FILENAME and touch '
'FILENAME.stamp after copying the files'))
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expecting exactly one argument')
if not options.directories:
parser.error('At least one --directory option must be specified')
if not options.dest_dir:
parser.error('At least one --dest_dir option must be specified')
files = liblouis_list_tables.GetTableFiles(args[0], options.directories,
options.extra_files)
LinkOrCopyFiles(files, options.dest_dir)
if options.depfile:
WriteDepfile(options.depfile, files)
if __name__ == '__main__':
main()
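# A minimal sketch (illustrative file names, not real table paths) of the ".d"
# style output that WriteDepfile() emits: a single "<target>: <prerequisites>"
# line naming the stamp file and every input table, e.g.
#   braille_tables.d.stamp: en-us-g1.ctb en-us-g2.ctb braille-patterns.cti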
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/liblouis/copy_tables.py
|
Python
|
mit
| 2,327
|
import zipfile
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_engine, postgres_session
from plenario.etl.common import ETLFile, add_unique_hash
from plenario.utils.shapefile import import_shapefile
class ShapeETL:
def __init__(self, meta, source_path=None):
self.source_path = source_path
self.table_name = meta.dataset_name
self.source_url = meta.source_url
self.meta = meta
def add(self):
staging_name = 'staging_{}'.format(self.table_name)
with ETLFile(self.source_path, self.source_url, interpret_as='bytes') as file_helper:
handle = open(file_helper.handle.name, "rb")
with zipfile.ZipFile(handle) as shapefile_zip:
import_shapefile(shapefile_zip, staging_name)
add_unique_hash(staging_name)
try:
postgres_engine.execute('drop table {}'.format(self.table_name))
except ProgrammingError:
pass
rename_table = 'alter table {} rename to {}'
rename_table = rename_table.format(staging_name, self.table_name)
postgres_engine.execute(rename_table)
self.meta.update_after_ingest()
postgres_session.commit()
def update(self):
self.add()
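# A minimal sketch (hypothetical table name) of the staging-swap pattern used by
# ShapeETL.add(): load into "staging_<name>", drop any existing table, then rename
# the staging table into place so the final name only ever points at a fully
# loaded table.
def _staging_swap_statements(table_name):
    staging = 'staging_{}'.format(table_name)
    return [
        'drop table if exists {}'.format(table_name),
        'alter table {} rename to {}'.format(staging, table_name),
    ]
# _staging_swap_statements('neighborhoods')[1]
#   -> 'alter table staging_neighborhoods rename to neighborhoods'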
|
UrbanCCD-UChicago/plenario
|
plenario/etl/shape.py
|
Python
|
mit
| 1,291
|
import os, sys, json
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import util
def main(request, response):
policyDeliveries = json.loads(request.GET.first('policyDeliveries', '[]'))
worker_type = request.GET.first('type', 'classic')
commonjs_url = '%s://%s:%s/common/security-features/resources/common.js' % (
request.url_parts.scheme, request.url_parts.hostname,
request.url_parts.port)
if worker_type == 'classic':
import_line = 'importScripts("%s");' % commonjs_url
else:
import_line = 'import "%s";' % commonjs_url
maybe_additional_headers = {}
error = ''
for delivery in policyDeliveries:
if delivery['deliveryType'] == 'meta':
error = '<meta> cannot be used in WorkerGlobalScope'
elif delivery['deliveryType'] == 'http-rp':
if delivery['key'] == 'referrerPolicy':
maybe_additional_headers['Referrer-Policy'] = delivery['value']
else:
error = 'invalid delivery key for http-rp: %s' % delivery['key']
else:
error = 'invalid deliveryType: %s' % delivery['deliveryType']
handler = lambda: util.get_template('worker.js.template') % ({
'import': import_line,
'error': error
})
util.respond(
request,
response,
payload_generator=handler,
content_type='text/javascript',
maybe_additional_headers=maybe_additional_headers)
|
paulrouget/servo
|
tests/wpt/web-platform-tests/common/security-features/scope/worker.py
|
Python
|
mpl-2.0
| 1,379
|
"""
Migrate management command.
"""
import os.path, re, sys
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from south import migration
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--all', action='store_true', dest='all_apps', default=False,
help='Run the specified migration for all apps.'),
make_option('--list', action='store_true', dest='show_list', default=False,
help='List migrations noting those that have been applied'),
make_option('--changes', action='store_true', dest='show_changes', default=False,
help='List changes for migrations'),
make_option('--skip', action='store_true', dest='skip', default=False,
help='Will skip over out-of-order missing migrations'),
make_option('--merge', action='store_true', dest='merge', default=False,
help='Will run out-of-order missing migrations as they are - no rollbacks.'),
make_option('--no-initial-data', action='store_true', dest='no_initial_data', default=False,
help='Skips loading initial data if specified.'),
make_option('--fake', action='store_true', dest='fake', default=False,
help="Pretends to do the migrations, but doesn't actually execute them."),
make_option('--db-dry-run', action='store_true', dest='db_dry_run', default=False,
help="Doesn't execute the SQL generated by the db methods, and doesn't store a record that the migration(s) occurred. Useful to test migrations before applying them."),
make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False,
help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."),
make_option('--ignore-ghost-migrations', action='store_true', dest='ignore_ghosts', default=False,
help="Tells South to ignore any 'ghost' migrations (ones in the database but not on disk) and continue to apply new migrations."),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
option_list += (
make_option('--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
)
help = "Runs migrations for all apps."
args = "[appname] [migrationname|zero] [--all] [--list] [--skip] [--merge] [--no-initial-data] [--fake] [--db-dry-run] [--database=dbalias]"
def handle(self, app=None, target=None, skip=False, merge=False, backwards=False, fake=False, db_dry_run=False, show_list=False, show_changes=False, database=DEFAULT_DB_ALIAS, delete_ghosts=False, ignore_ghosts=False, **options):
# NOTE: THIS IS DUPLICATED FROM django.core.management.commands.syncdb
# This code imports any module named 'management' in INSTALLED_APPS.
# The 'management' module is the preferred way of listening to post_syncdb
# signals, and since we're sending those out with create_table migrations,
# we need apps to behave correctly.
for app_name in settings.INSTALLED_APPS:
try:
__import__(app_name + '.management', {}, {}, [''])
except ImportError, exc:
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
# END DJANGO DUPE CODE
# if all_apps flag is set, shift app over to target
if options.get('all_apps', False):
target = app
app = None
# Migrate each app
if app:
try:
apps = [Migrations(app)]
except NoMigrations:
print "The app '%s' does not appear to use migrations." % app
print "./manage.py migrate " + self.args
return
else:
apps = list(migration.all_migrations())
# Do we need to show the list of migrations?
if show_list and apps:
list_migrations(apps, database, **options)
if show_changes and apps:
show_migration_changes(apps)
if not (show_list or show_changes):
for app in apps:
result = migration.migrate_app(
app,
target_name = target,
fake = fake,
db_dry_run = db_dry_run,
verbosity = int(options.get('verbosity', 0)),
interactive = options.get('interactive', True),
load_initial_data = not options.get('no_initial_data', False),
merge = merge,
skip = skip,
database = database,
delete_ghosts = delete_ghosts,
ignore_ghosts = ignore_ghosts,
)
if result is False:
sys.exit(1) # Migration failed, so the command fails.
def list_migrations(apps, database = DEFAULT_DB_ALIAS, **options):
"""
Prints a list of all available migrations, and which ones are currently applied.
Accepts a list of Migrations instances.
"""
from south.models import MigrationHistory
applied_migrations = MigrationHistory.objects.filter(app_name__in=[app.app_label() for app in apps])
if database != DEFAULT_DB_ALIAS:
applied_migrations = applied_migrations.using(database)
applied_migration_names = ['%s.%s' % (mi.app_name,mi.migration) for mi in applied_migrations]
print
for app in apps:
print " " + app.app_label()
# Get the migrations object
for migration in app:
if migration.app_label() + "." + migration.name() in applied_migration_names:
applied_migration = applied_migrations.get(app_name=migration.app_label(), migration=migration.name())
print format_migration_list_item(migration.name(), applied=applied_migration.applied, **options)
else:
print format_migration_list_item(migration.name(), applied=False, **options)
print
def show_migration_changes(apps):
"""
    Prints the model changes introduced by each migration, comparing successive
    migrations in pairs. Accepts a list of Migrations instances.
Much simpler, less clear, and much less robust version:
grep "ing " migrations/*.py
"""
for app in apps:
print app.app_label()
# Get the migrations objects
migrations = [migration for migration in app]
# we use reduce to compare models in pairs, not to generate a value
reduce(diff_migrations, migrations)
def format_migration_list_item(name, applied=True, **options):
if applied:
if int(options.get('verbosity')) >= 2:
return ' (*) %-80s (applied %s)' % (name, applied)
else:
return ' (*) %s' % name
else:
return ' ( ) %s' % name
def diff_migrations(migration1, migration2):
def model_name(models, model):
return models[model].get('Meta', {}).get('object_name', model)
def field_name(models, model, field):
return '%s.%s' % (model_name(models, model), field)
print " " + migration2.name()
models1 = migration1.migration_class().models
models2 = migration2.migration_class().models
# find new models
for model in models2.keys():
if not model in models1.keys():
print ' added model %s' % model_name(models2, model)
# find removed models
for model in models1.keys():
if not model in models2.keys():
print ' removed model %s' % model_name(models1, model)
# compare models
for model in models1:
if model in models2:
# find added fields
for field in models2[model]:
if not field in models1[model]:
print ' added field %s' % field_name(models2, model, field)
# find removed fields
for field in models1[model]:
if not field in models2[model]:
print ' removed field %s' % field_name(models1, model, field)
# compare fields
for field in models1[model]:
if field in models2[model]:
name = field_name(models1, model, field)
# compare field attributes
field_value1 = models1[model][field]
field_value2 = models2[model][field]
# if a field has become a class, or vice versa
if type(field_value1) != type(field_value2):
print ' type of %s changed from %s to %s' % (
name, field_value1, field_value2)
# if class
elif isinstance(field_value1, dict):
# print ' %s is a class' % name
pass
# else regular field
else:
type1, attr_list1, field_attrs1 = models1[model][field]
type2, attr_list2, field_attrs2 = models2[model][field]
if type1 != type2:
print ' %s type changed from %s to %s' % (
name, type1, type2)
if attr_list1 != []:
print ' %s list %s is not []' % (
name, attr_list1)
if attr_list2 != []:
print ' %s list %s is not []' % (
name, attr_list2)
if attr_list1 != attr_list2:
print ' %s list changed from %s to %s' % (
name, attr_list1, attr_list2)
# find added field attributes
for attr in field_attrs2:
if not attr in field_attrs1:
print ' added %s attribute %s=%s' % (
name, attr, field_attrs2[attr])
# find removed field attributes
for attr in field_attrs1:
if not attr in field_attrs2:
print ' removed attribute %s(%s=%s)' % (
name, attr, field_attrs1[attr])
# compare field attributes
for attr in field_attrs1:
if attr in field_attrs2:
value1 = field_attrs1[attr]
value2 = field_attrs2[attr]
if value1 != value2:
print ' %s attribute %s changed from %s to %s' % (
name, attr, value1, value2)
return migration2
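# A minimal sketch (toy migration names) of the reduce-for-side-effects pattern
# used by show_migration_changes(): the callback prints a comparison of each
# adjacent pair and returns its second argument, so the next call receives it as
# the "previous" value.
def _compare_pair(previous, current):
    print ' %s -> %s' % (previous, current)
    return current
# reduce(_compare_pair, ['0001_initial', '0002_add_field', '0003_rename_field'])
# would print " 0001_initial -> 0002_add_field" and then
# " 0002_add_field -> 0003_rename_field".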
|
mozilla/make.mozilla.org
|
vendor-local/lib/python/south/management/commands/migrate.py
|
Python
|
bsd-3-clause
| 12,083
|
#!/bin/env python
import csv
import sys
import re
import string
import argparse
def get_import_value(value_dict, regexp):
matching_keys = [re.search(regexp, key, re.IGNORECASE).groups()[0] for key in value_dict.keys() if re.search(regexp, key, re.IGNORECASE) is not None]
#print value_dict
#print regexp
#print matching_keys
if len(matching_keys) == 0:
value = ""
elif len(matching_keys) == 1:
value = value_dict[matching_keys[0]]
elif reduce(lambda x,y:x+y, [ len(value_dict[key].strip()) for key in matching_keys ]) == 0:
value = ""
else:
value = "; ".join("%s=%s"%(key, value_dict[key]) for key in matching_keys)
return value
def sanitise(options):
rowcount = 1
numcol = None
DEBUG=False
csvreader = csv.reader(sys.stdin)
csvwriter = csv.writer(sys.stdout)
filter_record = True
standard_header = ["fcid","lane","sampleid","sampleref","sampleindex","description","control","recipe","operator","sampleproject","sampleplate","samplewell","downstream_processing","basespace_project"]
for record in csvreader:
if filter_record:
# see if we have hit header
#print record
header_matches = [True for item in record if re.match("(lane|sample[_]*id|sample[_]*project)",item,re.IGNORECASE) is not None]
#print header_matches
if len(header_matches) == 3:
filter_record = False
header = record
# output header
csvwriter.writerow(standard_header)
else:
# skip any embedded section headings
if re.search("\[.*\]",record[0]) is not None:
continue
            # prepare the record, including the following mappings:
#Lane->lane
#Sample_ID->sampleid
#Sample_Name->sampleref
#Sample_Plate->sampleplate *
#Sample_Well -> samplewell *
#*Index* -> sampleindex (concatenate)
#Sample_Project -> sampleproject
#Description -> description
record_dict = dict(zip(header, record))
out_record_dict = {}
out_record_dict["fcid"] = options["fcid"]
out_record_dict["lane"] = get_import_value(record_dict, "(lane)")
out_record_dict["sampleid"] = get_import_value(record_dict, "(sample[_]*id)")
out_record_dict["sampleref"] = get_import_value(record_dict, "(sampleref|sample[_]*name)")
out_record_dict["sampleindex"] = get_import_value(record_dict, "(.*index.*)")
out_record_dict["description"] = get_import_value(record_dict, "(description)")
out_record_dict["control"] = get_import_value(record_dict, "(control)")
out_record_dict["recipe"] = get_import_value(record_dict, "(recipe)")
out_record_dict["operator"] = get_import_value(record_dict, "(operator)")
out_record_dict["sampleproject"] = get_import_value(record_dict, "(sample[_]*project)")
out_record_dict["sampleplate"] = get_import_value(record_dict, "(sample[_]*plate)")
out_record_dict["samplewell"] = get_import_value(record_dict, "(sample[_]*well)")
out_record_dict["downstream_processing"] = get_import_value(record_dict, "(downstream_processing)")
out_record_dict["basespace_project"] = get_import_value(record_dict, "(basespace_project)")
record = [out_record_dict.get(key,"") for key in standard_header]
csvwriter.writerow(record)
def get_options():
description = """
    prepares a sanitised version of a sample sheet for subsequent import into the database sample sheet table.
"""
long_description = """
example : cat myfile.csv | sanitiseSampleSheet.py -r 161205_D00390_0274_AC9KW9ANXX
"""
parser = argparse.ArgumentParser(description=description, epilog=long_description, formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument('-r', dest='run', required=True , help="name of run")
args = vars(parser.parse_args())
# parse fcid
mymatch=re.match("^\d+_\S+_\d+_.(\S+)", args["run"])
if mymatch is None:
raise Exception("unable to parse fcid from run")
args["fcid"] = mymatch.groups()[0]
return args
def main():
options = get_options()
sanitise(options)
if __name__ == "__main__":
main()
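# A minimal sketch (made-up header names) of the case-insensitive header matching
# that get_import_value() relies on: the regexp's capture group keeps the header
# text that matched, and those keys are then used to pull values out of the row.
_example_row = {'Sample_ID': 'S1', 'Lane': '3', 'Index': 'ACGT'}
_matching = [k for k in _example_row.keys() if re.search("(sample[_]*id)", k, re.IGNORECASE)]
# _matching -> ['Sample_ID'], so get_import_value(_example_row, "(sample[_]*id)")
# would return 'S1'.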
|
AgResearch/DECONVQC
|
database/sanitiseSampleSheet.py
|
Python
|
gpl-3.0
| 4,396
|
from django import forms
from django.contrib.postgres.forms import SimpleArrayField
import django_filters
from .models import (Plan, Goal, Theme, Sector, Target, Indicator, Component,
Progress, Area, AreaType)
class SimpleIntegerArrayField(SimpleArrayField):
def __init__(self, base_field=forms.IntegerField(), delimiter=',',
max_length=None, min_length=None, *args, **kwargs):
super(SimpleIntegerArrayField, self).__init__(
base_field=base_field, delimiter=delimiter,
max_length=max_length, min_length=min_length, *args, **kwargs)
class IntegerArrayFilter(django_filters.Filter):
field_class = SimpleIntegerArrayField
class AreaFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='iexact')
type = django_filters.CharFilter(lookup_expr='iexact')
class Meta:
model = Area
fields = ['code', 'level']
class SectorFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='iexact')
description = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = Sector
fields = ['parent', 'themes']
class PlanFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='iexact')
class Meta:
model = Plan
fields = ['code']
class ThemeFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='iexact')
description = django_filters.CharFilter(lookup_expr='icontains')
plan_code = django_filters.CharFilter(name='plan__code')
class Meta:
model = Theme
fields = ['plan', 'code']
class GoalFilter(ThemeFilter):
class Meta:
model = Goal
fields = ['plan', 'code']
class TargetFilter(django_filters.FilterSet):
description = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = Target
fields = ['goal', 'code']
class IndicatorFilter(django_filters.FilterSet):
goal = django_filters.ModelChoiceFilter(name='target__goal',
queryset=Goal.objects.all())
description = django_filters.CharFilter(lookup_expr='icontains')
data_source = django_filters.CharFilter(lookup_expr='icontains')
agency = django_filters.CharFilter(lookup_expr='iexact')
progress_count = django_filters.NumberFilter(lookup_expr='gte')
sectors_ids = IntegerArrayFilter(lookup_expr='contains')
class Meta:
model = Indicator
fields = ['plan_id', 'theme', 'target', 'sector', 'code']
class ComponentFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr='icontains')
description = django_filters.CharFilter(lookup_expr='icontains')
goal = django_filters.ModelChoiceFilter(
name='indicators__target__goal', queryset=Goal.objects.all())
progress_count = django_filters.NumberFilter(lookup_expr='gte')
class Meta:
model = Component
fields = ['indicators', 'code', 'stats_available']
class ProgressFilter(django_filters.FilterSet):
indicator = django_filters.ModelChoiceFilter(
name='component__indicators', queryset=Indicator.objects.all())
target = django_filters.ModelChoiceFilter(
name='component__indicators__target',
queryset=Target.objects.all())
area_code = django_filters.CharFilter(name='area__code')
area_name = django_filters.CharFilter(name='area__name')
area_type = django_filters.ModelChoiceFilter(
name='area__type', queryset=AreaType.objects.all())
area_type_code = django_filters.CharFilter(name='area__type__code')
area_type_name = django_filters.CharFilter(name='area__type__name')
class Meta:
model = Progress
fields = {
'component': ['exact'],
'area': ['exact'],
'year': ['exact', 'lt', 'lte', 'gt', 'gte'],
'fiscal_year': ['exact', 'lt', 'lte', 'gt', 'gte'],
'value': ['exact', 'lt', 'lte', 'gt', 'gte']
}
|
tehamalab/dgs
|
goals/filters.py
|
Python
|
unlicense
| 4,056
|
#!/usr/bin/env python
from oerp_rest import oerp_rest
oerp_rest.run(debug = True)
|
bdunnette/oerp-rest
|
server.py
|
Python
|
agpl-3.0
| 83
|
# Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for the "pm export" utility"""
from __future__ import absolute_import
import os.path
import dtf.testutils as testutils
import dtf.core.utils as utils
class PmExportTests(testutils.BasicIntegrationTest):
"""Wraper for integration tests"""
def test_no_content(self):
"""Attempt to export nothing"""
rtn = self.run_cmd("pm export test.zip")
assert(rtn.return_code == 254)
def test_existing_file(self):
"""Attempt to export to exisitng file"""
utils.touch("test.zip")
rtn = self.run_cmd("pm export test.zip")
utils.delete_file("test.zip")
assert(rtn.return_code == 255)
def test_real_export(self):
"""Perform an export"""
data_file = testutils.DataFile("integration_pm_valid_zip.zip")
rtn = self.run_cmd("pm install --zip %s" % str(data_file))
assert(rtn.return_code == 0)
rtn = self.run_cmd("pm export test.zip")
assert(rtn.return_code == 0)
assert(os.path.isfile("test.zip"))
utils.delete_file("test.zip")
|
jakev/dtf
|
python-dtf/tests/integration/pm/test_pm_export.py
|
Python
|
apache-2.0
| 1,737
|
"""
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maxima in this cube. Detecting
larger blobs is especially slow because of the larger kernel sizes used during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of LoG approach. In this case the image is
blurred with increasing standard deviations and the difference between
two successively blurred images are stacked up in a cube. This method
suffers from the same disadvantage as LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
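# For a 2-D Gaussian, a blob of radius r gives its strongest LoG/DoG response at
# sigma ~= r / sqrt(2), so the sigma stored in the 3rd column is scaled by
# sqrt(2) here to recover an approximate radius.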
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image, interpolation='nearest')
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
|
paalge/scikit-image
|
doc/examples/features_detection/plot_blob.py
|
Python
|
bsd-3-clause
| 2,997
|
import sublime
import sublime_plugin
try: # Python 3
from ...HaxeHelper import runcmd, show_quick_panel
except (ValueError): # Python 2
from HaxeHelper import runcmd, show_quick_panel
print("HAXE : haxelib list ")
class HaxelibListInstalled( sublime_plugin.WindowCommand ):
def run(self, paths = [] , t = "list"):
self.action = t
settings = self.window.active_view().settings()
haxelib_path = settings.get("haxelib_path","haxelib")
out,err = runcmd([haxelib_path , "list"]);
libs = out.splitlines()
self.libs = []
menu = []
for _lib in libs :
libname,libcurrent,libversions = self.haxelib_parse_libversions(_lib)
menu.append([ libname + " " + libcurrent , libversions ])
self.libs.append(libname)
self.window.show_quick_panel( menu, self.on_select )
def on_select(self, index) :
if(index < 0):
return;
if(self.action == "remove"):
self.do_remove(self.libs[index])
elif(self.action == "update"):
self.do_update(self.libs[index])
def do_remove(self,library):
sublime.status_message("Please wait, removing haxelib " + library);
settings = self.window.active_view().settings()
haxelib_path = settings.get("haxelib_path","haxelib")
out,err = runcmd([haxelib_path , "remove", library]);
sublime.status_message(str(out))
show_quick_panel(self.window, out.splitlines(), None)
def do_update(self,library):
sublime.status_message("Please wait, updating haxelib " + library);
settings = self.window.active_view().settings()
haxelib_path = settings.get("haxelib_path","haxelib")
out,err = runcmd([haxelib_path , "update", library]);
sublime.status_message(str(out))
show_quick_panel(self.window, out.splitlines(), None)
def haxelib_parse_libversions( self, libinfo ):
        # the info comes in lines of the form "libname: 3.0.2 [3.0.4]":
        # everything before the first ':' is the lib name, the version inside
        # [ ] is the active one, and the rest are installed but not active.
first_colon = libinfo.find(':');
libname = ''
versions = 'unknown'
active_version = 'unknown'
#parse the lib name and versions separately
if(first_colon != -1) :
libname = libinfo[0:first_colon]
versions = libinfo[first_colon+1:].split()
# now parse the versions into active and inactive list
for _version in versions:
if(_version.find("[") != -1):
active_version = _version
#remove the active from the list
if active_version in versions: versions.remove(active_version)
#parse for better output
versions_str = (", ".join(str(x) for x in versions)).replace('dev:','')
active_version_str = str(active_version).replace('dev:','')
active_str = str(active_version).strip('[]')
#nicer output if none others
if(versions_str == ""):
versions_str = "none"
#parse the dev flag
if(active_str.find("dev:") != -1):
active_str = "dev"
return libname, active_version_str, "active: " +active_str +" installed: " + versions_str
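# A minimal sketch (sample line only; the library name is illustrative) of the
# "haxelib list" format parsed above: the text before the first ':' is the
# library name, the bracketed entry is the active version, and the rest are
# installed but inactive.
_sample = "hxcpp: 3.2.102 [3.4.64]"
_libname, _rest = _sample.split(':', 1)
_versions = _rest.split()                                 # -> ['3.2.102', '[3.4.64]']
_active = [v for v in _versions if v.startswith('[')]     # -> ['[3.4.64]']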
|
joa/haxe-sublime2-bundle
|
features/haxelib/haxelib_list_installed.py
|
Python
|
apache-2.0
| 3,355
|
#!flask/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v-1)
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
|
VagrantApe/flaskMicroblog
|
db_downgrade.py
|
Python
|
bsd-3-clause
| 385
|
from nameparser.parser import HumanName
def impute_names(name):
human = HumanName(name)
return {
'given': human.first,
'middle': human.middle,
'family': human.last,
'suffix': human.suffix,
}
def impute_names_model(name):
human = HumanName(name)
return {
'given_name': human.first,
'middle_names': human.middle,
'family_name': human.last,
'suffix': human.suffix,
}
def privacy_info_handle(info, anonymous, name=False):
"""hide user info from api if anonymous
:param str info: info which suppose to return
:param bool anonymous: anonymous or not
:param bool name: if the info is a name,
:return str: the handled info should be passed through api
"""
if anonymous:
return 'A user' if name else ''
return info
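# A minimal usage sketch (illustrative name only; the exact split is whatever
# nameparser.HumanName produces, so the values below show the expected shape
# rather than guaranteed output):
#   impute_names('Jane Q. Public') -> {'given': 'Jane', 'middle': 'Q.',
#                                      'family': 'Public', 'suffix': ''}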
|
GaryKriebel/osf.io
|
framework/auth/utils.py
|
Python
|
apache-2.0
| 842
|
import requests
import re
import json
import ast
import os
import ui
import threading
import tarfile
import math
import time
import plistlib
import console
import shutil
import sqlite3
import base64
import clipboard
import os
import Image
import io
import copy
import yaml
from Managers import DBManager, TypeManager
from Utilities import LogThread
from distutils.version import LooseVersion
class UserContributed (object):
def __init__(self):
self.__version = ''
self.__globalversion = ''
self.__name = ''
self.__aliases = []
self.__icon = None
self.__id = ''
self.__path = None
self.__status = ''
self.__stats = ''
self.__archive = ''
self.__authorName = ''
self.__onlineid = ''
self.__imageData = ''
self.__hasVersions = False
self.__specificVersions = []
@property
def version(self):
return self.__version
@version.setter
def version(self, version):
self.__version = version
@property
def globalversion(self):
return self.__globalversion
@globalversion.setter
def globalversion(self, globalversion):
self.__globalversion = globalversion
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
@property
def aliases(self):
return self.__aliases
@aliases.setter
def aliases(self, aliases):
self.__aliases = aliases
@property
def image(self):
return self.__icon
@image.setter
def image(self, icon):
self.__icon = icon
@property
def id(self):
return self.__id
@id.setter
def id(self, id):
self.__id = id
@property
def onlineid(self):
return self.__onlineid
@onlineid.setter
def onlineid(self, id):
self.__onlineid = id
@property
def path(self):
return self.__path
@path.setter
def path(self, path):
self.__path = path
@property
def status(self):
return self.__status
@status.setter
def status(self, status):
self.__status = status
@property
def stats(self):
return self.__stats
@stats.setter
def stats(self, stats):
self.__stats = stats
@property
def archive(self):
return self.__archive
@archive.setter
def archive(self, archive):
self.__archive = archive
@property
def authorName(self):
return self.__authorName
@authorName.setter
def authorName(self, an):
self.__authorName = an
@property
def imageData(self):
return self.__imageData
@imageData.setter
def imageData(self, data):
self.__imageData = data
@property
def hasVersions(self):
return self.__hasVersions
@hasVersions.setter
def hasVersions(self, data):
self.__hasVersions = data
@property
def specificVersions(self):
return self.__specificVersions
@specificVersions.setter
def specificVersions(self, data):
self.__specificVersions = data
class UserContributedManager (object):
def __init__(self, serverManager, iconPath, typeIconPath):
self.typeManager = TypeManager.TypeManager(typeIconPath)
self.serverManager = serverManager
self.iconPath = iconPath
self.typeIconPath = typeIconPath
self.localServer = None
self.jsonServerLocation = 'zzz/user_contributed/build/index.json'
self.downloadServerLocation = 'zzz/user_contributed/build/%@/%$'
self.plistPath = 'Contents/Info.plist'
self.indexPath = 'Contents/Resources/docSet.dsidx'
self.userContributedFolder = 'Docsets/UserContributions'
self.headers = {'User-Agent': 'PyDoc-Pythonista'}
self.usercontributed = None
self.downloading = []
self.updateAvailable = []
self.workThreads = []
self.downloadThreads = []
self.uiUpdateThreads = []
self.lastDocsetGroup = None
self.__createUserContributedFolder()
self.createInitialSearchIndexAllDocsets()
def getAvailableUserContributed(self):
usercontributed = self.__getOnlineUserContributed()
for d in self.__getDownloadedUserContributed():
for c in usercontributed:
if c.name == d.name and c.version == d.version:
c.status = 'installed'
c.path = d.path
c.id = d.id
for d in self.updateAvailable:
for c in usercontributed:
if c.name == d.name:
c.status = "Update Available"
for d in self.__getDownloadingUserContributed():
for c in usercontributed:
if c.name == d.name and c.version == d.version:
c.status = d.status
c.version = d.version
c.hasVersions = d.hasVersions
try:
c.stats = d.stats
except KeyError:
c.stats = 'downloading'
return usercontributed
def __getOnlineUserContributed(self):
if self.usercontributed == None:
self.usercontributed = self.__getUserContributed()
return self.usercontributed
def __getDownloadedUserContributed(self):
dbManager = DBManager.DBManager()
t = dbManager.InstalledDocsetsByType('usercontributed')
ds = []
for d in t:
aa = UserContributed()
aa.name = d[1]
aa.id = d[0]
aa.path = os.path.join(os.path.abspath('.'),d[2])
imgData = str(d[4])
if not imgData == '':
imgdata = base64.standard_b64decode(imgData)
aa.image = ui.Image.from_data(imgdata)
else:
aa.image = self.__getIconWithName('Other')
od = yaml.load(d[6])
if type(od) is type({}):
aa.authorName = od['author']
aa.hasVersions = od['hasVersions']
else:
aa.authorName = od
aa.version = d[5]
ds.append(aa)
return ds
def __getDownloadingUserContributed(self):
return self.downloading
def getDownloadedUserContributed(self):
return self.__getDownloadedUserContributed()
def __getUserContributed(self):
server = self.serverManager.getDownloadServer(self.localServer)
url = server.url
if not url[-1] == '/':
url = url + '/'
url = url + self.jsonServerLocation
data = requests.get(url).text
data = ast.literal_eval(data)
usercontributed = []
defaultIcon = self.__getIconWithName('Other')
for k,d in data['docsets'].items():
u = UserContributed()
u.name = d['name']
if 'aliases' in d.keys():
u.aliases = d['aliases']
u.version = d['version']
u.archive = d['archive']
u.authorName = d['author']['name']
u.hasVersions = 'specific_versions' in d.keys()
if u.hasVersions:
u.specificVersions = d['specific_versions']
if 'icon' in d.keys():
imgdata = base64.standard_b64decode(d['icon'])
u.image = ui.Image.from_data(imgdata)
u.imageData = d['icon']
else:
u.image = defaultIcon
u.onlineid = k
u.status = 'online'
usercontributed.append(u)
return sorted(usercontributed, key=lambda x: x.name.lower())
def checkDocsetsForUpdates(self, docsets):
console.show_activity('Checking for updates...')
self.usercontributed = None
online = self.__getOnlineUserContributed()
for d in docsets:
if not d.hasVersions and d.status == 'installed':
console.show_activity('Checking ' + d.name + ' for update...')
for f in online:
if f.name == d.name:
if LooseVersion(str(d.version).replace('/','')) < LooseVersion(str(f.version).replace('/','')):
d.status = 'Update Available'
d.version = f.version
self.updateAvailable.append(d)
def getOnlineVersions(self, doc= None):
d = None
if doc == None:
d = self.lastDocsetGroup
else:
self.lastDocsetGroup = doc
d = doc
data = [d]
downloaded = self.getDownloadedUserContributed()
endCheck = []
for dad in downloaded:
if dad.name == d.name:
endCheck.append(dad)
toRemoveOrig = [i for i in endCheck if i.name==d.name and i.version == d.version]
for rt in toRemoveOrig:
endCheck.remove(rt)
for version in d.specificVersions:
if not '_comment' in version.keys():
da = copy.copy(d)
da.specificVersions = []
da.status = 'online'
da.version = version['version']
da.archive = version['archive'].replace('\\','')
da.path = None
for down in downloaded:
if da.name == down.name and da.version == down.version:
da.status = 'installed'
da.path = down.path
da.id = down.id
toRemoveFromEndCheck = [i for i in endCheck if i.name==da.name and i.version == da.version]
for rt in toRemoveFromEndCheck:
endCheck.remove(rt)
add = True
for toCheck in data:
if toCheck.name == da.name and toCheck.version == da.version:
add = False
if add:
data.append(da)
for e in endCheck:
e.status = 'installed'
data.append(e)
return sorted(data, key=lambda x: x.version, reverse=True)
def __getLocalIcon(self, path):
imgPath = os.path.join(os.path.abspath('.'),path,'icon.png')
if not os.path.exists(imgPath):
imgPath = os.path.join(os.path.abspath('.'), self.iconPath, 'Other.png')
return ui.Image.named(imgPath)
def __getIconWithName(self, name):
imgPath = os.path.join(os.path.abspath('.'), self.iconPath, name+'.png')
if not os.path.exists(imgPath):
imgPath = os.path.join(os.path.abspath('.'), self.iconPath, 'Other.png')
return ui.Image.named(imgPath)
def __createUserContributedFolder(self):
if not os.path.exists(self.userContributedFolder):
os.mkdir(self.userContributedFolder)
def downloadUserContributed(self, usercontributed, action, refresh_main_view):
if not usercontributed in self.downloading:
removeSoon = []
for d in self.updateAvailable:
if d.name == usercontributed.name:
removeSoon.append(d)
for d in removeSoon:
self.updateAvailable.remove(d)
usercontributed.status = 'downloading'
self.downloading.append(usercontributed)
action()
workThread = LogThread.LogThread(target=self.__determineUrlAndDownload, args=(usercontributed,action,refresh_main_view,))
self.workThreads.append(workThread)
workThread.start()
def __determineUrlAndDownload(self, usercontributed, action, refresh_main_view):
usercontributed.stats = 'getting download link'
action()
downloadLink = self.__getDownloadLink(usercontributed.onlineid, usercontributed.archive)
downloadThread = LogThread.LogThread(target=self.downloadFile, args=(downloadLink,usercontributed,refresh_main_view,))
self.downloadThreads.append(downloadThread)
downloadThread.start()
updateThread = LogThread.LogThread(target=self.updateUi, args=(action,downloadThread,))
self.uiUpdateThreads.append(updateThread)
updateThread.start()
def updateUi(self, action, t):
while t.is_alive():
action()
time.sleep(0.5)
action()
def __getDownloadLink(self, id, archive):
server = self.serverManager.getDownloadServer(self.localServer)
url = server.url
if not url[-1] == '/':
url = url + '/'
url = url + self.downloadServerLocation
url = url.replace('%@', id)
url = url.replace('%$', archive)
return url
def downloadFile(self, url, usercontributed, refresh_main_view):
local_filename = self.__downloadFile(url, usercontributed)
#self.__downloadFile(url+'.tarix', cheatsheet)
usercontributed.status = 'waiting for install'
self.installUserContributed(local_filename, usercontributed, refresh_main_view)
def __downloadFile(self, url, usercontributed):
local_filename = self.userContributedFolder+'/'+str(usercontributed.version).replace('/','_')+url.split('/')[-1]
r = requests.get(url, headers = self.headers, stream=True)
ret = None
if r.status_code == 200:
ret = local_filename
total_length = r.headers.get('content-length')
dl = 0
last = 0
if os.path.exists(local_filename):
os.remove(local_filename)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
dl += len(chunk)
f.write(chunk)
if not total_length == None:
done = 100 * dl / int(total_length)
usercontributed.stats = str(round(done,2)) + '% ' + str(self.convertSize(dl)) + ' / '+ str(self.convertSize(float(total_length)))
else:
usercontributed.stats = str(self.convertSize(dl))
r.close()
return ret
def installUserContributed(self, filename, usercontributed, refresh_main_view):
extract_location = os.path.join(self.userContributedFolder, '_'+usercontributed.name.replace('/','_'), '_'+usercontributed.version.replace('/','_'))
usercontributed.status = 'Preparing to install: This might take a while.'
tar = tarfile.open(filename, 'r:gz')
n = [name for name in tar.getnames() if '/' not in name][0]
m = os.path.join(extract_location, n)
tar.extractall(path=extract_location, members = self.track_progress(tar, usercontributed, len(tar.getmembers())))
tar.close()
encodedImg = usercontributed.imageData
dbManager = DBManager.DBManager()
otherAtt = {}
otherAtt['author'] = usercontributed.authorName
otherAtt['hasVersions'] = usercontributed.hasVersions
dbManager.DocsetInstalled(usercontributed.name, m, 'usercontributed', str(encodedImg), usercontributed.version, str(otherAtt))
os.remove(filename)
if usercontributed in self.downloading:
self.downloading.remove(usercontributed)
self.indexUserContributed(usercontributed, refresh_main_view, m)
def track_progress(self, members, usercontributed, totalFiles):
i = 0
for member in members:
i = i + 1
done = 100 * i / totalFiles
usercontributed.status = 'installing: ' + str(round(done,2)) + '% ' + str(i) + ' / '+ str(totalFiles)
yield member
def indexUserContributed(self, usercontributed, refresh_main_view, path):
usercontributed.status = 'indexing'
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT count(*) FROM sqlite_master WHERE type = \'table\' AND name = \'searchIndex\''
c = conn.execute(sql)
data = c.fetchone()
if int(data[0]) == 0:
sql = 'CREATE TABLE searchIndex(rowid INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT)'
c = conn.execute(sql)
conn.commit()
sql = 'SELECT f.ZPATH, m.ZANCHOR, t.ZTOKENNAME, ty.ZTYPENAME, t.rowid FROM ZTOKEN t, ZTOKENTYPE ty, ZFILEPATH f, ZTOKENMETAINFORMATION m WHERE ty.Z_PK = t.ZTOKENTYPE AND f.Z_PK = m.ZFILE AND m.ZTOKEN = t.Z_PK ORDER BY t.ZTOKENNAME'
c = conn.execute(sql)
data = c.fetchall()
for t in data:
conn.execute("insert into searchIndex values (?, ?, ?, ?)", (t[4], t[2], self.typeManager.getTypeForName(t[3]).name, t[0] ))
conn.commit()
else:
sql = 'SELECT rowid, type FROM searchIndex'
c = conn.execute(sql)
data = c.fetchall()
for t in data:
newType = self.typeManager.getTypeForName(t[1])
if not newType == None and not newType.name == t[1]:
conn.execute("UPDATE searchIndex SET type=(?) WHERE rowid = (?)", (newType.name, t[0] ))
conn.commit()
indexSql = 'CREATE INDEX ix_searchIndex_name ON searchIndex(name)'
conn.execute(indexSql)
conn.close()
self.postProcess(usercontributed, refresh_main_view)
def createInitialSearchIndexAllDocsets(self):
docsets = self.getDownloadedUserContributed()
for d in docsets:
indexPath = os.path.join(d.path, self.indexPath)
            conn = sqlite3.connect(indexPath)
indexSql = 'CREATE INDEX IF NOT EXISTS ix_searchIndex_name ON searchIndex(name)'
conn.execute(indexSql)
conn.close()
def postProcess(self, usercontributed, refresh_main_view):
usercontributed.status = 'installed'
refresh_main_view()
def convertSize(self, size):
if (size == 0):
return '0B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
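        # pick the unit from the base-1024 logarithm of the size, then scale down by 1024**i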
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return '%s %s' % (s,size_name[i])
def deleteUserContributed(self, usercontributed, post_action, confirm = True):
but = 1
if confirm:
but = console.alert('Are you sure?', 'Would you like to delete the docset, ' + usercontributed.name, 'Ok')
if but == 1:
dbmanager = DBManager.DBManager()
dbmanager.DocsetRemoved(usercontributed.id)
shutil.rmtree(usercontributed.path)
usercontributed.status = 'online'
usercontributed.path = None
if not post_action == None:
post_action()
def getTypesForUserContributed(self, usercontributed):
types = []
path = usercontributed.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type FROM searchIndex GROUP BY type ORDER BY type COLLATE NOCASE'
c = conn.execute(sql)
data = c.fetchall()
conn.close()
for t in data:
types.append(self.typeManager.getTypeForName(t[0]))
return types
def getIndexesbyTypeAndNameForUserContributed(self, usercontributed, typeName, name):
indexes = []
path = usercontributed.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex WHERE type = (?) AND name LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (typeName, name,))
data = c.fetchall()
conn.close()
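        # cache the Type object for each type name so repeated rows avoid redundant lookups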
dTypes ={}
type = None
for t in data:
if t[0] in dTypes.keys():
type= dTypes[t[0]]
else:
type = self.typeManager.getTypeForName(t[0])
dTypes[t[0]] = type
indexes.append({'type':type, 'name':t[1],'path':t[2]})
return indexes
def getIndexesbyNameForUserContributed(self, usercontributed, name):
indexes = []
path = usercontributed.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (name,))
data = c.fetchall()
conn.close()
dTypes ={}
type = None
for t in data:
if t[0] in dTypes.keys():
type= dTypes[t[0]]
else:
type = self.typeManager.getTypeForName(t[0])
dTypes[t[0]] = type
indexes.append({'type':type, 'name':t[1],'path':t[2]})
return indexes
def getIndexesbyTypeForUserContributed(self, usercontributed, type):
indexes = []
path = usercontributed.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex WHERE type = (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (type.name,))
data = c.fetchall()
conn.close()
dTypes ={}
type = None
for t in data:
if t[0] in dTypes.keys():
type= dTypes[t[0]]
else:
type = self.typeManager.getTypeForName(t[0])
dTypes[t[0]] = type
indexes.append({'type':type, 'name':t[1],'path':t[2]})
return indexes
def getIndexesForUserContributed(self, usercontributed):
indexes = []
path = usercontributed.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex ORDER BY name COLLATE NOCASE'
c = conn.execute(sql)
data = c.fetchall()
conn.close()
dTypes ={}
type = None
for t in data:
if t[0] in dTypes.keys():
type= dTypes[t[0]]
else:
type = self.typeManager.getTypeForName(t[0])
dTypes[t[0]] = type
indexes.append({'type':type, 'image':self.__getTypeIconWithName(t[0]), 'name':t[1],'path':t[2]})
        return indexes
def getIndexesbyNameForAllUserContributed(self, name):
if name == None or name == '':
return {}
else:
docsets = self.getDownloadedUserContributed()
indexes = {}
for d in docsets:
ind = self.getIndexesbyNameForDocsetSearch(d, name)
for k in ind:
if not k in indexes.keys():
indexes[k] = []
indexes[k].extend(ind[k])
return indexes
def getIndexesbyNameForDocsetSearch(self, docset, name):
if name == None or name == '':
return []
else:
ind = {}
path = docset.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
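            # four passes from strictest to loosest match: exact name, spaces as wildcards,
            # prefix, then substring; each pass excludes the earlier patterns so results
            # stay grouped by match quality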
sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (name, ))
data = {'first' : c.fetchall()}
sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (name.replace(' ','%'), name, ))
data['second'] = c.fetchall()
sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (name.replace(' ','%')+'%', name.replace(' ','%'), name, ))
data['third'] = c.fetchall()
sql = 'SELECT type, name, path FROM searchIndex WHERE name LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) AND name NOT LIKE (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, ('%'+name.replace(' ','%')+'%',name.replace(' ','%')+'%',name.replace(' ','%'), name, ))
data['fourth'] = c.fetchall()
conn.close()
dTypes = {}
for k in data:
ind[k] = []
for t in data[k]:
url = 'file://' + os.path.join(path, 'Contents/Resources/Documents', t[2])
url = url.replace(' ', '%20')
type = None
if t[0] in dTypes.keys():
type= dTypes[t[0]]
else:
type = self.typeManager.getTypeForName(t[0])
dTypes[t[0]] = type
ind[k].append({'name':t[1], 'path':url, 'icon':docset.image,'docsetname':docset.name,'type':type, 'callbackOverride':'', 'docset': docset, 'hasVersions':docset.hasVersions,'version':docset.version})
return ind
if __name__ == '__main__':
import ServerManager
c = UserContributedManager(ServerManager.ServerManager(), '../Images/icons', '../Images/types')
print(c.getAvailableUserContributed())
|
shaun-h/PyDoc
|
Managers/UserContributedManager.py
|
Python
|
mit
| 21,125
|
from voluptuous import Match
from datatypes.core import SingleValueValidator
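# accepts CRS URNs with a 4- or 5-digit EPSG code, e.g. 'urn:ogc:def:crs:EPSG::27700'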
ogc_urn_schema = Match(pattern=r'urn:ogc:def:crs:EPSG::\d{4,5}')
class OgcUrn(SingleValueValidator):
def define_schema(self):
return ogc_urn_schema
def define_error_message(self):
return "Value must be a correctly formatted OGC URN"
|
LandRegistry/datatypes-alpha
|
datatypes/validators/ogc_urn_validator.py
|
Python
|
mit
| 337
|
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerHandlePressV2Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {
'hand_pos': obs[:3],
'gripper': obs[3],
'handle_pos': obs[4:7],
'unused_info': obs[7:],
}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({
'delta_pos': np.arange(3),
'grab_effort': 3
})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.)
action['grab_effort'] = -1.
return action.array
@staticmethod
def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_button = o_d['handle_pos'] + np.array([.0, -.02, .0])
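        # hover 0.2 above the handle until aligned in the x-y plane, then target a point
        # well below it so the gripper presses the handle down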
if np.linalg.norm(pos_curr[:2] - pos_button[:2]) > 0.02:
return pos_button + np.array([0., 0., 0.2])
else:
return pos_button + np.array([.0, .0, -.5])
|
rlworkgroup/metaworld
|
metaworld/policies/sawyer_handle_press_v2_policy.py
|
Python
|
mit
| 1,079
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities used for the documentation and built-in help."""
import re
import sys
import inspect
import os.path
import collections
import qutebrowser
from qutebrowser.utils import usertypes, log, utils
def is_git_repo():
"""Check if we're running from a git repository."""
gitfolder = os.path.join(qutebrowser.basedir, os.path.pardir, '.git')
return os.path.isdir(gitfolder)
def docs_up_to_date(path):
"""Check if the generated html documentation is up to date.
Args:
path: The path of the document to check.
Return:
True if they are up to date or we couldn't check.
False if they are outdated.
"""
if hasattr(sys, 'frozen') or not is_git_repo():
return True
html_path = os.path.join(qutebrowser.basedir, 'html', 'doc', path)
filename = os.path.splitext(path)[0]
asciidoc_path = os.path.join(qutebrowser.basedir, os.path.pardir,
'doc', 'help', filename + '.asciidoc')
try:
html_time = os.path.getmtime(html_path)
asciidoc_time = os.path.getmtime(asciidoc_path)
except FileNotFoundError:
return True
return asciidoc_time <= html_time
class DocstringParser:
"""Generate documentation based on a docstring of a command handler.
The docstring needs to follow the format described in CONTRIBUTING.
Attributes:
_state: The current state of the parser state machine.
_cur_arg_name: The name of the argument we're currently handling.
_short_desc_parts: The short description of the function as list.
_long_desc_parts: The long description of the function as list.
short_desc: The short description of the function.
long_desc: The long description of the function.
arg_descs: A dict of argument names to their descriptions
"""
State = usertypes.enum('State', ['short', 'desc', 'desc_hidden',
'arg_start', 'arg_inside', 'misc'])
def __init__(self, func):
"""Constructor.
Args:
func: The function to parse the docstring for.
"""
self._state = self.State.short
self._cur_arg_name = None
self._short_desc_parts = []
self._long_desc_parts = []
self.arg_descs = collections.OrderedDict()
doc = inspect.getdoc(func)
handlers = {
self.State.short: self._parse_short,
self.State.desc: self._parse_desc,
self.State.desc_hidden: self._skip,
self.State.arg_start: self._parse_arg_start,
self.State.arg_inside: self._parse_arg_inside,
self.State.misc: self._skip,
}
if doc is None:
if sys.flags.optimize < 2:
log.commands.warning(
"Function {}() from {} has no docstring".format(
utils.qualname(func),
inspect.getsourcefile(func)))
self.long_desc = ""
self.short_desc = ""
return
for line in doc.splitlines():
handler = handlers[self._state]
stop = handler(line)
if stop:
break
for k, v in self.arg_descs.items():
desc = ' '.join(v)
desc = re.sub(r', or None($|\.)', r'\1', desc)
desc = re.sub(r', or None', r', or not given', desc)
self.arg_descs[k] = desc
self.long_desc = ' '.join(self._long_desc_parts)
self.short_desc = ' '.join(self._short_desc_parts)
def _process_arg(self, line):
"""Helper method to process a line like 'fooarg: Blah blub'."""
self._cur_arg_name, argdesc = line.split(':', maxsplit=1)
self._cur_arg_name = self._cur_arg_name.strip().lstrip('*')
self.arg_descs[self._cur_arg_name] = [argdesc.strip()]
def _skip(self, line):
"""Handler to ignore everything until we get 'Args:'."""
if line.startswith('Args:'):
self._state = self.State.arg_start
def _parse_short(self, line):
"""Parse the short description (first block) in the docstring."""
if not line:
self._state = self.State.desc
else:
self._short_desc_parts.append(line.strip())
def _parse_desc(self, line):
"""Parse the long description in the docstring."""
if line.startswith('Args:'):
self._state = self.State.arg_start
elif line.strip() == '//':
self._state = self.State.desc_hidden
elif line.strip():
self._long_desc_parts.append(line.strip())
def _parse_arg_start(self, line):
"""Parse first argument line."""
self._process_arg(line)
self._state = self.State.arg_inside
def _parse_arg_inside(self, line):
"""Parse subsequent argument lines."""
argname = self._cur_arg_name
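        # a capitalised section header on its own line (e.g. "Return:") ends the Args block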
if re.match(r'^[A-Z][a-z]+:$', line):
if not self.arg_descs[argname][-1].strip():
self.arg_descs[argname] = self.arg_descs[argname][:-1]
return True
elif not line.strip():
self.arg_descs[argname].append('\n\n')
elif line[4:].startswith(' '):
self.arg_descs[argname].append(line.strip() + '\n')
else:
self._process_arg(line)
return False
|
lahwaacz/qutebrowser
|
qutebrowser/utils/docutils.py
|
Python
|
gpl-3.0
| 6,161
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Tax analysis",
"version": "1.0",
"depends": ["base", "account"],
"author": "Camptocamp SA,Odoo Community Association (OCA)",
"category": 'Accounting & Finance',
"website": "http://www.camptocamp.com",
"license": "AGPL-3",
"data": ["account_tax_analysis_view.xml"],
'installable': True,
"active": False,
}
|
DarkoNikolovski/account-financial-tools
|
account_tax_analysis/__openerp__.py
|
Python
|
agpl-3.0
| 1,258
|
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class MultiSelectionBox4Client:
def main(self):
items = [
Item(Id("a"), "a"),
Item(Id("x"), "x"),
Item(Id("y"), "y"),
Item(Id("z"), "z")
]
contents = HBox(
VSpacing(20),
VBox(
HSpacing(70),
VSpacing(0.2),
MultiSelectionBox(
Id("multisel"),
Opt("notify"),
"Multiselection",
items
),
HBox(
#PushButton(Id("ok"), Opt("default"), Label.OKButton),
#PushButton(Id("cancel"), Opt("key_F9"), Label.CancelButton)
PushButton(Id("ok"), Opt("default"), "OK"),
PushButton(Id("cancel"), Opt("key_F9"), "Cancel")
),
VSpacing(0.2)
)
)
UI.OpenDialog(Opt("decorated"), contents)
UI.ChangeWidget("multisel", "CurrentItem", None)
UI.ChangeWidget("multisel", "SelectedItems", [Id("a"), Id("x")])
UI.ChangeWidget("multisel", "SelectedItems", [Id("y"), Id("z")])
selected_items = UI.QueryWidget(Id("multisel"), "SelectedItems")
ycpbuiltins.y2milestone("Selected items: %1", selected_items)
ycpbuiltins.y2milestone(
"Current item: %1",
UI.QueryWidget(Id("multisel"), "CurrentItem")
)
while True:
event = UI.WaitForEvent()
ret = event["ID"]
if ret == "multisel":
current2 = UI.QueryWidget(Id("multisel"), "CurrentItem")
ycpbuiltins.y2internal("current: %1", current2)
if ret == "ok" or ret == "cancel":
break
current = UI.QueryWidget(Id("multisel"), "CurrentItem")
ycpbuiltins.y2internal("current before leaving: %1", current)
UI.CloseDialog()
MultiSelectionBox4Client().main()
|
yast/yast-python-bindings
|
examples/MultiSelectionBox4.py
|
Python
|
gpl-2.0
| 1,852
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from configvalidator import LoadException
class MyTestCase(unittest.TestCase):
def test_something(self):
from configvalidator.tools.basics import list_objects
res = list_objects()
self.assertListEqual(
sorted(res["option_features"]), sorted(['sub_ini', 'default']))
self.assertListEqual(
sorted(res["section_features"]), sorted(['raw_section_input', 'default']))
self.assertListEqual(
sorted(res["validators"]),
sorted(['error', 'or', 'default', 'json',
'email', 'url', 'ip', 'bool', 'float', 'str', 'generalizedTime', 'int', 'ipv6',
'regex', 'file', 'cert', 'and', 'ipv4', 'dir', 'path', 'port',
'items', 'item', 'netbios', "list", "dict", "base64",
'StripQuotationMark', 'strip_dir', 'strip_file', 'strip_path',
'empty', 'not-empty', 'one-off', "freePort"]))
def test_load_validator(self):
from configvalidator import load_validator
with self.assertRaises(LoadException) as e:
load_validator("NOT-DEFINED")
self.assertEqual(str(e.exception), "no validator with the name NOT-DEFINED")
def test_load_section_feature(self):
from configvalidator.tools.basics import load_section_feature
with self.assertRaises(LoadException) as e:
load_section_feature("NOT-DEFINED")
self.assertEqual(str(e.exception), "no Section feature with the name NOT-DEFINED")
def test_load_option_feature(self):
from configvalidator.tools.basics import load_option_feature
with self.assertRaises(LoadException) as e:
load_option_feature("NOT-DEFINED")
self.assertEqual(str(e.exception), "no option feature with the name NOT-DEFINED")
def test_add_new_class(self):
from configvalidator.tools.basics import Validator
from configvalidator import load_validator
# try to loas class -> error
with self.assertRaises(LoadException) as e:
load_validator("DEMO_CLASS")
self.assertEqual(str(e.exception), "no validator with the name DEMO_CLASS")
# gen new class
newclass = type("DEMO_CLASS", (Validator,), {"validate": lambda s, x: x})
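        # merely defining the subclass registers it (the test cleans DATA_VALIDATOR up below),
        # so the next load_validator call succeeds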
# load klass -> ok
res = load_validator("DEMO_CLASS")
self.assertEqual(newclass, res)
# remove class so that other test pass
from configvalidator.tools.basics import DATA_VALIDATOR
del DATA_VALIDATOR["DEMO_CLASS"]
with self.assertRaises(LoadException) as e:
load_validator("DEMO_CLASS")
self.assertEqual(str(e.exception), "no validator with the name DEMO_CLASS")
def test_strip_validators(self):
from configvalidator.validators import StripQuotationMarkPathValidator
from configvalidator.validators import PathValidator
self.assertEqual(PathValidator, StripQuotationMarkPathValidator()._validator.__class__)
self.assertEqual("strip_path", StripQuotationMarkPathValidator.name)
from configvalidator.validators import StripQuotationMarkFileValidator
from configvalidator.validators import FileValidator
self.assertEqual(FileValidator, StripQuotationMarkFileValidator()._validator.__class__)
self.assertEqual("strip_file", StripQuotationMarkFileValidator.name)
from configvalidator.validators import StripQuotationMarkDirValidator
from configvalidator.validators import DirValidator
self.assertEqual(DirValidator, StripQuotationMarkDirValidator()._validator.__class__)
self.assertEqual("strip_dir", StripQuotationMarkDirValidator.name)
if __name__ == '__main__':
unittest.main()
|
JanHendrikDolling/configvalidator
|
test/test_metaclass.py
|
Python
|
apache-2.0
| 3,964
|
"""Analyze a game using gp learn"""
import argparse
import json
import sys
from gameanalysis import gameio
from gameanalysis import nash
from gameanalysis import regret
from gameanalysis import gpgame
def add_parser(subparsers):
parser = subparsers.add_parser(
'learning', help="""Analyze game using learning""",
description="""Perform game analysis""")
parser.add_argument(
'--input', '-i', metavar='<input-file>', default=sys.stdin,
type=argparse.FileType('r'), help="""Input file for script. (default:
stdin)""")
parser.add_argument(
'--output', '-o', metavar='<output-file>', default=sys.stdout,
type=argparse.FileType('w'), help="""Output file for script. (default:
stdout)""")
parser.add_argument(
'--dist-thresh', metavar='<distance-threshold>', type=float,
default=1e-3, help="""L2 norm threshold, inside of which, equilibria
are considered identical. (default: %(default)f)""")
parser.add_argument(
'--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
default=1e-3, help="""Maximum regret to consider an equilibrium
confirmed. (default: %(default)f)""")
parser.add_argument(
'--supp-thresh', '-t', metavar='<support-threshold>', type=float,
default=1e-3, help="""Maximum probability to consider a strategy in
support. (default: %(default)f)""")
parser.add_argument(
'--rand-restarts', metavar='<random-restarts>', type=int, default=0,
help="""The number of random points to add to nash equilibrium finding.
(default: %(default)d)""")
parser.add_argument(
'--max-iters', '-m', metavar='<maximum-iterations>', type=int,
default=10000, help="""The maximum number of iterations to run through
replicator dynamics. (default: %(default)d)""")
parser.add_argument(
'--converge-thresh', '-c', metavar='<convergence-threshold>',
type=float, default=1e-8, help="""The convergence threshold for
replicator dynamics. (default: %(default)f)""")
parser.add_argument(
'--processes', '-p', metavar='<num-procs>', type=int, help="""Number of
processes to use to run nash finding. (default: number of cores)""")
return parser
def main(args):
game, serial = gameio.read_game(json.load(args.input))
# create gpgame
lgame = gpgame.PointGPGame(game)
# mixed strategy nash equilibria search
methods = {
'replicator': {
'max_iters': args.max_iters,
'converge_thresh': args.converge_thresh}}
mixed_equilibria = game.trim_mixture_support(
nash.mixed_nash(lgame, regret_thresh=args.regret_thresh,
dist_thresh=args.dist_thresh, processes=args.processes,
at_least_one=True, **methods),
args.supp_thresh)
equilibria = [(eqm, regret.mixture_regret(lgame, eqm))
for eqm in mixed_equilibria]
# Output game
args.output.write('Game Learning\n')
args.output.write('=============\n')
args.output.write(serial.to_game_printstr(game))
args.output.write('\n\n')
# Output social welfare
args.output.write('Social Welfare\n')
args.output.write('--------------\n')
welfare, profile = regret.max_pure_social_welfare(game)
args.output.write('\nMaximum social welfare profile:\n')
args.output.write(serial.to_prof_printstr(profile))
args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
if game.num_roles > 1:
for role, welfare, profile in zip(
serial.role_names,
*regret.max_pure_social_welfare(game, True)):
args.output.write('Maximum "{}" welfare profile:\n'.format(
role))
args.output.write(serial.to_prof_printstr(profile))
args.output.write('Welfare: {:.4f}\n\n'.format(welfare))
args.output.write('\n')
# Output Equilibria
args.output.write('Equilibria\n')
args.output.write('----------\n')
if equilibria:
args.output.write('Found {:d} equilibri{}\n\n'.format(
len(equilibria), 'um' if len(equilibria) == 1 else 'a'))
for i, (eqm, reg) in enumerate(equilibria, 1):
args.output.write('Equilibrium {:d}:\n'.format(i))
args.output.write(serial.to_mix_printstr(eqm))
args.output.write('Regret: {:.4f}\n\n'.format(reg))
else:
args.output.write('Found no equilibria\n\n') # pragma: no cover
args.output.write('\n')
# Output json data
args.output.write('Json Data\n')
args.output.write('=========\n')
json_data = {
'equilibria': [serial.to_mix_json(eqm) for eqm, _ in equilibria]}
json.dump(json_data, args.output)
args.output.write('\n')
|
yackj/GameAnalysis
|
gameanalysis/script/learning.py
|
Python
|
apache-2.0
| 4,827
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated. Please use `airflow.gcp.hooks.tasks`.
"""
import warnings
# pylint: disable=unused-import
from airflow.gcp.hooks.tasks import CloudTasksHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.gcp.hooks.tasks`",
DeprecationWarning, stacklevel=2
)
|
Fokko/incubator-airflow
|
airflow/contrib/hooks/gcp_tasks_hook.py
|
Python
|
apache-2.0
| 1,119
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import time
import notedown
import nbformat
def md2ipynb():
    assert len(sys.argv) == 3, 'usage: input.md output.ipynb'
(src_fn, input_fn, output_fn) = sys.argv
# timeout for each notebook, in sec
timeout = 60 * 60
# if enable evaluation
do_eval = int(os.environ.get('EVAL', True))
# Skip these notebooks as some APIs will no longer be used
skip_list = ["pytorch.md", "mnist.md", "custom-loss.md", "fit_api_tutorial.md", \
"01-ndarray-intro.md", "02-ndarray-operations.md", "03-ndarray-contexts.md", \
"gotchas_numpy_in_mxnet.md", "csr.md", "row_sparse.md", "fine_tuning_gluon.md", \
"inference_on_onnx_model.md", "amp.md", "profiler.md"]
require_gpu = []
# the files will be ignored for execution
ignore_execution = skip_list + require_gpu
reader = notedown.MarkdownReader(match='strict')
with open(input_fn, 'r', encoding="utf8") as f:
notebook = reader.read(f)
if do_eval:
if not any([i in input_fn for i in ignore_execution]):
tic = time.time()
notedown.run(notebook, timeout)
print('%s: Evaluated %s in %f sec'%(src_fn, input_fn, time.time()-tic))
# need to add language info to for syntax highlight
notebook['metadata'].update({'language_info':{'name':'python'}})
with open(output_fn, 'w', encoding='utf-8') as f:
f.write(nbformat.writes(notebook))
print('%s: Write results into %s'%(src_fn, output_fn))
if __name__ == '__main__':
md2ipynb()
|
szha/mxnet
|
docs/python_docs/python/scripts/md2ipynb.py
|
Python
|
apache-2.0
| 2,321
|
#!python
# Collect for each region the list of players by league
# Strategy: we go through the list of all the known players and check their games
# As a starting list, we can take master/challenger players
import os
import multiprocessing
import time
import pickle
import sys
from pickle import PickleError
from InterfaceAPI import InterfaceAPI, ApiError403, ApiError
import Modes
MAX_DAYS = 1 # up to how many days we look up
# Note it's not important that we get every single player, since we only need one participant for each game
MAX_DEPTH = 1000 * (time.time() - 86400 * MAX_DAYS)
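# MAX_DEPTH is the oldest allowed game timestamp; Riot match timestamps are epoch milliseconds, hence the factor 1000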
ATTEMPTS = 6
ATTEMPTS_WAIT = 300
SAVE_INTERVAL = 60 # save every minute
DATABASE_WAIT = 60 # if the database cannot be reached, wait
class PlayerListing:
def __init__(self, database, leagues, region, fast=False):
self.api = InterfaceAPI()
self.database = database
self.leagues = leagues
self.region = region
self.nextSave = time.time() + SAVE_INTERVAL
from_scratch = True
if not os.path.isdir(self.database):
raise FileNotFoundError(self.database)
if not os.path.isdir(os.path.join(self.database, 'player_listing', self.region)):
os.makedirs(os.path.join(self.database, 'player_listing', self.region))
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'players')):
self.players = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'players'), 'rb'))
for league in leagues:
if self.players[league]:
from_scratch = False
break
else:
self.players = {}
for league in leagues:
self.players[league] = []
# to make sure we don't explore several time the same player/ games
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredPlayers')):
self.exploredPlayers = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredPlayers'), 'rb'))
else:
self.exploredPlayers = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredGames')):
self.exploredGames = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredGames'), 'rb'))
else:
self.exploredGames = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'to_explore')):
self.to_explore = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'to_explore'), 'rb'))
else:
self.to_explore = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredLeagues')):
self.exploredLeagues = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredLeagues'), 'rb'))
else:
self.exploredLeagues = []
if from_scratch:
print(region, 'first time exploration, checking challenger and master leagues', file=sys.stderr)
# only the first time
if fast: # only the challenger and master league, no need to explore anything
if 'challenger' in self.players:
challLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/challengerleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in challLeague['entries']:
self.players['challenger'].append(e['summonerId'])
if 'grandmaster' in self.players:
grandmasterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/grandmasterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in grandmasterLeague['entries']:
self.players['grandmaster'].append(e['summonerId'])
if 'master' in self.players:
masterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/masterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in masterLeague['entries']:
self.players['master'].append(e['summonerId'])
else:
challLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/challengerleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in challLeague['entries']:
self.to_explore.append(e['summonerId'])
grandmasterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/grandmasterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in grandmasterLeague['entries']:
self.to_explore.append(e['summonerId'])
masterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/masterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in masterLeague['entries']:
self.to_explore.append(e['summonerId'])
self.exploredPlayers = list(self.to_explore)
def explore(self):
print(self.region, len(self.to_explore), 'players left to explore', file=sys.stderr)
while self.to_explore:
if time.time() > self.nextSave:
print(self.region, len(self.to_explore), 'players left to explore', file=sys.stderr)
print(self.region, 'saving...', file=sys.stderr)
self.save()
self.nextSave = time.time() + SAVE_INTERVAL
sumID = self.to_explore.pop(0) # strongest players first
try:
accountID = self.api.getData('https://%s.api.riotgames.com/lol/summoner/v4/summoners/%s' % (self.region, sumID))['accountId']
games = \
self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matchlists/by-account/%s' % (self.region, accountID), {'queue': 420})[
'matches']
playerLeagueList = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/positions/by-summoner/%s' % (self.region, sumID))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
# we check that the summoner is in one of the leagues we want
playerSoloQLeague = None
for league in playerLeagueList:
if league['queueType'] == 'RANKED_SOLO_5x5':
playerSoloQLeague = league
break
if not playerSoloQLeague:
print('no soloQ rank: ', self.region, sumID)
continue
playerLeagueTier = playerSoloQLeague['tier'].lower()
playerLeagueId = playerSoloQLeague['leagueId']
if playerLeagueTier not in self.leagues:
print('refused tier:', self.region, sumID, playerLeagueTier)
continue
self.players[playerLeagueTier].append(sumID)
print('added:', self.region, sumID, playerLeagueTier)
# We add all the people in the same league for exploration
if playerLeagueId not in self.exploredLeagues:
self.exploredLeagues.append(playerLeagueId)
print('new league found:', self.region, playerLeagueTier, playerLeagueId)
try:
                    newLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/leagues/%s' % (self.region, playerLeagueId))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
for e in newLeague['entries']:
sumID = int(e['summonerId'])
if sumID not in self.exploredPlayers:
self.to_explore.append(sumID)
self.exploredPlayers.append(sumID)
# old API
# for e in playerSoloQLeague['entries']:
# sumID = int(e['summonerId'])
# if sumID not in self.exploredPlayers:
# self.to_explore.append(sumID)
# self.exploredPlayers.append(sumID)
# We have to explore some games to get to other leagues
# We hope that at least 1 player of each league has played within the time window
for game in games: # from most recent to oldest
# the same game can come up to 10 times, so it's better to not make useless API calls
if game['gameId'] in self.exploredGames:
continue
self.exploredGames.append(game['gameId'])
gameID = str(game['gameId'])
timestamp = game['timestamp']
if timestamp < MAX_DEPTH: # game is too old?
break
try:
gameData = self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matches/%s' % (self.region, gameID))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
# adding all the non explored players from the game
for participant in gameData['participantIdentities']:
sumID = participant['player']['summonerId']
if sumID not in self.exploredPlayers:
self.to_explore.append(sumID)
self.exploredPlayers.append(sumID)
return None # everything explored
def save(self):
while True:
if not os.path.isdir(self.database):
print(self.region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
try:
pickle.dump(self.players, open(os.path.join(self.database, 'player_listing', self.region, 'players'), 'wb'))
pickle.dump(self.exploredPlayers, open(os.path.join(self.database, 'player_listing', self.region, 'exploredPlayers'), 'wb'))
pickle.dump(self.exploredLeagues, open(os.path.join(self.database, 'player_listing', self.region, 'exploredLeagues'), 'wb'))
pickle.dump(self.exploredGames, open(os.path.join(self.database, 'player_listing', self.region, 'exploredGames'), 'wb'))
pickle.dump(self.to_explore, open(os.path.join(self.database, 'player_listing', self.region, 'to_explore'), 'wb'))
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(self.region, 'saving failed', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
break
def keepExploring(database, leagues, region, attempts=ATTEMPTS):
print(region, 'starting player listing', file=sys.stderr)
pl = None
if list(set(leagues) - {'challenger', 'grandmaster', 'master'}): # we check it is necessary to look for the leagues
while True:
if not pl:
try:
pl = PlayerListing(database, leagues, region)
except ApiError403 as e:
print('FATAL ERROR', region, e, file=sys.stderr)
break
except ApiError as e:
print(e, file=sys.stderr)
attempts -= 1
if attempts <= 0:
print(region, 'initial connection failed. No more connection attempt.', file=sys.stderr)
break
                    print(region, 'initial connection failed. Retrying in {} minutes. Attempts left: {}'.format(ATTEMPTS_WAIT, attempts), file=sys.stderr)
time.sleep(ATTEMPTS_WAIT)
continue
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
try:
e = pl.explore()
except KeyError: # appends sometime, looks like some data is corrupted
continue
if e is not None:
print('FATAL ERROR', region, e, file=sys.stderr)
else:
print(region, 'all players explored downloaded', file=sys.stderr)
break
else: # only master/challenger league
while True:
if not pl:
try:
pl = PlayerListing(database, leagues, region, fast=True)
except ApiError403 as e:
print('FATAL ERROR', region, e, file=sys.stderr)
break
except ApiError as e:
print(e, file=sys.stderr)
attempts -= 1
if attempts <= 0:
print(region, 'initial connection failed. No more connection attempt.', file=sys.stderr)
break
print(region, 'initial connection failed. Retrying in {} minutes. Attempts left: {}'.format(ATTEMPTS_WAIT, attempts), file=sys.stderr)
time.sleep(ATTEMPTS_WAIT)
continue
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
# No need to explore
print(region, 'all players explored downloaded', file=sys.stderr)
break
# we finally save the players list
if pl is not None:
print(region, 'Saving players list', file=sys.stderr)
pl.save()
def run(mode):
assert isinstance(mode, Modes.Base_Mode), 'Unrecognized mode {}'.format(mode)
keprocs = []
for region in mode.REGIONS:
keprocs.append(multiprocessing.Process(target=keepExploring, args=(mode.DATABASE, mode.LEAGUES, region)))
keprocs[-1].start()
for keproc in keprocs:
keproc.join()
print('-- Listing complete --')
if __name__ == '__main__':
m = Modes.Base_Mode()
run(m)
|
vingtfranc/LoLAnalyzer
|
PlayersListing.py
|
Python
|
mit
| 14,631
|